net/core/netpoll.c
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
#define MAX_RETRIES 20000

static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
static struct sk_buff *skbs;

static DEFINE_SPINLOCK(queue_lock);
static int queue_depth;
static struct sk_buff *queue_head, *queue_tail;

static atomic_t trapped;

#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);

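/*
 * Worker that drains the deferred-transmit queue built up by
 * netpoll_queue(), handing each queued skb to dev_queue_xmit().
 */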
static void queue_process(void *p)
{
        unsigned long flags;
        struct sk_buff *skb;

        while (queue_head) {
                spin_lock_irqsave(&queue_lock, flags);

                skb = queue_head;
                queue_head = skb->next;
                if (skb == queue_tail)
                        queue_head = NULL;

                queue_depth--;

                spin_unlock_irqrestore(&queue_lock, flags);

                dev_queue_xmit(skb);
        }
}

static DECLARE_WORK(send_queue, queue_process, NULL);

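/*
 * Defer transmission of an skb to process context, for callers that
 * cannot take the device's transmit path directly. The packet is
 * dropped once MAX_QUEUE_DEPTH entries are already queued.
 */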
void netpoll_queue(struct sk_buff *skb)
{
        unsigned long flags;

        if (queue_depth == MAX_QUEUE_DEPTH) {
                __kfree_skb(skb);
                return;
        }

        spin_lock_irqsave(&queue_lock, flags);
        if (!queue_head)
                queue_head = skb;
        else
                queue_tail->next = skb;
        queue_tail = skb;
        queue_depth++;
        spin_unlock_irqrestore(&queue_lock, flags);

        schedule_work(&send_queue);
}

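/*
 * Verify the UDP checksum of a received packet, taking advantage of
 * hardware checksum offload (CHECKSUM_HW / CHECKSUM_UNNECESSARY) when
 * the driver has already done the work. Returns 0 if the checksum is
 * good, non-zero otherwise.
 */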
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
                             unsigned short ulen, u32 saddr, u32 daddr)
{
        unsigned int psum;

        if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
                return 0;

        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

        if (skb->ip_summed == CHECKSUM_HW &&
            !(u16)csum_fold(csum_add(psum, skb->csum)))
                return 0;

        skb->csum = psum;

        return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static void poll_napi(struct netpoll *np)
{
        struct netpoll_info *npinfo = np->dev->npinfo;
        int budget = 16;

        if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
            npinfo->poll_owner != smp_processor_id() &&
            spin_trylock(&npinfo->poll_lock)) {
                npinfo->rx_flags |= NETPOLL_RX_DROP;
                atomic_inc(&trapped);

                np->dev->poll(np->dev, &budget);

                atomic_dec(&trapped);
                npinfo->rx_flags &= ~NETPOLL_RX_DROP;
                spin_unlock(&npinfo->poll_lock);
        }
}

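/*
 * Poll the device by hand: invoke the driver's ->poll_controller() to
 * simulate an interrupt, run any pending NAPI work, and reap skbs left
 * on this CPU's completion queue.
 */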
void netpoll_poll(struct netpoll *np)
{
        if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
                return;

        /* Process pending work on NIC */
        np->dev->poll_controller(np->dev);
        if (np->dev->poll)
                poll_napi(np);

        zap_completion_queue();
}

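/* Top up the emergency skb pool, up to MAX_SKBS entries. */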
static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_list_lock, flags);
        while (nr_skbs < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                skb->next = skbs;
                skbs = skb;
                nr_skbs++;
        }
        spin_unlock_irqrestore(&skb_list_lock, flags);
}

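/*
 * Drain this CPU's softnet completion queue, freeing skbs that drivers
 * have already finished with; skbs carrying a destructor are released
 * through dev_kfree_skb_any() rather than freed directly.
 */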
static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if(skb->destructor)
                                dev_kfree_skb_any(skb); /* put this one back */
                        else
                                __kfree_skb(skb);
                }
        }

        put_cpu_var(softnet_data);
}

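/*
 * Get an skb for transmission: try a fresh atomic allocation first,
 * fall back to the pre-allocated pool, and as a last resort keep
 * polling the device until memory frees up.
 */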
static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
{
        int once = 1, count = 0;
        unsigned long flags;
        struct sk_buff *skb = NULL;

        zap_completion_queue();
repeat:
        if (nr_skbs < MAX_SKBS)
                refill_skbs();

        skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb) {
                spin_lock_irqsave(&skb_list_lock, flags);
                skb = skbs;
                if (skb) {
                        skbs = skb->next;
                        skb->next = NULL;
                        nr_skbs--;
                }
                spin_unlock_irqrestore(&skb_list_lock, flags);
        }

        if(!skb) {
                count++;
                if (once && (count == 1000000)) {
                        printk("out of netpoll skbs!\n");
                        once = 0;
                }
                netpoll_poll(np);
                goto repeat;
        }

        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

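/*
 * Transmit an skb directly via the driver's hard_start_xmit(), retrying
 * (with polling and a short delay) while the queue is stopped or the
 * driver reports the hardware busy. Falls back to np->drop() or simply
 * frees the skb when calling into the driver would recurse.
 */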
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        int status;
        struct netpoll_info *npinfo;

        if (!np || !np->dev || !netif_running(np->dev)) {
                __kfree_skb(skb);
                return;
        }

        npinfo = np->dev->npinfo;

        /* avoid recursion */
        if (npinfo->poll_owner == smp_processor_id() ||
            np->dev->xmit_lock_owner == smp_processor_id()) {
                if (np->drop)
                        np->drop(skb);
                else
                        __kfree_skb(skb);
                return;
        }

        do {
                npinfo->tries--;
                spin_lock(&np->dev->xmit_lock);
                np->dev->xmit_lock_owner = smp_processor_id();

                /*
                 * network drivers do not expect to be called if the queue is
                 * stopped.
                 */
                if (netif_queue_stopped(np->dev)) {
                        np->dev->xmit_lock_owner = -1;
                        spin_unlock(&np->dev->xmit_lock);
                        netpoll_poll(np);
                        udelay(50);
                        continue;
                }

                status = np->dev->hard_start_xmit(skb, np->dev);
                np->dev->xmit_lock_owner = -1;
                spin_unlock(&np->dev->xmit_lock);

                /* success */
                if(!status) {
                        npinfo->tries = MAX_RETRIES; /* reset */
                        return;
                }

                /* transmit busy */
                netpoll_poll(np);
                udelay(50);
        } while (npinfo->tries > 0);
}

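/*
 * Build a complete Ethernet/IP/UDP frame around the message by hand
 * (netpoll bypasses the normal protocol stack) and send it out via
 * netpoll_send_skb().
 */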
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, eth_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;

        udp_len = len + sizeof(*udph);
        ip_len = eth_len = udp_len + sizeof(*iph);
        total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

        skb = find_skb(np, total_len, total_len - len);
        if (!skb)
                return;

        memcpy(skb->data, msg, len);
        skb->len += len;

        udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;

        iph = (struct iphdr *)skb_push(skb, sizeof(*iph));

        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
        iph->id       = 0;
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
        iph->check    = 0;
        put_unaligned(htonl(np->local_ip), &(iph->saddr));
        put_unaligned(htonl(np->remote_ip), &(iph->daddr));
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);

        eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->local_mac, 6);
        memcpy(eth->h_dest, np->remote_mac, 6);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}

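/*
 * Answer ARP requests for the netpoll interface ourselves: while the
 * receive path is trapped the normal ARP code never sees the request,
 * and the remote logging host still needs to resolve our address.
 */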
static void arp_reply(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        u32 sip, tip;
        struct sk_buff *send_skb;
        struct netpoll *np = NULL;

        if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
                np = npinfo->rx_np;
        if (!np)
                return;

        /* No arp on this interface */
        if (skb->dev->flags & IFF_NOARP)
                return;

        if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
                                 (2 * skb->dev->addr_len) +
                                 (2 * sizeof(u32)))))
                return;

        skb->h.raw = skb->nh.raw = skb->data;
        arp = skb->nh.arph;

        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp+1) + skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
        arp_ptr += 4 + skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);

        /* Should we ignore arp? */
        if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
                return;

        size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
        send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
                            LL_RESERVED_SPACE(np->dev));

        if (!send_skb)
                return;

        send_skb->nh.raw = send_skb->data;
        arp = (struct arphdr *) skb_put(send_skb, size);
        send_skb->dev = skb->dev;
        send_skb->protocol = htons(ETH_P_ARP);

        /* Fill the device header for the ARP frame */

        if (np->dev->hard_header &&
            np->dev->hard_header(send_skb, skb->dev, ptype,
                                       np->remote_mac, np->local_mac,
                                       send_skb->len) < 0) {
                kfree_skb(send_skb);
                return;
        }

        /*
         * Fill out the arp protocol part.
         *
         * we only support ethernet device type,
         * which (according to RFC 1390) should always equal 1 (Ethernet).
         */

        arp->ar_hrd = htons(np->dev->type);
        arp->ar_pro = htons(ETH_P_IP);
        arp->ar_hln = np->dev->addr_len;
        arp->ar_pln = 4;
        arp->ar_op = htons(type);

        arp_ptr=(unsigned char *)(arp + 1);
        memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &tip, 4);
        arp_ptr += 4;
        memcpy(arp_ptr, np->remote_mac, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &sip, 4);

        netpoll_send_skb(np, send_skb);
}

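/*
 * Inspect a received packet and decide whether netpoll consumes it:
 * ARP requests are answered directly while trapped, and UDP packets
 * matching the configured addresses/ports are handed to np->rx_hook().
 * Returns 1 if the skb was consumed, 0 to let the normal stack see it.
 */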
int __netpoll_rx(struct sk_buff *skb)
{
        int proto, len, ulen;
        struct iphdr *iph;
        struct udphdr *uh;
        struct netpoll *np = skb->dev->npinfo->rx_np;

        if (!np)
                goto out;
        if (skb->dev->type != ARPHRD_ETHER)
                goto out;

        /* check if netpoll clients need ARP */
        if (skb->protocol == __constant_htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                arp_reply(skb);
                return 1;
        }

        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto out;
        if (skb_shared(skb))
                goto out;

        iph = (struct iphdr *)skb->data;
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
        if (iph->ihl < 5 || iph->version != 4)
                goto out;
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto out;
        if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
                goto out;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < iph->ihl*4)
                goto out;

        if (iph->protocol != IPPROTO_UDP)
                goto out;

        len -= iph->ihl*4;
        uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
        ulen = ntohs(uh->len);

        if (ulen != len)
                goto out;
        if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
                goto out;
        if (np->local_ip && np->local_ip != ntohl(iph->daddr))
                goto out;
        if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
                goto out;
        if (np->local_port && np->local_port != ntohs(uh->dest))
                goto out;

        np->rx_hook(np, ntohs(uh->source),
                    (char *)(uh+1),
                    ulen - sizeof(struct udphdr));

        kfree_skb(skb);
        return 1;

out:
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
        }

        return 0;
}

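/*
 * Parse a netpoll configuration string of the form
 *   [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-mac]
 * where omitted fields keep their current values. Returns 0 on success,
 * -1 on a parse failure.
 */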
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur=opt, *delim;

        if(*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim=0;
                np->local_port=simple_strtol(cur, NULL, 10);
                cur=delim;
        }
        cur++;
        printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);

        if(*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim=0;
                np->local_ip=ntohl(in_aton(cur));
                cur=delim;

                printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                       np->name, HIPQUAD(np->local_ip));
        }
        cur++;

        if ( *cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim=0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur=delim;
        }
        cur++;

        printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);

        if ( *cur != '@' ) {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim=0;
                np->remote_port=simple_strtol(cur, NULL, 10);
                cur=delim;
        }
        cur++;
        printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim=0;
        np->remote_ip=ntohl(in_aton(cur));
        cur=delim+1;

        printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
                       np->name, HIPQUAD(np->remote_ip));

        if( *cur != 0 )
        {
                /* MAC address */
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim=0;
                np->remote_mac[0]=simple_strtol(cur, NULL, 16);
                cur=delim+1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim=0;
                np->remote_mac[1]=simple_strtol(cur, NULL, 16);
                cur=delim+1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim=0;
                np->remote_mac[2]=simple_strtol(cur, NULL, 16);
                cur=delim+1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim=0;
                np->remote_mac[3]=simple_strtol(cur, NULL, 16);
                cur=delim+1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim=0;
                np->remote_mac[4]=simple_strtol(cur, NULL, 16);
                cur=delim+1;
                np->remote_mac[5]=simple_strtol(cur, NULL, 16);
        }

        printk(KERN_INFO "%s: remote ethernet address "
               "%02x:%02x:%02x:%02x:%02x:%02x\n",
               np->name,
               np->remote_mac[0],
               np->remote_mac[1],
               np->remote_mac[2],
               np->remote_mac[3],
               np->remote_mac[4],
               np->remote_mac[5]);

        return 0;

 parse_failed:
        printk(KERN_INFO "%s: couldn't parse config at %s!\n",
               np->name, cur);
        return -1;
}

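/*
 * Attach a netpoll client to its network device: look the device up by
 * name, allocate per-device netpoll state if needed, bring the interface
 * up and wait for carrier, fill in any unset local addresses, and finally
 * publish npinfo so the receive path can see it.
 */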
int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev_name)
                ndev = dev_get_by_name(np->dev_name);
        if (!ndev) {
                printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
                return -1;
        }

        np->dev = ndev;
        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo)
                        goto release;

                npinfo->rx_flags = 0;
                npinfo->rx_np = NULL;
                spin_lock_init(&npinfo->poll_lock);
                npinfo->poll_owner = -1;
                npinfo->tries = MAX_RETRIES;
                spin_lock_init(&npinfo->rx_lock);
        } else
                npinfo = ndev->npinfo;

        if (!ndev->poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
                goto release;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
                       np->name, np->dev_name);

                rtnl_lock();
                if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, np->dev_name);
                        rtnl_unlock();
                        goto release;
                }
                rtnl_unlock();

                atleast = jiffies + HZ/10;
                atmost = jiffies + 4*HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                printk(KERN_NOTICE
                                       "%s: timeout waiting for carrier\n",
                                       np->name);
                                break;
                        }
                        cond_resched();
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */

                if (time_before(jiffies, atleast)) {
                        printk(KERN_NOTICE "%s: carrier detect appears"
                               " untrustworthy, waiting 4 seconds\n",
                               np->name);
                        msleep(4000);
                }
        }

        if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
                memcpy(np->local_mac, ndev->dev_addr, 6);

        if (!np->local_ip) {
                rcu_read_lock();
                in_dev = __in_dev_get_rcu(ndev);

                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        goto release;
                }

                np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
                rcu_read_unlock();
                printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                       np->name, HIPQUAD(np->local_ip));
        }

        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                npinfo->rx_np = np;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        /* fill up the skb queue */
        refill_skbs();

        /* last thing to do is link it to the net device structure */
        ndev->npinfo = npinfo;

        /* avoid racing with NAPI reading npinfo */
        synchronize_rcu();

        return 0;

 release:
        if (!ndev->npinfo)
                kfree(npinfo);
        np->dev = NULL;
        dev_put(ndev);
        return -1;
}

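/*
 * Detach a netpoll client: unhook its receive path from the device's
 * npinfo and drop the device reference taken in netpoll_setup().
 */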
void netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev) {
                npinfo = np->dev->npinfo;
                if (npinfo && npinfo->rx_np == np) {
                        spin_lock_irqsave(&npinfo->rx_lock, flags);
                        npinfo->rx_np = NULL;
                        npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
                }
                dev_put(np->dev);
        }

        np->dev = NULL;
}

int netpoll_trap(void)
{
        return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
        if (trap)
                atomic_inc(&trapped);
        else
                atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
EXPORT_SYMBOL(netpoll_queue);