net/core/netpoll.c
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
#define MAX_RETRIES 20000

static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
static struct sk_buff *skbs;

static DEFINE_SPINLOCK(queue_lock);
static int queue_depth;
static struct sk_buff *queue_head, *queue_tail;

static atomic_t trapped;

#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

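/*
 * Work handler that drains the deferred transmit queue from process
 * context, handing each queued skb back to dev_queue_xmit().
 */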
static void queue_process(void *p)
{
        unsigned long flags;
        struct sk_buff *skb;

        while (queue_head) {
                spin_lock_irqsave(&queue_lock, flags);

                skb = queue_head;
                queue_head = skb->next;
                if (skb == queue_tail)
                        queue_head = NULL;

                queue_depth--;

                spin_unlock_irqrestore(&queue_lock, flags);

                dev_queue_xmit(skb);
        }
}

static DECLARE_WORK(send_queue, queue_process, NULL);

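/*
 * Defer transmission of an skb to process context.  The packet is
 * appended to a simple singly-linked queue, bounded by MAX_QUEUE_DEPTH,
 * and sent later by the send_queue work item.
 */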
void netpoll_queue(struct sk_buff *skb)
{
        unsigned long flags;

        if (queue_depth == MAX_QUEUE_DEPTH) {
                __kfree_skb(skb);
                return;
        }

        spin_lock_irqsave(&queue_lock, flags);
        if (!queue_head)
                queue_head = skb;
        else
                queue_tail->next = skb;
        queue_tail = skb;
        queue_depth++;
        spin_unlock_irqrestore(&queue_lock, flags);

        schedule_work(&send_queue);
}

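/*
 * Verify the UDP checksum of a received packet.  Returns 0 if the
 * checksum is valid or not required, non-zero otherwise.
 */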
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
                             unsigned short ulen, u32 saddr, u32 daddr)
{
        unsigned int psum;

        if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
                return 0;

        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

        if (skb->ip_summed == CHECKSUM_COMPLETE &&
            !(u16)csum_fold(csum_add(psum, skb->csum)))
                return 0;

        skb->csum = psum;

        return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static void poll_napi(struct netpoll *np)
{
        struct netpoll_info *npinfo = np->dev->npinfo;
        int budget = 16;

        if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
            npinfo->poll_owner != smp_processor_id() &&
            spin_trylock(&npinfo->poll_lock)) {
                npinfo->rx_flags |= NETPOLL_RX_DROP;
                atomic_inc(&trapped);

                np->dev->poll(np->dev, &budget);

                atomic_dec(&trapped);
                npinfo->rx_flags &= ~NETPOLL_RX_DROP;
                spin_unlock(&npinfo->poll_lock);
        }
}

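/*
 * Reply to any ARP requests that __netpoll_rx() queued while packets
 * were being trapped.
 */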
static void service_arp_queue(struct netpoll_info *npi)
{
        struct sk_buff *skb;

        if (unlikely(!npi))
                return;

        skb = skb_dequeue(&npi->arp_tx);

        while (skb != NULL) {
                arp_reply(skb);
                skb = skb_dequeue(&npi->arp_tx);
        }
        return;
}

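/*
 * Pump the device by hand: invoke the driver's ->poll_controller(),
 * run the NAPI ->poll() handler if one exists, answer queued ARP
 * requests, and free skbs sitting on the completion queue.
 */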
void netpoll_poll(struct netpoll *np)
{
        if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
                return;

        /* Process pending work on NIC */
        np->dev->poll_controller(np->dev);
        if (np->dev->poll)
                poll_napi(np);

        service_arp_queue(np->dev->npinfo);

        zap_completion_queue();
}

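/* Top up the emergency skb pool to MAX_SKBS preallocated buffers. */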
static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_list_lock, flags);
        while (nr_skbs < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                skb->next = skbs;
                skbs = skb;
                nr_skbs++;
        }
        spin_unlock_irqrestore(&skb_list_lock, flags);
}

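/*
 * Free skbs sitting on this CPU's completion queue so their memory can
 * be reused; skbs with a destructor are handed to dev_kfree_skb_any()
 * instead of being freed directly.
 */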
static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (skb->destructor)
                                dev_kfree_skb_any(skb); /* put this one back */
                        else
                                __kfree_skb(skb);
                }
        }

        put_cpu_var(softnet_data);
}

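/*
 * Allocate an skb for transmission, falling back to the preallocated
 * pool when normal allocation fails.  If no buffer can be found we
 * keep polling the device until one is freed up.
 */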
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int once = 1, count = 0;
        unsigned long flags;
        struct sk_buff *skb = NULL;

        zap_completion_queue();
repeat:
        if (nr_skbs < MAX_SKBS)
                refill_skbs();

        skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb) {
                spin_lock_irqsave(&skb_list_lock, flags);
                skb = skbs;
                if (skb) {
                        skbs = skb->next;
                        skb->next = NULL;
                        nr_skbs--;
                }
                spin_unlock_irqrestore(&skb_list_lock, flags);
        }

        if (!skb) {
                count++;
                if (once && (count == 1000000)) {
                        printk("out of netpoll skbs!\n");
                        once = 0;
                }
                netpoll_poll(np);
                goto repeat;
        }

        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

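/*
 * Transmit an skb directly via the driver's hard_start_xmit(),
 * retrying (and polling the device) while the hardware queue is busy.
 * If called recursively from the device's own transmit path, the
 * packet is dropped instead.
 */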
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        int status;
        struct netpoll_info *npinfo;

        if (!np || !np->dev || !netif_running(np->dev)) {
                __kfree_skb(skb);
                return;
        }

        npinfo = np->dev->npinfo;

        /* avoid recursion */
        if (npinfo->poll_owner == smp_processor_id() ||
            np->dev->xmit_lock_owner == smp_processor_id()) {
                if (np->drop)
                        np->drop(skb);
                else
                        __kfree_skb(skb);
                return;
        }

        do {
                npinfo->tries--;
                netif_tx_lock(np->dev);

                /*
                 * network drivers do not expect to be called if the queue is
                 * stopped.
                 */
                status = NETDEV_TX_BUSY;
                if (!netif_queue_stopped(np->dev))
                        status = np->dev->hard_start_xmit(skb, np->dev);

                netif_tx_unlock(np->dev);

                /* success */
                if (!status) {
                        npinfo->tries = MAX_RETRIES; /* reset */
                        return;
                }

                /* transmit busy */
                netpoll_poll(np);
                udelay(50);
        } while (npinfo->tries > 0);
}

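/*
 * Build a complete UDP/IP/Ethernet frame around @msg by hand and send
 * it out through netpoll_send_skb().
 */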
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, eth_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;

        udp_len = len + sizeof(*udph);
        ip_len = eth_len = udp_len + sizeof(*iph);
        total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

        skb = find_skb(np, total_len, total_len - len);
        if (!skb)
                return;

        memcpy(skb->data, msg, len);
        skb->len += len;

        skb->h.uh = udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;
        udph->check = csum_tcpudp_magic(htonl(np->local_ip),
                                        htonl(np->remote_ip),
                                        udp_len, IPPROTO_UDP,
                                        csum_partial((unsigned char *)udph, udp_len, 0));
        if (udph->check == 0)
                udph->check = -1;

        skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));

        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
        iph->id       = 0;
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
        iph->check    = 0;
        put_unaligned(htonl(np->local_ip), &(iph->saddr));
        put_unaligned(htonl(np->remote_ip), &(iph->daddr));
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
        skb->mac.raw = skb->data;
        skb->protocol = eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->local_mac, 6);
        memcpy(eth->h_dest, np->remote_mac, 6);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}

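/*
 * Construct and transmit an ARP reply for a request addressed to our
 * local IP, without involving the normal ARP stack.
 */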
static void arp_reply(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        u32 sip, tip;
        struct sk_buff *send_skb;
        struct netpoll *np = NULL;

        if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
                np = npinfo->rx_np;
        if (!np)
                return;

        /* No arp on this interface */
        if (skb->dev->flags & IFF_NOARP)
                return;

        if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
                                 (2 * skb->dev->addr_len) +
                                 (2 * sizeof(u32)))))
                return;

        skb->h.raw = skb->nh.raw = skb->data;
        arp = skb->nh.arph;

        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp+1) + skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
        arp_ptr += 4 + skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);

        /* Should we ignore arp? */
        if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
                return;

        size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
        send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
                            LL_RESERVED_SPACE(np->dev));

        if (!send_skb)
                return;

        send_skb->nh.raw = send_skb->data;
        arp = (struct arphdr *) skb_put(send_skb, size);
        send_skb->dev = skb->dev;
        send_skb->protocol = htons(ETH_P_ARP);

        /* Fill the device header for the ARP frame */

        if (np->dev->hard_header &&
            np->dev->hard_header(send_skb, skb->dev, ptype,
                                       np->remote_mac, np->local_mac,
                                       send_skb->len) < 0) {
                kfree_skb(send_skb);
                return;
        }

        /*
         * Fill out the arp protocol part.
         *
         * We only support the Ethernet device type,
         * which (according to RFC 1390) should always equal 1 (Ethernet).
         */

        arp->ar_hrd = htons(np->dev->type);
        arp->ar_pro = htons(ETH_P_IP);
        arp->ar_hln = np->dev->addr_len;
        arp->ar_pln = 4;
        arp->ar_op = htons(type);

        arp_ptr = (unsigned char *)(arp + 1);
        memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &tip, 4);
        arp_ptr += 4;
        memcpy(arp_ptr, np->remote_mac, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &sip, 4);

        netpoll_send_skb(np, send_skb);
}

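/*
 * Inspect an incoming packet on a netpoll-enabled device.  ARP
 * requests are queued for later reply; UDP packets matching our
 * configured addresses and ports are handed to the rx_hook.  Returns 1
 * if the packet was consumed, 0 if it should continue up the stack;
 * while trapped, every packet is consumed.
 */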
int __netpoll_rx(struct sk_buff *skb)
{
        int proto, len, ulen;
        struct iphdr *iph;
        struct udphdr *uh;
        struct netpoll_info *npi = skb->dev->npinfo;
        struct netpoll *np = npi->rx_np;

        if (!np)
                goto out;
        if (skb->dev->type != ARPHRD_ETHER)
                goto out;

        /* check if netpoll clients need ARP */
        if (skb->protocol == __constant_htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                skb_queue_tail(&npi->arp_tx, skb);
                return 1;
        }

        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto out;
        if (skb_shared(skb))
                goto out;

        iph = (struct iphdr *)skb->data;
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
        if (iph->ihl < 5 || iph->version != 4)
                goto out;
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto out;
        if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
                goto out;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < iph->ihl*4)
                goto out;

        if (iph->protocol != IPPROTO_UDP)
                goto out;

        len -= iph->ihl*4;
        uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
        ulen = ntohs(uh->len);

        if (ulen != len)
                goto out;
        if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
                goto out;
        if (np->local_ip && np->local_ip != ntohl(iph->daddr))
                goto out;
        if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
                goto out;
        if (np->local_port && np->local_port != ntohs(uh->dest))
                goto out;

        np->rx_hook(np, ntohs(uh->source),
                    (char *)(uh+1),
                    ulen - sizeof(struct udphdr));

        kfree_skb(skb);
        return 1;

out:
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
        }

        return 0;
}

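/*
 * Parse a netpoll configuration string of the form
 *
 *     [src-port]@[src-ip]/[dev],[dst-port]@[dst-ip]/[dst-mac]
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55" (addresses
 * here are purely illustrative).  Omitted fields keep whatever value
 * the caller preset.  Returns 0 on success, -1 on a parse error.
 */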
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;
        printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);

        if (*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_ip = ntohl(in_aton(cur));
                cur = delim;

                printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                       np->name, HIPQUAD(np->local_ip));
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;
        printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        np->remote_ip = ntohl(in_aton(cur));
        cur = delim + 1;

        printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
               np->name, HIPQUAD(np->remote_ip));

        if (*cur != 0) {
                /* MAC address */
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[0] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[1] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[2] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[3] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[4] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                np->remote_mac[5] = simple_strtol(cur, NULL, 16);
        }

        printk(KERN_INFO "%s: remote ethernet address "
               "%02x:%02x:%02x:%02x:%02x:%02x\n",
               np->name,
               np->remote_mac[0],
               np->remote_mac[1],
               np->remote_mac[2],
               np->remote_mac[3],
               np->remote_mac[4],
               np->remote_mac[5]);

        return 0;

 parse_failed:
        printk(KERN_INFO "%s: couldn't parse config at %s!\n",
               np->name, cur);
        return -1;
}

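/*
 * Bind a netpoll instance to its network device: look the device up by
 * name, allocate and initialise its netpoll_info if needed, bring the
 * interface up and wait for carrier if necessary, fill in any missing
 * local addresses and prime the skb pool.  Returns 0 on success, -1 on
 * failure.
 */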
int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev_name)
                ndev = dev_get_by_name(np->dev_name);
        if (!ndev) {
                printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
                return -1;
        }

        np->dev = ndev;
        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo)
                        goto release;

                npinfo->rx_flags = 0;
                npinfo->rx_np = NULL;
                spin_lock_init(&npinfo->poll_lock);
                npinfo->poll_owner = -1;
                npinfo->tries = MAX_RETRIES;
                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
        } else
                npinfo = ndev->npinfo;

        if (!ndev->poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
                goto release;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
                       np->name, np->dev_name);

                rtnl_lock();
                if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, np->dev_name);
                        rtnl_unlock();
                        goto release;
                }
                rtnl_unlock();

                atleast = jiffies + HZ/10;
                atmost = jiffies + 4*HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                printk(KERN_NOTICE
                                       "%s: timeout waiting for carrier\n",
                                       np->name);
                                break;
                        }
                        cond_resched();
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */

                if (time_before(jiffies, atleast)) {
                        printk(KERN_NOTICE "%s: carrier detect appears"
                               " untrustworthy, waiting 4 seconds\n",
                               np->name);
                        msleep(4000);
                }
        }

        if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
                memcpy(np->local_mac, ndev->dev_addr, 6);

        if (!np->local_ip) {
                rcu_read_lock();
                in_dev = __in_dev_get_rcu(ndev);

                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        goto release;
                }

                np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
                rcu_read_unlock();
                printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                       np->name, HIPQUAD(np->local_ip));
        }

        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                npinfo->rx_np = np;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        /* fill up the skb queue */
        refill_skbs();

        /* last thing to do is link it to the net device structure */
        ndev->npinfo = npinfo;

        /* avoid racing with NAPI reading npinfo */
        synchronize_rcu();

        return 0;

 release:
        if (!ndev->npinfo)
                kfree(npinfo);
        np->dev = NULL;
        dev_put(ndev);
        return -1;
}

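/*
 * Detach a netpoll instance from its device and drop the reference
 * taken in netpoll_setup().
 */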
void netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev) {
                npinfo = np->dev->npinfo;
                if (npinfo && npinfo->rx_np == np) {
                        spin_lock_irqsave(&npinfo->rx_lock, flags);
                        npinfo->rx_np = NULL;
                        npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
                }
                dev_put(np->dev);
        }

        np->dev = NULL;
}

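/*
 * While "trapped", netpoll consumes every packet it receives rather
 * than passing it up the stack; netpoll_set_trap() adjusts the nesting
 * count and netpoll_trap() queries it.
 */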
int netpoll_trap(void)
{
        return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
        if (trap)
                atomic_inc(&trapped);
        else
                atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
EXPORT_SYMBOL(netpoll_queue);