/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include <linux/if_arp.h>       /* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

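/*
 * An IPoIB hardware address is 20 bytes: a 4-byte field whose low 24
 * bits are the destination QPN, followed by the 16-byte port GID.
 * This macro extracts the QPN from the first 4 bytes.
 */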
#define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
        struct net_device *dev;
        struct ipoib_path  path;
};

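/*
 * Hardware address of the IPv4 broadcast group: the all-ones QPN
 * (0xffffff) followed by the broadcast MGID.  The P_Key bytes at
 * offsets 8 and 9 are filled in when the port is initialized.
 */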
static const u8 ipv4_bcast_addr[] = {
        0x00, 0xff, 0xff, 0xff,
        0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
        .name   = "ipoib",
        .add    = ipoib_add_one,
        .remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "bringing up interface\n");

        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

        if (ipoib_ib_dev_open(dev))
                return -EINVAL;

        if (ipoib_ib_dev_up(dev)) {
                ipoib_ib_dev_stop(dev);
                return -EINVAL;
        }

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring up any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (flags & IFF_UP)
                                continue;

                        dev_change_flags(cpriv->dev, flags | IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        netif_start_queue(dev);

        return 0;
}

static int ipoib_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "stopping interface\n");

        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        netif_stop_queue(dev);

        /*
         * Now flush workqueue to make sure a scheduled task doesn't
         * bring our internal state back up.
         */
        flush_workqueue(ipoib_workqueue);

        ipoib_ib_dev_down(dev, 1);
        ipoib_ib_dev_stop(dev);

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring down any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (!(flags & IFF_UP))
                                continue;

                        dev_change_flags(cpriv->dev, flags & ~IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
                return -EINVAL;

        priv->admin_mtu = new_mtu;

        dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

        return 0;
}

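/*
 * Look up the path record for @gid in the per-device red-black tree.
 * The caller must hold priv->lock.
 */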
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
        struct ipoib_path *path;
        int ret;

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                ret = memcmp(gid, path->pathrec.dgid.raw,
                             sizeof (union ib_gid));

                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return path;
        }

        return NULL;
}

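/*
 * Insert @path into the tree, keyed by destination GID.  Returns
 * -EEXIST if a path to the same GID is already present.  The caller
 * must hold priv->lock.
 */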
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->path_tree.rb_node;
        struct rb_node *pn = NULL;
        struct ipoib_path *tpath;
        int ret;

        while (*n) {
                pn = *n;
                tpath = rb_entry(pn, struct ipoib_path, rb_node);

                ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&path->rb_node, pn, n);
        rb_insert_color(&path->rb_node, &priv->path_tree);

        list_add_tail(&path->list, &priv->path_list);

        return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = __skb_dequeue(&path->queue)))
                dev_kfree_skb_irq(skb);

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that path->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);

                ipoib_neigh_free(dev, neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (path->ah)
                ipoib_put_ah(path->ah);

        kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
        struct ipoib_path_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->path.pathrec.dgid.raw, 0, 16);

        if (ipoib_path_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_path *path;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->path_tree);

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->path = *path;
                        ret = 0;
                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
                          struct ipoib_path *path)
{
        *path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_flush_paths(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;
        LIST_HEAD(remove_list);

        spin_lock_irq(&priv->tx_lock);
        spin_lock(&priv->lock);

        list_splice(&priv->path_list, &remove_list);
        INIT_LIST_HEAD(&priv->path_list);

        list_for_each_entry(path, &remove_list, list)
                rb_erase(&path->rb_node, &priv->path_tree);

        list_for_each_entry_safe(path, tp, &remove_list, list) {
                if (path->query)
                        ib_sa_cancel_query(path->query_id, path->query);
                spin_unlock(&priv->lock);
                spin_unlock_irq(&priv->tx_lock);
                wait_for_completion(&path->done);
                path_free(dev, path);
                spin_lock_irq(&priv->tx_lock);
                spin_lock(&priv->lock);
        }
        spin_unlock(&priv->lock);
        spin_unlock_irq(&priv->tx_lock);
}

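/*
 * Completion callback for an SA path record query: on success, build
 * an address handle from the returned path, hand it to every neigh
 * waiting on this path, and retransmit any packets that were queued
 * while the lookup was in flight.
 */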
static void path_rec_completion(int status,
                                struct ib_sa_path_rec *pathrec,
                                void *path_ptr)
{
        struct ipoib_path *path = path_ptr;
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
        struct ipoib_neigh *neigh;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
        unsigned long flags;

        if (pathrec)
                ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
                          be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
        else
                ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
                          status, IPOIB_GID_ARG(path->pathrec.dgid));

        skb_queue_head_init(&skqueue);

        if (!status) {
                struct ib_ah_attr av = {
                        .dlid          = be16_to_cpu(pathrec->dlid),
                        .sl            = pathrec->sl,
                        .port_num      = priv->port,
                        .static_rate   = pathrec->rate
                };

                ah = ipoib_create_ah(dev, priv->pd, &av);
        }

        spin_lock_irqsave(&priv->lock, flags);

        path->ah = ah;

        if (ah) {
                path->pathrec = *pathrec;

                ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
                          ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

                while ((skb = __skb_dequeue(&path->queue)))
                        __skb_queue_tail(&skqueue, skb);

                list_for_each_entry(neigh, &path->neigh_list, list) {
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;
                        memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
                               sizeof(union ib_gid));

                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
        }

        path->query = NULL;
        complete(&path->done);

        spin_unlock_irqrestore(&priv->lock, flags);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        path = kzalloc(sizeof *path, GFP_ATOMIC);
        if (!path)
                return NULL;

        path->dev = dev;

        skb_queue_head_init(&path->queue);

        INIT_LIST_HEAD(&path->neigh_list);

        memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
        path->pathrec.sgid      = priv->local_gid;
        path->pathrec.pkey      = cpu_to_be16(priv->pkey);
        path->pathrec.numb_path = 1;

        return path;
}

static int path_rec_start(struct net_device *dev,
                          struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
                  IPOIB_GID_ARG(path->pathrec.dgid));

        init_completion(&path->done);

        path->query_id =
                ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
                                   &path->pathrec,
                                   IB_SA_PATH_REC_DGID          |
                                   IB_SA_PATH_REC_SGID          |
                                   IB_SA_PATH_REC_NUMB_PATH     |
                                   IB_SA_PATH_REC_PKEY,
                                   1000, GFP_ATOMIC,
                                   path_rec_completion,
                                   path, &path->query);
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
                path->query = NULL;
                return path->query_id;
        }

        return 0;
}

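/*
 * Transmit path for packets that have a neighbour but no ipoib_neigh
 * yet: attach a new ipoib_neigh to the path for the destination GID
 * (starting a path record lookup if needed) and either send the
 * packet or queue it until the lookup completes.
 */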
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;

        neigh = ipoib_neigh_alloc(skb->dst->neighbour);
        if (!neigh) {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                return;
        }

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, skb->dst->neighbour->ha + 4);
        if (!path) {
                path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
                if (!path)
                        goto err_path;

                __path_add(dev, path);
        }

        list_add_tail(&neigh->list, &path->neigh_list);

        if (path->ah) {
                kref_get(&path->ah->ref);
                neigh->ah = path->ah;
                memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
                       sizeof(union ib_gid));

                ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
        } else {
                neigh->ah  = NULL;

                if (!path->query && path_rec_start(dev, path))
                        goto err_list;

                __skb_queue_tail(&neigh->queue, skb);
        }

        spin_unlock(&priv->lock);
        return;

err_list:
        list_del(&neigh->list);

err_path:
        ipoib_neigh_free(dev, neigh);
        ++priv->stats.tx_dropped;
        dev_kfree_skb_any(skb);

        spin_unlock(&priv->lock);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

        /* Look up path record for unicasts */
        if (skb->dst->neighbour->ha[4] != 0xff) {
                neigh_add_path(skb, dev);
                return;
        }

        /* Add in the P_Key for multicasts */
        skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
        skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
        ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
}

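/*
 * Transmit path for unicast packets with no neighbour (typically ARP
 * and RARP replies): the destination hardware address arrives in the
 * pseudoheader that ipoib_hard_header() pushed onto the skb.
 */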
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                             struct ipoib_pseudoheader *phdr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, phdr->hwaddr + 4);
        if (!path) {
                path = path_rec_create(dev, phdr->hwaddr + 4);
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
                        __skb_queue_tail(&path->queue, skb);

                        if (path_rec_start(dev, path)) {
                                spin_unlock(&priv->lock);
                                path_free(dev, path);
                                return;
                        } else
                                __path_add(dev, path);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                spin_unlock(&priv->lock);
                return;
        }

        if (path->ah) {
                ipoib_dbg(priv, "Send unicast ARP to %04x\n",
                          be16_to_cpu(path->pathrec.dlid));

                ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                /* put pseudoheader back on for next time */
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }

        spin_unlock(&priv->lock);
}

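/*
 * Main transmit handler.  Dispatches to the path/neigh machinery for
 * packets with a neighbour, and falls back to the pseudoheader for
 * those without one.  tx_lock is taken with trylock because the
 * device sets NETIF_F_LLTX and does its own locking.
 */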
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
        unsigned long flags;

        if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
                return NETDEV_TX_LOCKED;

        /*
         * Check if our queue is stopped.  Since we have the LLTX bit
         * set, we can't rely on netif_stop_queue() preventing our
         * xmit function from being called with a full queue.
         */
        if (unlikely(netif_queue_stopped(dev))) {
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        if (likely(skb->dst && skb->dst->neighbour)) {
                if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
                        ipoib_path_lookup(skb, dev);
                        goto out;
                }

                neigh = *to_ipoib_neigh(skb->dst->neighbour);

                if (likely(neigh->ah)) {
                        if (unlikely(memcmp(&neigh->dgid.raw,
                                            skb->dst->neighbour->ha + 4,
                                            sizeof(union ib_gid)))) {
                                spin_lock(&priv->lock);
                                /*
                                 * It's safe to call ipoib_put_ah() inside
                                 * priv->lock here, because we know that
                                 * path->ah will always hold one more reference,
                                 * so ipoib_put_ah() will never do more than
                                 * decrement the ref count.
                                 */
                                ipoib_put_ah(neigh->ah);
                                list_del(&neigh->list);
                                ipoib_neigh_free(dev, neigh);
                                spin_unlock(&priv->lock);
                                ipoib_path_lookup(skb, dev);
                                goto out;
                        }

                        ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
                        goto out;
                }

                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        spin_lock(&priv->lock);
                        __skb_queue_tail(&neigh->queue, skb);
                        spin_unlock(&priv->lock);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
        } else {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb->data;
                skb_pull(skb, sizeof *phdr);

                if (phdr->hwaddr[4] == 0xff) {
                        /* Add in the P_Key for multicast */
                        phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
                        phdr->hwaddr[9] = priv->pkey & 0xff;

                        ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
                } else {
                        /* unicast GID -- should be ARP or RARP reply */

                        if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
                            (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
                                ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
                                           IPOIB_GID_FMT "\n",
                                           skb->dst ? "neigh" : "dst",
                                           be16_to_cpup((__be16 *) skb->data),
                                           IPOIB_QPN(phdr->hwaddr),
                                           IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
                                dev_kfree_skb_any(skb);
                                ++priv->stats.tx_dropped;
                                goto out;
                        }

                        unicast_arp_send(skb, dev, phdr);
                }
        }

out:
        spin_unlock_irqrestore(&priv->tx_lock, flags);

        return NETDEV_TX_OK;
}

static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        return &priv->stats;
}

static void ipoib_timeout(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
                   jiffies_to_msecs(jiffies - dev->trans_start));
        ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
                   netif_queue_stopped(dev),
                   priv->tx_head, priv->tx_tail);
        /* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
                             struct net_device *dev,
                             unsigned short type,
                             void *daddr, void *saddr, unsigned len)
{
        struct ipoib_header *header;

        header = (struct ipoib_header *) skb_push(skb, sizeof *header);

        header->proto = htons(type);
        header->reserved = 0;

        /*
         * If we don't have a neighbour structure, stuff the
         * destination address onto the front of the skb so we can
         * figure out where to send the packet later.
         */
        if ((!skb->dst || !skb->dst->neighbour) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
                memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
        }

        return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
                ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
                return;
        }

        queue_work(ipoib_workqueue, &priv->restart_task);
}

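/*
 * Destructor for the core neighbour entry: tear down the associated
 * ipoib_neigh and drop its address handle reference once priv->lock
 * has been released.
 */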
static void ipoib_neigh_destructor(struct neighbour *n)
{
        struct ipoib_neigh *neigh;
        struct ipoib_dev_priv *priv = netdev_priv(n->dev);
        unsigned long flags;
        struct ipoib_ah *ah = NULL;

        ipoib_dbg(priv,
                  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
                  IPOIB_QPN(n->ha),
                  IPOIB_GID_RAW_ARG(n->ha + 4));

        spin_lock_irqsave(&priv->lock, flags);

        neigh = *to_ipoib_neigh(n);
        if (neigh) {
                if (neigh->ah)
                        ah = neigh->ah;
                list_del(&neigh->list);
                ipoib_neigh_free(n->dev, neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ah)
                ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
{
        struct ipoib_neigh *neigh;

        neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
        if (!neigh)
                return NULL;

        neigh->neighbour = neighbour;
        *to_ipoib_neigh(neighbour) = neigh;
        skb_queue_head_init(&neigh->queue);

        return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;

        *to_ipoib_neigh(neigh->neighbour) = NULL;
        while ((skb = __skb_dequeue(&neigh->queue))) {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }
        kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
        parms->neigh_destructor = ipoib_neigh_destructor;

        return 0;
}

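/*
 * Allocate the receive and transmit rings and bring up the IB
 * resources for one port.  Called before the net_device is
 * registered.
 */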
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Allocate RX/TX "rings" to hold queued skbs */
        priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, ipoib_recvq_size);
                goto out;
        }

        priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring,
                                GFP_KERNEL);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, ipoib_sendq_size);
                goto out_rx_ring_cleanup;
        }

        /* priv->tx_head & tx_tail are already 0 */

        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;

        return 0;

out_tx_ring_cleanup:
        kfree(priv->tx_ring);

out_rx_ring_cleanup:
        kfree(priv->rx_ring);

out:
        return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

        ipoib_delete_debug_files(dev);

        /* Delete any child interfaces first */
        list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
                unregister_netdev(cpriv->dev);
                ipoib_dev_cleanup(cpriv->dev);
                free_netdev(cpriv->dev);
        }

        ipoib_ib_dev_cleanup(dev);

        kfree(priv->rx_ring);
        kfree(priv->tx_ring);

        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
}

static void ipoib_setup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        dev->open                = ipoib_open;
        dev->stop                = ipoib_stop;
        dev->change_mtu          = ipoib_change_mtu;
        dev->hard_start_xmit     = ipoib_start_xmit;
        dev->get_stats           = ipoib_get_stats;
        dev->tx_timeout          = ipoib_timeout;
        dev->hard_header         = ipoib_hard_header;
        dev->set_multicast_list  = ipoib_set_mcast_list;
        dev->neigh_setup         = ipoib_neigh_setup_dev;

        dev->watchdog_timeo      = HZ;

        dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;

        /*
         * We add in INFINIBAND_ALEN to allow for the destination
         * address "pseudoheader" for skbs without neighbour struct.
         */
        dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
        dev->addr_len            = INFINIBAND_ALEN;
        dev->type                = ARPHRD_INFINIBAND;
        dev->tx_queue_len        = ipoib_sendq_size * 2;
        dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

        /* MTU will be reset when mcast join happens */
        dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
        priv->mcast_mtu          = priv->admin_mtu = dev->mtu;

        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

        netif_carrier_off(dev);

        SET_MODULE_OWNER(dev);

        priv->dev = dev;

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);

        mutex_init(&priv->mcast_mutex);
        mutex_init(&priv->vlan_mutex);

        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);

        INIT_DELAYED_WORK(&priv->pkey_task,    ipoib_pkey_poll);
        INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
        INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
        INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
        struct net_device *dev;

        dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
                           ipoib_setup);
        if (!dev)
                return NULL;

        return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

        return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t create_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        pkey |= 0x8000;

        ret = ipoib_vlan_add(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_pkey);
}

static struct net_device *ipoib_add_port(const char *format,
                                         struct ib_device *hca, u8 port)
{
        struct ipoib_dev_priv *priv;
        int result = -ENOMEM;

        priv = ipoib_intf_alloc(format);
        if (!priv)
                goto alloc_mem_failed;

        SET_NETDEV_DEV(priv->dev, hca->dma_device);

        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto alloc_mem_failed;
        }

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        priv->pkey |= 0x8000;

        priv->dev->broadcast[8] = priv->pkey >> 8;
        priv->dev->broadcast[9] = priv->pkey & 0xff;

        result = ib_query_gid(hca, port, 0, &priv->local_gid);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto alloc_mem_failed;
        } else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

        result = ipoib_dev_init(priv->dev, hca, port);
        if (result < 0) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        INIT_IB_EVENT_HANDLER(&priv->event_handler,
                              priv->ca, ipoib_event);
        result = ib_register_event_handler(&priv->event_handler);
        if (result < 0) {
                printk(KERN_WARNING "%s: ib_register_event_handler failed for "
                       "port %d (ret = %d)\n",
                       hca->name, port, result);
                goto event_failed;
        }

        result = register_netdev(priv->dev);
        if (result) {
                printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
                       hca->name, port, result);
                goto register_failed;
        }

        ipoib_create_debug_files(priv->dev);

        if (ipoib_add_pkey_attr(priv->dev))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
                goto sysfs_failed;

        return priv->dev;

sysfs_failed:
        ipoib_delete_debug_files(priv->dev);
        unregister_netdev(priv->dev);

register_failed:
        ib_unregister_event_handler(&priv->event_handler);
        flush_scheduled_work();

event_failed:
        ipoib_dev_cleanup(priv->dev);

device_init_failed:
        free_netdev(priv->dev);

alloc_mem_failed:
        return ERR_PTR(result);
}

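/*
 * Called by the IB core for each new device.  Switches expose their
 * management port as port 0; CAs and routers number their physical
 * ports from 1 to phys_port_cnt.  Create one ipoib interface per
 * port and stash the list as our client data.
 */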
static void ipoib_add_one(struct ib_device *device)
{
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int s, e, p;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
                return;

        INIT_LIST_HEAD(dev_list);

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
                        list_add_tail(&priv->list, dev_list);
                }
        }

        ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
        struct ipoib_dev_priv *priv, *tmp;
        struct list_head *dev_list;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev_list = ib_get_client_data(device, &ipoib_client);

        list_for_each_entry_safe(priv, tmp, dev_list, list) {
                ib_unregister_event_handler(&priv->event_handler);
                flush_scheduled_work();

                unregister_netdev(priv->dev);
                ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }

        kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
        int ret;

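        /*
         * The ring sizes must be powers of two within
         * [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE]; silently round
         * and clamp whatever the module parameters requested.
         */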
        ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
        ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

        ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
        ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);

        ret = ipoib_register_debugfs();
        if (ret)
                return ret;

        /*
         * We create our own workqueue mainly because we want to be
         * able to flush it when devices are being removed.  We can't
         * use schedule_work()/flush_scheduled_work() because both
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
         */
        ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
        }

        ib_sa_register_client(&ipoib_sa_client);

        ret = ib_register_client(&ipoib_client);
        if (ret)
                goto err_sa;

        return 0;

err_sa:
        ib_sa_unregister_client(&ipoib_sa_client);
        destroy_workqueue(ipoib_workqueue);

err_fs:
        ipoib_unregister_debugfs();

        return ret;
}

static void __exit ipoib_cleanup_module(void)
{
        ib_unregister_client(&ipoib_client);
        ib_sa_unregister_client(&ipoib_sa_client);
        ipoib_unregister_debugfs();
        destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);