/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */

#include <linux/io.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ipath_layer.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);

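/*
 * __ipath_layer_intr - pass an interrupt event to the layered driver
 *
 * Unlocked helper: invokes the registered layer_intr callback with the
 * event bits in @arg if a layered driver is attached to this device;
 * returns -ENODEV otherwise.
 */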
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_intr)
                ret = layer_intr(dd->ipath_layer.l_arg, arg);

        return ret;
}

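/*
 * ipath_layer_intr - locked wrapper around __ipath_layer_intr()
 *
 * Takes ipath_layer_mutex so the callback cannot race with layered
 * driver registration or unregistration.
 */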
int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
        int ret;

        mutex_lock(&ipath_layer_mutex);

        ret = __ipath_layer_intr(dd, arg);

        mutex_unlock(&ipath_layer_mutex);

        return ret;
}

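/*
 * __ipath_layer_rcv - pass a received packet to the layered driver
 *
 * Hands @hdr and @skb to the registered layer_rcv callback; returns
 * -ENODEV if no layered driver is attached to this device.
 */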
int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
                      struct sk_buff *skb)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_rcv)
                ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

        return ret;
}

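/*
 * __ipath_layer_rcv_lid - pass a received header to the layered
 * driver's LID handler; returns -ENODEV if none is attached.
 */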
int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_rcv_lid)
                ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

        return ret;
}

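/*
 * ipath_layer_lid_changed - tell the layered driver the local LID
 * changed, by delivering IPATH_LAYER_INT_LID under the mutex.
 */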
void ipath_layer_lid_changed(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (dd->ipath_layer.l_arg && layer_intr)
                layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

        mutex_unlock(&ipath_layer_mutex);
}

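/*
 * ipath_layer_add - attach a device to the layered driver
 *
 * If a layered driver is registered, call its add callback and save
 * the returned per-device cookie in dd->ipath_layer.l_arg.
 */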
void ipath_layer_add(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (layer_add_one)
                dd->ipath_layer.l_arg = layer_add_one(dd->ipath_unit, dd);

        mutex_unlock(&ipath_layer_mutex);
}

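/*
 * ipath_layer_remove - detach a device from the layered driver
 *
 * Calls the remove callback with the device's cookie, then clears it.
 */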
void ipath_layer_remove(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (dd->ipath_layer.l_arg && layer_remove_one) {
                layer_remove_one(dd->ipath_layer.l_arg);
                dd->ipath_layer.l_arg = NULL;
        }

        mutex_unlock(&ipath_layer_mutex);
}

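/**
 * ipath_layer_register - register a layered driver with the low-level driver
 * @l_add: per-device attach callback
 * @l_remove: per-device detach callback
 * @l_intr: interrupt-event callback
 * @l_rcv: packet-receive callback
 * @l_rcv_opcode: opcode whose packets are routed to @l_rcv
 * @l_rcv_lid: LID-routed header receive callback
 *
 * Records the callbacks, then walks the device list and attaches the
 * layered driver to every device that is already initialized.  The
 * devs spinlock is dropped around each @l_add call so the callback is
 * not invoked with the spinlock held.
 */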
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
                         void (*l_remove)(void *),
                         int (*l_intr)(void *, u32),
                         int (*l_rcv)(void *, void *, struct sk_buff *),
                         u16 l_rcv_opcode,
                         int (*l_rcv_lid)(void *, void *))
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);

        layer_add_one = l_add;
        layer_remove_one = l_remove;
        layer_intr = l_intr;
        layer_rcv = l_rcv;
        layer_rcv_lid = l_rcv_lid;
        ipath_layer_rcv_opcode = l_rcv_opcode;

        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (!(dd->ipath_flags & IPATH_INITTED))
                        continue;

                if (dd->ipath_layer.l_arg)
                        continue;

                spin_unlock_irqrestore(&ipath_devs_lock, flags);
                dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
                spin_lock_irqsave(&ipath_devs_lock, flags);
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);
        mutex_unlock(&ipath_layer_mutex);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);

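/**
 * ipath_layer_unregister - detach the layered driver from all devices
 *
 * Calls the remove callback for every attached device (dropping the
 * devs spinlock around each call), then clears all registered
 * callbacks.
 */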
void ipath_layer_unregister(void)
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);
        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (dd->ipath_layer.l_arg && layer_remove_one) {
                        spin_unlock_irqrestore(&ipath_devs_lock, flags);
                        layer_remove_one(dd->ipath_layer.l_arg);
                        spin_lock_irqsave(&ipath_devs_lock, flags);
                        dd->ipath_layer.l_arg = NULL;
                }
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);

        layer_add_one = NULL;
        layer_remove_one = NULL;
        layer_intr = NULL;
        layer_rcv = NULL;
        layer_rcv_lid = NULL;

        mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);

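/**
 * ipath_layer_open - prepare a device for use by the layered driver
 * @dd: the infinipath device
 * @pktmax: set to the largest packet size (in bytes) the layer may send
 *
 * Sets the receive header queue size, then replays any state that is
 * already established (link up, LID, multicast LID) through the
 * interrupt callback, in case the low-level driver came up before the
 * layered driver was (re)loaded.
 */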
int ipath_layer_open(struct ipath_devdata *dd, u32 *pktmax)
{
        int ret;
        u32 intval = 0;

        mutex_lock(&ipath_layer_mutex);

        if (!dd->ipath_layer.l_arg) {
                ret = -EINVAL;
                goto bail;
        }

        ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);

        if (ret < 0)
                goto bail;

        *pktmax = dd->ipath_ibmaxlen;

        if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
                intval |= IPATH_LAYER_INT_IF_UP;
        if (dd->ipath_lid)
                intval |= IPATH_LAYER_INT_LID;
        if (dd->ipath_mlid)
                intval |= IPATH_LAYER_INT_BCAST;
        /*
         * Do this on open in case the low-level driver is already up
         * and only the layered driver was reloaded, etc.
         */
        if (intval)
                layer_intr(dd->ipath_layer.l_arg, intval);

        ret = 0;
bail:
        mutex_unlock(&ipath_layer_mutex);

        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);

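/* Return this unit's local LID (0 if not yet assigned). */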
u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
        return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);

/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * The MAC is built from the EUI-64 GUID: the top 3 OUI octets, then
 * the low 3 octets, skipping the 2 middle octets (which should both
 * be zero or 0xff).  The returned MAC is in network order; @mac must
 * point to at least 6 bytes of buffer.  We assume that by the time
 * the LID is set, the GUID is as valid as it is ever going to be,
 * rather than adding yet another status bit.
 */
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *mac)
{
        u8 *guid;

        guid = (u8 *) &dd->ipath_guid;

        mac[0] = guid[0];
        mac[1] = guid[1];
        mac[2] = guid[2];
        mac[3] = guid[5];
        mac[4] = guid[6];
        mac[5] = guid[7];
        if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
                ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
                          "%x %x\n", guid[3], guid[4]);
        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);

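/* Return this unit's multicast/broadcast LID (0 if not yet configured). */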
u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
        return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);

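/**
 * ipath_layer_send_hdr - send an ether_header-sized packet via a PIO buffer
 * @dd: the infinipath device
 * @hdr: the header to send
 *
 * Checks that the device is open and the link is up, claims a PIO
 * buffer, writes the PBC word, then copies the header one word at a
 * time, flushing write combining before the final trigger word so the
 * packet is not sent until it is complete.
 */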
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
        int ret = 0;
        u32 __iomem *piobuf;
        u32 plen, *uhdr;
        size_t count;
        __be16 vlsllnh;

        if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
                ipath_dbg("send while not open\n");
                ret = -EINVAL;
        } else if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
                   dd->ipath_lid == 0) {
                /*
                 * The LID check catches the case where the SMA hasn't
                 * yet configured the port.
                 */
                ret = -ENETDOWN;
                ipath_cdbg(VERBOSE, "send while not ready, "
                           "mylid=%u, flags=0x%x\n",
                           dd->ipath_lid, dd->ipath_flags);
        }

        vlsllnh = *((__be16 *) hdr);
        if (vlsllnh != htons(IPATH_LRH_BTH)) {
                ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
                          "not sending\n", be16_to_cpu(vlsllnh),
                          IPATH_LRH_BTH);
                ret = -EINVAL;
        }
        if (ret)
                goto done;

        /* Get a PIO buffer to use. */
        piobuf = ipath_getpiobuf(dd, NULL);
        if (piobuf == NULL) {
                ret = -EBUSY;
                goto done;
        }

        plen = sizeof(*hdr) >> 2; /* header length in 32-bit words */
        ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

        writeq(plen + 1, piobuf); /* len (+1 for pad) to pbc, no flags */
        ipath_flush_wc();
        piobuf += 2;
        uhdr = (u32 *) hdr;
        count = plen - 1; /* amount we can copy before the trigger word */
        __iowrite32_copy(piobuf, uhdr, count);
        ipath_flush_wc();
        __raw_writel(uhdr[count], piobuf + count);
        ipath_flush_wc(); /* ensure it's sent, now */

        ipath_stats.sps_ether_spkts++;  /* ether packet sent */

done:
        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);

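/*
 * ipath_layer_set_piointbufavail_int - enable the "PIO buffer available"
 * interrupt so the layered driver learns when send buffers free up.
 */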
int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);