/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */
#include <linux/io.h>		/* writeq(), __iowrite32_copy() */
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ipath_layer.h"
#include "ipath_verbs.h"
#include "ipath_common.h"
/* Acquire this mutex before taking ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_intr)
		ret = layer_intr(dd->ipath_layer.l_arg, arg);

	return ret;
}
int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret;

	mutex_lock(&ipath_layer_mutex);

	ret = __ipath_layer_intr(dd, arg);

	mutex_unlock(&ipath_layer_mutex);

	return ret;
}
int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
		      struct sk_buff *skb)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv)
		ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

	return ret;
}
int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv_lid)
		ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

	return ret;
}
void ipath_layer_lid_changed(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_intr)
		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

	mutex_unlock(&ipath_layer_mutex);
}
void ipath_layer_add(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (layer_add_one)
		dd->ipath_layer.l_arg =
			layer_add_one(dd->ipath_unit, dd);

	mutex_unlock(&ipath_layer_mutex);
}
void ipath_layer_remove(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_remove_one) {
		layer_remove_one(dd->ipath_layer.l_arg);
		dd->ipath_layer.l_arg = NULL;
	}

	mutex_unlock(&ipath_layer_mutex);
}
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->ipath_layer.l_arg)
			continue;

		/*
		 * Drop the spinlock while calling back into the layered
		 * driver; ipath_layer_mutex still serializes us.
		 */
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);
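
/*
 * Illustrative use only (not part of this file): a layered driver such as
 * the ipath ethernet emulation would typically register once from its
 * module init path.  The callback names and the opcode constant below are
 * hypothetical placeholders.
 *
 *	static int __init my_layer_init(void)
 *	{
 *		return ipath_layer_register(my_add_one, my_remove_one,
 *					    my_intr, my_rcv, MY_RCV_OPCODE,
 *					    my_rcv_lid);
 *	}
 *
 * The matching module exit path would call ipath_layer_unregister().
 */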
void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);
int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
{
	int ret;
	u32 intval = 0;

	mutex_lock(&ipath_layer_mutex);

	if (!dd->ipath_layer.l_arg) {
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);

	if (ret < 0)
		goto bail;

	*pktmax = dd->ipath_ibmaxlen;

	if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
		intval |= IPATH_LAYER_INT_IF_UP;
	if (dd->ipath_lid)
		intval |= IPATH_LAYER_INT_LID;
	if (dd->ipath_mlid)
		intval |= IPATH_LAYER_INT_BCAST;
	/*
	 * Do this on open, in case the low level is already up and
	 * just the layered driver was reloaded, etc.
	 */
	if (intval)
		layer_intr(dd->ipath_layer.l_arg, intval);

	ret = 0;
bail:
	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);
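
/*
 * Illustrative use only (hypothetical caller): a layered driver opens the
 * unit after its per-device add callback has run, and uses the returned
 * pktmax (dd->ipath_ibmaxlen here) as an upper bound on the packets it
 * builds.
 *
 *	u32 pktmax;
 *	int ret;
 *
 *	ret = ipath_layer_open(dd, &pktmax);
 *	if (ret < 0)
 *		return ret;
 */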
u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
	return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * This takes the top 3 octets of the EUI-64 GUID (the OUI), skips the
 * next 2 octets (which should both be 0 or 0xff), and uses the remaining
 * 3 as the low-order MAC bytes.  The returned MAC is in network order;
 * @mac must point to at least 6 bytes of buffer.
 * We assume that by the time the LID is set, the GUID is as valid as it
 * is ever going to be, rather than adding yet another status bit.
 */
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
{
	u8 *guid;

	guid = (u8 *) &dd->ipath_guid;

	mac[0] = guid[0];
	mac[1] = guid[1];
	mac[2] = guid[2];
	mac[3] = guid[5];
	mac[4] = guid[6];
	mac[5] = guid[7];
	if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
		ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
			  "%x %x\n", guid[3], guid[4]);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
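
/*
 * Worked example (values are made up): a GUID whose bytes in network
 * order are g0 g1 g2 00 00 g5 g6 g7 maps to the MAC g0:g1:g2:g5:g6:g7,
 * e.g. GUID 02:11:22:00:00:33:44:55 becomes MAC 02:11:22:33:44:55.
 */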
u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
	return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
	int ret = 0;
	u32 __iomem *piobuf;
	u32 plen, *uhdr;
	size_t count;
	__be16 vlsllnh;

	if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
		ipath_dbg("send while not open\n");
		ret = -EINVAL;
	} else
		if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
		    dd->ipath_lid == 0) {
			/*
			 * LID check is for when the SMA hasn't yet configured it.
			 */
			ret = -ENETDOWN;
			ipath_cdbg(VERBOSE, "send while not ready, "
				   "mylid=%u, flags=0x%x\n",
				   dd->ipath_lid, dd->ipath_flags);
		}

	vlsllnh = *((__be16 *) hdr);
	if (vlsllnh != htons(IPATH_LRH_BTH)) {
		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
			  "not sending\n", be16_to_cpu(vlsllnh),
			  IPATH_LRH_BTH);
		ret = -EINVAL;
	}
	if (ret)
		goto done;

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (piobuf == NULL) {
		ret = -EBUSY;
		goto done;
	}

	plen = (sizeof(*hdr) >> 2); /* actual length */
	ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

	writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
	ipath_flush_wc();	/* flush pbc */
	piobuf += 2;		/* skip past the 2-word PBC just written */
	uhdr = (u32 *) hdr;
	count = plen-1; /* amount we can copy before trigger word */
	__iowrite32_copy(piobuf, uhdr, count);
	ipath_flush_wc();	/* flush data before writing trigger word */
	__raw_writel(uhdr[count], piobuf + count);
	ipath_flush_wc();	/* ensure it's sent, now */

	ipath_stats.sps_ether_spkts++;	/* ether packet sent */

done:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
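
/*
 * Illustrative use only (hypothetical caller): a layered ethernet
 * transmit path would hand a fully built header to this routine and
 * retry later on -EBUSY (no free PIO send buffer):
 *
 *	ret = ipath_layer_send_hdr(dd, hdr);
 *	if (ret == -EBUSY)
 *		return NETDEV_TX_BUSY;
 *
 * -ENETDOWN means the link or LID is not up yet; -EINVAL means the unit
 * is not open or the LRH does not carry the expected VL/LNH value.
 */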
int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);