/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */

#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ipath_layer.h"
#include "ipath_common.h"
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

static int ipath_verbs_registered;

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);
static void (*verbs_timer_cb)(void *);
66 int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
70 if (dd->ipath_layer.l_arg && layer_intr)
71 ret = layer_intr(dd->ipath_layer.l_arg, arg);
76 int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
80 mutex_lock(&ipath_layer_mutex);
82 ret = __ipath_layer_intr(dd, arg);
84 mutex_unlock(&ipath_layer_mutex);
89 int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
94 if (dd->ipath_layer.l_arg && layer_rcv)
95 ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
100 int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
104 if (dd->ipath_layer.l_arg && layer_rcv_lid)
105 ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
110 int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
114 if (dd->verbs_layer.l_arg && verbs_piobufavail)
115 ret = verbs_piobufavail(dd->verbs_layer.l_arg);
120 int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
125 if (dd->verbs_layer.l_arg && verbs_rcv) {
126 verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
133 int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
139 case IPATH_IB_LINKDOWN:
140 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
141 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
146 case IPATH_IB_LINKDOWN_SLEEP:
147 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
148 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
153 case IPATH_IB_LINKDOWN_DISABLE:
154 ipath_set_ib_lstate(dd,
155 INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
156 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
161 case IPATH_IB_LINKINIT:
162 if (dd->ipath_flags & IPATH_LINKINIT) {
166 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
167 INFINIPATH_IBCC_LINKCMD_SHIFT);
168 lstate = IPATH_LINKINIT;
171 case IPATH_IB_LINKARM:
172 if (dd->ipath_flags & IPATH_LINKARMED) {
176 if (!(dd->ipath_flags &
177 (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
181 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
182 INFINIPATH_IBCC_LINKCMD_SHIFT);
184 * Since the port can transition to ACTIVE by receiving
185 * a non VL 15 packet, wait for either state.
187 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
190 case IPATH_IB_LINKACTIVE:
191 if (dd->ipath_flags & IPATH_LINKACTIVE) {
195 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
199 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
200 INFINIPATH_IBCC_LINKCMD_SHIFT);
201 lstate = IPATH_LINKACTIVE;
205 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
209 ret = ipath_wait_linkstate(dd, lstate, 2000);
215 EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
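/*
 * Illustrative sketch (not part of the driver): a caller that wants the
 * link active steps it through INIT and ARM first, matching the checks
 * in the switch above.  The helper name my_bring_link_up() is invented
 * for this example.
 *
 *	static int my_bring_link_up(struct ipath_devdata *dd)
 *	{
 *		int ret;
 *
 *		ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKINIT);
 *		if (ret < 0)
 *			return ret;
 *		ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM);
 *		if (ret < 0)
 *			return ret;
 *		return ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
 *	}
 */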
/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
229 int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
236 * mtu is IB data payload max. It's the largest power of 2 less
237 * than piosize (or even larger, since it only really controls the
238 * largest we can receive; we can send the max of the mtu and
239 * piosize). We check that it's one of the valid IB sizes.
241 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
243 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
247 if (dd->ipath_ibmtu == arg) {
248 ret = 0; /* same as current */
252 piosize = dd->ipath_ibmaxlen;
253 dd->ipath_ibmtu = arg;
255 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
256 /* Only if it's not the initial value (or reset to it) */
257 if (piosize != dd->ipath_init_ibmaxlen) {
258 dd->ipath_ibmaxlen = piosize;
261 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
262 piosize = arg + IPATH_PIO_MAXIBHDR;
263 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
264 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
266 dd->ipath_ibmaxlen = piosize;
272 * set the IBC maxpktlength to the size of our pio
275 u64 ibc = dd->ipath_ibcctrl;
276 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
277 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
279 piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
280 dd->ipath_ibmaxlen = piosize;
281 piosize /= sizeof(u32); /* in words */
283 * for ICRC, which we only send in diag test pkt mode, and
284 * we don't need to worry about that for mtu
288 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
289 dd->ipath_ibcctrl = ibc;
290 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
292 dd->ipath_f_tidtemplate(dd);
301 EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
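/*
 * Illustrative sketch (not part of the driver): only the discrete IB MTU
 * sizes (256 through 4096) are accepted above, so a caller with an
 * arbitrary request clamps it down to the nearest valid value first.
 * my_clamp_ib_mtu() is an invented helper name.
 *
 *	static u16 my_clamp_ib_mtu(u16 want)
 *	{
 *		static const u16 ib_mtus[] = { 4096, 2048, 1024, 512, 256 };
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(ib_mtus); i++)
 *			if (want >= ib_mtus[i])
 *				return ib_mtus[i];
 *		return 256;	// smallest legal IB MTU
 *	}
 *
 *	// then: ipath_layer_set_mtu(dd, my_clamp_ib_mtu(want));
 */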
303 int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
308 mutex_lock(&ipath_layer_mutex);
310 if (dd->ipath_layer.l_arg && layer_intr)
311 layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
313 mutex_unlock(&ipath_layer_mutex);
318 EXPORT_SYMBOL_GPL(ipath_set_lid);
320 int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
322 /* XXX - need to inform anyone who cares this just happened. */
323 dd->ipath_guid = guid;
327 EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
329 __be64 ipath_layer_get_guid(struct ipath_devdata *dd)
331 return dd->ipath_guid;
334 EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
336 u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
338 return dd->ipath_nguid;
341 EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
343 u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
345 return dd->ipath_majrev;
348 EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);
350 u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
352 return dd->ipath_minrev;
355 EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);
357 u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
359 return dd->ipath_pcirev;
362 EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);
364 u32 ipath_layer_get_flags(struct ipath_devdata *dd)
366 return dd->ipath_flags;
369 EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
371 struct device *ipath_layer_get_device(struct ipath_devdata *dd)
373 return &dd->pcidev->dev;
376 EXPORT_SYMBOL_GPL(ipath_layer_get_device);
378 u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
380 return dd->ipath_deviceid;
383 EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
385 u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
387 return dd->ipath_vendorid;
390 EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);
392 u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
394 return dd->ipath_lastibcstat;
397 EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
399 u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
401 return dd->ipath_ibmtu;
404 EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
406 void ipath_layer_add(struct ipath_devdata *dd)
408 mutex_lock(&ipath_layer_mutex);
411 dd->ipath_layer.l_arg =
412 layer_add_one(dd->ipath_unit, dd);
415 dd->verbs_layer.l_arg =
416 verbs_add_one(dd->ipath_unit, dd);
418 mutex_unlock(&ipath_layer_mutex);
421 void ipath_layer_remove(struct ipath_devdata *dd)
423 mutex_lock(&ipath_layer_mutex);
425 if (dd->ipath_layer.l_arg && layer_remove_one) {
426 layer_remove_one(dd->ipath_layer.l_arg);
427 dd->ipath_layer.l_arg = NULL;
430 if (dd->verbs_layer.l_arg && verbs_remove_one) {
431 verbs_remove_one(dd->verbs_layer.l_arg);
432 dd->verbs_layer.l_arg = NULL;
435 mutex_unlock(&ipath_layer_mutex);
438 int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
439 void (*l_remove)(void *),
440 int (*l_intr)(void *, u32),
441 int (*l_rcv)(void *, void *, struct sk_buff *),
443 int (*l_rcv_lid)(void *, void *))
445 struct ipath_devdata *dd, *tmp;
448 mutex_lock(&ipath_layer_mutex);
450 layer_add_one = l_add;
451 layer_remove_one = l_remove;
454 layer_rcv_lid = l_rcv_lid;
455 ipath_layer_rcv_opcode = l_rcv_opcode;
457 spin_lock_irqsave(&ipath_devs_lock, flags);
459 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
460 if (!(dd->ipath_flags & IPATH_INITTED))
463 if (dd->ipath_layer.l_arg)
466 if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
467 *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
469 spin_unlock_irqrestore(&ipath_devs_lock, flags);
470 dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
471 spin_lock_irqsave(&ipath_devs_lock, flags);
474 spin_unlock_irqrestore(&ipath_devs_lock, flags);
475 mutex_unlock(&ipath_layer_mutex);
480 EXPORT_SYMBOL_GPL(ipath_layer_register);
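/*
 * Illustrative sketch (not part of the driver): how a layered ethernet
 * driver might hook in.  The my_* callbacks and MY_ETH_OPCODE are invented
 * names; the real ipath_ether module supplies its own.
 *
 *	static void *my_add(int unit, struct ipath_devdata *dd) { ... }
 *	static void my_remove(void *arg) { ... }
 *	static int my_intr(void *arg, u32 what) { ... }
 *	static int my_rcv(void *arg, void *hdr, struct sk_buff *skb) { ... }
 *	static int my_rcv_lid(void *arg, void *hdr) { ... }
 *
 *	ipath_layer_register(my_add, my_remove, my_intr, my_rcv,
 *			     MY_ETH_OPCODE, my_rcv_lid);
 *
 * ipath_layer_unregister() undoes this on module unload.
 */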
482 void ipath_layer_unregister(void)
484 struct ipath_devdata *dd, *tmp;
487 mutex_lock(&ipath_layer_mutex);
488 spin_lock_irqsave(&ipath_devs_lock, flags);
490 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
491 if (dd->ipath_layer.l_arg && layer_remove_one) {
492 spin_unlock_irqrestore(&ipath_devs_lock, flags);
493 layer_remove_one(dd->ipath_layer.l_arg);
494 spin_lock_irqsave(&ipath_devs_lock, flags);
495 dd->ipath_layer.l_arg = NULL;
499 spin_unlock_irqrestore(&ipath_devs_lock, flags);
501 layer_add_one = NULL;
502 layer_remove_one = NULL;
505 layer_rcv_lid = NULL;
507 mutex_unlock(&ipath_layer_mutex);
510 EXPORT_SYMBOL_GPL(ipath_layer_unregister);
512 static void __ipath_verbs_timer(unsigned long arg)
514 struct ipath_devdata *dd = (struct ipath_devdata *) arg;
517 * If port 0 receive packet interrupts are not available, or
518 * can be missed, poll the receive queue
520 if (dd->ipath_flags & IPATH_POLL_RX_INTR)
523 /* Handle verbs layer timeouts. */
524 if (dd->verbs_layer.l_arg && verbs_timer_cb)
525 verbs_timer_cb(dd->verbs_layer.l_arg);
527 mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
/**
 * ipath_verbs_register - verbs layer registration
 * @l_add: callback to allocate per-device verbs data
 * @l_remove: callback to free per-device verbs data
 * @l_piobufavail: callback for when PIO buffers become available
 * @l_rcv: callback for receiving a packet
 * @l_timer_cb: timer callback
 */
537 int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
538 void (*l_remove)(void *arg),
539 int (*l_piobufavail) (void *arg),
540 void (*l_rcv) (void *arg, void *rhdr,
541 void *data, u32 tlen),
542 void (*l_timer_cb) (void *arg))
544 struct ipath_devdata *dd, *tmp;
547 mutex_lock(&ipath_layer_mutex);
549 verbs_add_one = l_add;
550 verbs_remove_one = l_remove;
551 verbs_piobufavail = l_piobufavail;
553 verbs_timer_cb = l_timer_cb;
555 spin_lock_irqsave(&ipath_devs_lock, flags);
557 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
558 if (!(dd->ipath_flags & IPATH_INITTED))
561 if (dd->verbs_layer.l_arg)
564 spin_unlock_irqrestore(&ipath_devs_lock, flags);
565 dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
566 spin_lock_irqsave(&ipath_devs_lock, flags);
569 spin_unlock_irqrestore(&ipath_devs_lock, flags);
570 mutex_unlock(&ipath_layer_mutex);
572 ipath_verbs_registered = 1;
577 EXPORT_SYMBOL_GPL(ipath_verbs_register);
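/*
 * Illustrative sketch (not part of the driver): the verbs module registers
 * its per-device constructor/destructor plus the data-path callbacks in a
 * single call; the vb_* names are invented for this example.
 *
 *	static void *vb_add(int unit, struct ipath_devdata *dd) { ... }
 *	static void vb_remove(void *arg) { ... }
 *	static int vb_piobufavail(void *arg) { ... }
 *	static void vb_rcv(void *arg, void *rhdr, void *data, u32 tlen) { ... }
 *	static void vb_timer_cb(void *arg) { ... }
 *
 *	ipath_verbs_register(vb_add, vb_remove, vb_piobufavail,
 *			     vb_rcv, vb_timer_cb);
 */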
579 void ipath_verbs_unregister(void)
581 struct ipath_devdata *dd, *tmp;
584 mutex_lock(&ipath_layer_mutex);
585 spin_lock_irqsave(&ipath_devs_lock, flags);
587 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
588 *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
590 if (dd->verbs_layer.l_arg && verbs_remove_one) {
591 spin_unlock_irqrestore(&ipath_devs_lock, flags);
592 verbs_remove_one(dd->verbs_layer.l_arg);
593 spin_lock_irqsave(&ipath_devs_lock, flags);
594 dd->verbs_layer.l_arg = NULL;
598 spin_unlock_irqrestore(&ipath_devs_lock, flags);
600 verbs_add_one = NULL;
601 verbs_remove_one = NULL;
602 verbs_piobufavail = NULL;
604 verbs_timer_cb = NULL;
606 ipath_verbs_registered = 0;
608 mutex_unlock(&ipath_layer_mutex);
611 EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
613 int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
618 mutex_lock(&ipath_layer_mutex);
620 if (!dd->ipath_layer.l_arg) {
625 ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);
630 *pktmax = dd->ipath_ibmaxlen;
632 if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
633 intval |= IPATH_LAYER_INT_IF_UP;
635 intval |= IPATH_LAYER_INT_LID;
637 intval |= IPATH_LAYER_INT_BCAST;
639 * do this on open, in case low level is already up and
640 * just layered driver was reloaded, etc.
643 layer_intr(dd->ipath_layer.l_arg, intval);
647 mutex_unlock(&ipath_layer_mutex);
652 EXPORT_SYMBOL_GPL(ipath_layer_open);
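/*
 * Illustrative sketch (not part of the driver): a layered driver typically
 * calls ipath_layer_open() once the low-level device is known, to learn
 * the largest packet (header plus payload) it may build; pktmax is an
 * invented local name.
 *
 *	u32 pktmax;
 *
 *	if (!ipath_layer_open(dd, &pktmax))
 *		printk(KERN_DEBUG "max send size is %u bytes\n", pktmax);
 */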
654 u16 ipath_layer_get_lid(struct ipath_devdata *dd)
656 return dd->ipath_lid;
659 EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * The MAC is the EUI-64 OUI octets (top 3) followed by the low 3 GUID
 * octets; the 2 bytes in between (which should both be zero or 0xff)
 * are skipped.  The returned MAC is in network order, and mac must
 * point to at least 6 bytes of buffer.  We assume that by the time the
 * LID is set, the GUID is as valid as it's ever going to be, rather
 * than adding yet another status bit.
 */
674 int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
678 guid = (u8 *) &dd->ipath_guid;
686 if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
687 ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
688 "%x %x\n", guid[3], guid[4]);
692 EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
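/*
 * Illustrative sketch (not part of the driver): for a (made-up) GUID of
 * 00:11:75:ff:ff:a0:b1:c2 in network order, the derived MAC is
 * 00:11:75:a0:b1:c2 - bytes 3 and 4 (the 0xff/0x00 filler) are dropped.
 *
 *	u8 mac[6];
 *
 *	if (!ipath_layer_get_mac(dd, mac))
 *		printk(KERN_DEBUG "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
 *		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
 */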
694 u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
696 return dd->ipath_mlid;
699 EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
701 u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
703 return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
706 EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
708 static void update_sge(struct ipath_sge_state *ss, u32 length)
710 struct ipath_sge *sge = &ss->sge;
712 sge->vaddr += length;
713 sge->length -= length;
714 sge->sge_length -= length;
715 if (sge->sge_length == 0) {
717 *sge = *ss->sg_list++;
718 } else if (sge->length == 0 && sge->mr != NULL) {
719 if (++sge->n >= IPATH_SEGSZ) {
720 if (++sge->m >= sge->mr->mapsz)
724 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
725 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
729 #ifdef __LITTLE_ENDIAN
730 static inline u32 get_upper_bits(u32 data, u32 shift)
732 return data >> shift;
735 static inline u32 set_upper_bits(u32 data, u32 shift)
737 return data << shift;
740 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
742 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
743 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
747 static inline u32 get_upper_bits(u32 data, u32 shift)
749 return data << shift;
752 static inline u32 set_upper_bits(u32 data, u32 shift)
754 return data >> shift;
757 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
759 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
760 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
765 static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
773 u32 len = ss->sge.length;
779 if (len > ss->sge.sge_length)
780 len = ss->sge.sge_length;
781 /* If the source address is not aligned, try to align it. */
782 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
784 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
786 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
789 y = sizeof(u32) - off;
792 if (len + extra >= sizeof(u32)) {
793 data |= set_upper_bits(v, extra *
795 len = sizeof(u32) - extra;
800 __raw_writel(data, piobuf);
805 /* Clear unused upper bytes */
806 data |= clear_upper_bytes(v, len, extra);
814 /* Source address is aligned. */
815 u32 *addr = (u32 *) ss->sge.vaddr;
816 int shift = extra * BITS_PER_BYTE;
817 int ushift = 32 - shift;
820 while (l >= sizeof(u32)) {
823 data |= set_upper_bits(v, shift);
824 __raw_writel(data, piobuf);
825 data = get_upper_bits(v, ushift);
831 * We still have 'extra' number of bytes leftover.
836 if (l + extra >= sizeof(u32)) {
837 data |= set_upper_bits(v, shift);
838 len -= l + extra - sizeof(u32);
843 __raw_writel(data, piobuf);
848 /* Clear unused upper bytes */
849 data |= clear_upper_bytes(v, l,
857 } else if (len == length) {
861 } else if (len == length) {
865 * Need to round up for the last dword in the
869 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
871 last = ((u32 *) ss->sge.vaddr)[w - 1];
876 __iowrite32_copy(piobuf, ss->sge.vaddr, w);
879 extra = len & (sizeof(u32) - 1);
881 u32 v = ((u32 *) ss->sge.vaddr)[w];
883 /* Clear unused upper bytes */
884 data = clear_upper_bytes(v, extra, 0);
890 /* Update address before sending packet. */
891 update_sge(ss, length);
892 /* must flush early everything before trigger word */
894 __raw_writel(last, piobuf);
895 /* be sure trigger word is written */
/**
 * ipath_verbs_send - send a packet from the verbs layer
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 *
 * This is like ipath_sma_send_pkt() in that we need to be able to send
 * packets after the chip is initialized (MADs) but also like
 * ipath_layer_send_hdr() since it's used by the verbs layer.
 */
911 int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
912 u32 *hdr, u32 len, struct ipath_sge_state *ss)
918 /* +1 is for the qword padding of pbc */
919 plen = hdrwords + ((len + 3) >> 2) + 1;
920 if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
921 ipath_dbg("packet len 0x%x too long, failing\n", plen);
926 /* Get a PIO buffer to use. */
927 piobuf = ipath_getpiobuf(dd, NULL);
928 if (unlikely(piobuf == NULL)) {
934 * Write len to control qword, no flags.
935 * We have to flush after the PBC for correctness on some cpus
936 * or WC buffer can be written out of order.
938 writeq(plen, piobuf);
943 * If there is just the header portion, must flush before
944 * writing last word of header for correctness, and after
945 * the last header word (trigger word).
947 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
949 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
955 __iowrite32_copy(piobuf, hdr, hdrwords);
958 /* The common case is aligned and contained in one segment. */
959 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
960 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
962 u32 *addr = (u32 *) ss->sge.vaddr;
964 /* Update address before sending packet. */
966 /* Need to round up for the last dword in the packet. */
968 __iowrite32_copy(piobuf, addr, w - 1);
969 /* must flush early everything before trigger word */
971 __raw_writel(addr[w - 1], piobuf + w - 1);
972 /* be sure trigger word is written */
977 copy_io(piobuf, ss, len);
984 EXPORT_SYMBOL_GPL(ipath_verbs_send);
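/*
 * Worked example (illustrative, not part of the driver): the PIO word
 * count sent to the chip is the header words, plus the payload rounded
 * up to dwords, plus one dword for the PBC, which is what the plen
 * check above enforces against ipath_ibmaxlen.
 *
 *	A 40-byte header (hdrwords = 10) with a 19-byte payload gives
 *	plen = 10 + ((19 + 3) >> 2) + 1 = 16 dwords.
 */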
986 int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
987 u64 *rwords, u64 *spkts, u64 *rpkts,
992 if (!(dd->ipath_flags & IPATH_INITTED)) {
993 /* no hardware, freeze, etc. */
994 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
998 *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
999 *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
1000 *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
1001 *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
1002 *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
1010 EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
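/*
 * Illustrative sketch (not part of the driver): a management agent samples
 * these counters periodically and reports deltas; every out parameter must
 * point at valid u64 storage.
 *
 *	u64 swords, rwords, spkts, rpkts, xwait;
 *
 *	if (!ipath_layer_snapshot_counters(dd, &swords, &rwords,
 *					   &spkts, &rpkts, &xwait))
 *		printk(KERN_DEBUG "sent %llu pkts, rcvd %llu pkts\n",
 *		       (unsigned long long) spkts,
 *		       (unsigned long long) rpkts);
 */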
1013 * ipath_layer_get_counters - get various chip counters
1014 * @dd: the infinipath device
1015 * @cntrs: counters are placed here
1017 * Return the counters needed by recv_pma_get_portcounters().
1019 int ipath_layer_get_counters(struct ipath_devdata *dd,
1020 struct ipath_layer_counters *cntrs)
1024 if (!(dd->ipath_flags & IPATH_INITTED)) {
1025 /* no hardware, freeze, etc. */
1026 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
1030 cntrs->symbol_error_counter =
1031 ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
1032 cntrs->link_error_recovery_counter =
1033 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
1035 * The link downed counter counts when the other side downs the
1036 * connection. We add in the number of times we downed the link
1037 * due to local link integrity errors to compensate.
1039 cntrs->link_downed_counter =
1040 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
1041 cntrs->port_rcv_errors =
1042 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
1043 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
1044 ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
1045 ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
1046 ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
1047 ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
1048 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
1049 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
1050 ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
1051 cntrs->port_rcv_remphys_errors =
1052 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
1053 cntrs->port_xmit_discards =
1054 ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
1055 cntrs->port_xmit_data =
1056 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
1057 cntrs->port_rcv_data =
1058 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
1059 cntrs->port_xmit_packets =
1060 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
1061 cntrs->port_rcv_packets =
1062 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
1063 cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
1064 cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
1072 EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
1074 int ipath_layer_want_buffer(struct ipath_devdata *dd)
1076 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
1077 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1078 dd->ipath_sendctrl);
1083 EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
1085 int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
1088 u32 __iomem *piobuf;
1093 if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
1094 ipath_dbg("send while not open\n");
1097 if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
1098 dd->ipath_lid == 0) {
1100 * lid check is for when sma hasn't yet configured
1103 ipath_cdbg(VERBOSE, "send while not ready, "
1104 "mylid=%u, flags=0x%x\n",
1105 dd->ipath_lid, dd->ipath_flags);
1108 vlsllnh = *((__be16 *) hdr);
1109 if (vlsllnh != htons(IPATH_LRH_BTH)) {
1110 ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
1111 "not sending\n", be16_to_cpu(vlsllnh),
1118 /* Get a PIO buffer to use. */
1119 piobuf = ipath_getpiobuf(dd, NULL);
1120 if (piobuf == NULL) {
1125 plen = (sizeof(*hdr) >> 2); /* actual length */
1126 ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);
1128 writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
1132 count = plen-1; /* amount we can copy before trigger word */
1133 __iowrite32_copy(piobuf, uhdr, count);
1135 __raw_writel(uhdr[count], piobuf + count);
1136 ipath_flush_wc(); /* ensure it's sent, now */
1138 ipath_stats.sps_ether_spkts++; /* ether packet sent */
1144 EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
1146 int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
1148 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
1150 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1151 dd->ipath_sendctrl);
1155 EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
1157 int ipath_layer_enable_timer(struct ipath_devdata *dd)
	/*
	 * The HT-400 has a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
1169 if (dd->ipath_flags & IPATH_GPIO_INTR) {
1170 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1171 0x2074076542310ULL);
1172 /* Enable GPIO bit 2 interrupt */
1173 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1177 init_timer(&dd->verbs_layer.l_timer);
1178 dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
1179 dd->verbs_layer.l_timer.data = (unsigned long)dd;
1180 dd->verbs_layer.l_timer.expires = jiffies + 1;
1181 add_timer(&dd->verbs_layer.l_timer);
1186 EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
1188 int ipath_layer_disable_timer(struct ipath_devdata *dd)
1190 /* Disable GPIO bit 2 interrupt */
1191 if (dd->ipath_flags & IPATH_GPIO_INTR)
1192 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
1194 del_timer_sync(&dd->verbs_layer.l_timer);
1199 EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
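/*
 * Illustrative sketch (not part of the driver): these two calls are meant
 * to bracket the verbs module's use of the device, typically from its
 * l_add and l_remove callbacks.
 *
 *	ipath_layer_enable_timer(dd);	// in the add/attach path
 *	...
 *	ipath_layer_disable_timer(dd);	// in the remove/detach path
 */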
1202 * ipath_layer_set_verbs_flags - set the verbs layer flags
1203 * @dd: the infinipath device
1204 * @flags: the flags to set
1206 int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
1208 struct ipath_devdata *ss;
1209 unsigned long lflags;
1211 spin_lock_irqsave(&ipath_devs_lock, lflags);
1213 list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
1214 if (!(ss->ipath_flags & IPATH_INITTED))
1216 if ((flags & IPATH_VERBS_KERNEL_SMA) &&
1217 !(*ss->ipath_statusp & IPATH_STATUS_SMA))
1218 *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
1220 *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
1223 spin_unlock_irqrestore(&ipath_devs_lock, lflags);
1228 EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
1231 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
1232 * @dd: the infinipath device
1234 unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
1236 return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1239 EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
1242 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
1243 * @dd: the infinipath device
1244 * @index: the PKEY index
1246 unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
1250 if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1253 ret = dd->ipath_pd[0]->port_pkeys[index];
1258 EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
1261 * ipath_layer_get_pkeys - return the PKEY table for port 0
1262 * @dd: the infinipath device
1263 * @pkeys: the pkey table is placed here
1265 int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1267 struct ipath_portdata *pd = dd->ipath_pd[0];
1269 memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
1274 EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
1284 static int rm_pkey(struct ipath_devdata *dd, u16 key)
1289 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1290 if (dd->ipath_pkeys[i] != key)
1292 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
1293 dd->ipath_pkeys[i] = 0;
/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
1314 static int add_pkey(struct ipath_devdata *dd, u16 key)
1317 u16 lkey = key & 0x7FFF;
1321 if (lkey == 0x7FFF) {
1326 /* Look for an empty slot or a matching PKEY. */
1327 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1328 if (!dd->ipath_pkeys[i]) {
1332 /* If it matches exactly, try to increment the ref count */
1333 if (dd->ipath_pkeys[i] == key) {
1334 if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
1338 /* Lost the race. Look for an empty slot below. */
1339 atomic_dec(&dd->ipath_pkeyrefs[i]);
1343 * It makes no sense to have both the limited and unlimited
1344 * PKEY set at the same time since the unlimited one will
1345 * disable the limited one.
1347 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
1356 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1357 if (!dd->ipath_pkeys[i] &&
1358 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
1359 /* for ipathstats, etc. */
1360 ipath_stats.sps_pkeys[i] = lkey;
1361 dd->ipath_pkeys[i] = key;
1373 * ipath_layer_set_pkeys - set the PKEY table for port 0
1374 * @dd: the infinipath device
1375 * @pkeys: the PKEY table
1377 int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1379 struct ipath_portdata *pd;
1383 pd = dd->ipath_pd[0];
1385 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
1387 u16 okey = pd->port_pkeys[i];
1392 * The value of this PKEY table entry is changing.
1393 * Remove the old entry in the hardware's array of PKEYs.
1396 changed |= rm_pkey(dd, okey);
1398 int ret = add_pkey(dd, key);
1405 pd->port_pkeys[i] = key;
1410 pkey = (u64) dd->ipath_pkeys[0] |
1411 ((u64) dd->ipath_pkeys[1] << 16) |
1412 ((u64) dd->ipath_pkeys[2] << 32) |
1413 ((u64) dd->ipath_pkeys[3] << 48);
1414 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
1415 (unsigned long long) pkey);
1416 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
1422 EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
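/*
 * Illustrative sketch (not part of the driver): bit 15 of a PKEY is the
 * full-membership bit, so 0x8001 (full) and 0x0001 (limited) share the
 * same low 15 bits and, as the comment in add_pkey() notes, cannot both
 * be installed.  A caller replacing entry 1 of the port 0 table might do
 * (assuming a 4-entry table, per ipath_layer_get_npkeys()):
 *
 *	u16 pkeys[4];
 *
 *	ipath_layer_get_pkeys(dd, pkeys);
 *	pkeys[1] = 0x8001;		// full-member PKEY 0x0001
 *	ipath_layer_set_pkeys(dd, pkeys);
 */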
1425 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
1426 * @dd: the infinipath device
1428 * Returns zero if the default is POLL, 1 if the default is SLEEP.
1430 int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
1432 return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
1435 EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
1438 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
1439 * @dd: the infinipath device
1440 * @sleep: the new state
1442 * Note that this will only take effect when the link state changes.
1444 int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
1448 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1450 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1451 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1456 EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
1458 int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
1460 return (dd->ipath_ibcctrl >>
1461 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1462 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1465 EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
1468 * ipath_layer_set_phyerrthreshold - set the physical error threshold
1469 * @dd: the infinipath device
1470 * @n: the new threshold
1472 * Note that this will only take effect when the link state changes.
1474 int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
1478 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1479 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1481 dd->ipath_ibcctrl &=
1482 ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
1483 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
1484 dd->ipath_ibcctrl |=
1485 (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
1486 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1492 EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
1494 int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
1496 return (dd->ipath_ibcctrl >>
1497 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1498 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1501 EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
1504 * ipath_layer_set_overrunthreshold - set the overrun threshold
1505 * @dd: the infinipath device
1506 * @n: the new threshold
1508 * Note that this will only take effect when the link state changes.
1510 int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
1514 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1515 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1517 dd->ipath_ibcctrl &=
1518 ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
1519 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
1520 dd->ipath_ibcctrl |=
1521 (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
1522 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1528 EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
1530 int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
1533 return dd->ipath_f_get_boardname(dd, name, namelen);
1535 EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
1537 u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
1539 return dd->ipath_rcvhdrentsize;
1541 EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);