1 #ifndef _IPATH_KERNEL_H
2 #define _IPATH_KERNEL_H
4 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
5 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
37 * This header file is the base header file for infinipath kernel code.
38 * ipath_user.h serves a similar purpose for user code.
41 #include <linux/interrupt.h>
42 #include <linux/pci.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/mutex.h>
45 #include <linux/list.h>
46 #include <linux/scatterlist.h>
48 #include <rdma/ib_verbs.h>
50 #include "ipath_common.h"
51 #include "ipath_debug.h"
52 #include "ipath_registers.h"
54 /* only s/w major version of InfiniPath we can handle */
55 #define IPATH_CHIP_VERS_MAJ 2U
57 /* don't care about this except printing */
58 #define IPATH_CHIP_VERS_MIN 0U
60 /* temporary, maybe always */
61 extern struct infinipath_stats ipath_stats;
63 #define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
65 * First-cut criterion for "device is active" is
66 * two thousand dwords combined Tx, Rx traffic per
67 * 5-second interval. SMA packets are 64 dwords,
68 * and occur "a few per second", presumably each way.
70 #define IPATH_TRAFFIC_ACTIVE_THRESHOLD (2000)
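/*
 * Illustrative sketch only (not driver code): the periodic stats timer
 * could apply this threshold roughly as below, assuming ipath_traffic_wds
 * has accumulated the Tx+Rx dword counts for the 5-second interval (it is
 * updated by ipath_snap_cntrs() under ipath_eep_st_lock):
 *
 *	if (dd->ipath_traffic_wds >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
 *		atomic_add(5, &dd->ipath_active_time);	(credit 5 s active)
 *	dd->ipath_traffic_wds = 0;	(start counting the next interval)
 */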
72 * Struct used to indicate which errors are logged in each of the
73 * error-counters that are logged to EEPROM. A counter is incremented
74 * _once_ (saturating at 255) for each event with any bits set in
75 * the error or hwerror register masks below.
77 #define IPATH_EEP_LOG_CNT (4)
78 struct ipath_eep_log_mask {
83 struct ipath_portdata {
84 void **port_rcvegrbuf;
85 dma_addr_t *port_rcvegrbuf_phys;
86 /* rcvhdrq base, needs mmap before useful */
88 /* kernel virtual address where hdrqtail is updated */
89 void *port_rcvhdrtail_kvaddr;
91 * temp buffer for expected send setup, allocated at open, instead
94 void *port_tid_pg_list;
95 /* when waiting for rcv or pioavail */
96 wait_queue_head_t port_wait;
98 * rcvegr bufs base, physical; must fit in 44 bits
99 * (so 32-bit programs that mmap64 a 44-bit address still work)
101 dma_addr_t port_rcvegr_phys;
102 /* mmap of hdrq, must fit in 44 bits */
103 dma_addr_t port_rcvhdrq_phys;
104 dma_addr_t port_rcvhdrqtailaddr_phys;
106 * number of opens (including slave subports) on this instance
107 * (ignoring forks, dup, etc. for now)
111 * how much space to leave at start of eager TID entries for
112 * protocol use, on each TID
114 /* instead of calculating it */
116 /* non-zero if port is being shared. */
117 u16 port_subport_cnt;
118 /* non-zero if port is being shared. */
120 /* chip offset of PIO buffers for this port */
122 /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
123 u32 port_rcvegrbuf_chunks;
124 /* how many egrbufs per chunk */
125 u32 port_rcvegrbufs_perchunk;
126 /* order for port_rcvegrbuf_pages */
127 size_t port_rcvegrbuf_size;
128 /* rcvhdrq size (for freeing) */
129 size_t port_rcvhdrq_size;
130 /* next expected TID to check when looking for free */
132 /* status / wait flags for this port (IPATH_PORT_* bit numbers below) */
133 unsigned long port_flag;
135 unsigned long int_flag;
136 /* WAIT_RCV that timed out, no interrupt */
138 /* WAIT_PIO that timed out, no interrupt */
140 /* WAIT_RCV already happened, no wait */
142 /* WAIT_PIO already happened, no wait */
144 /* total number of rcvhdrqfull errors */
147 * Used to suppress repeated messages about the same
148 * port being stuck at the same point.
150 u32 port_lastrcvhdrqtail;
151 /* saved total number of rcvhdrqfull errors for poll edge trigger */
152 u32 port_hdrqfull_poll;
153 /* total number of polled urgent packets */
155 /* saved total number of polled urgent packets for poll edge trigger */
156 u32 port_urgent_poll;
157 /* pid of process using this port */
159 pid_t port_subpid[INFINIPATH_MAX_SUBPORT];
160 /* same size as task_struct .comm[] */
162 /* pkeys set by this use of this port */
164 /* so file ops can get at unit */
165 struct ipath_devdata *port_dd;
166 /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
167 void *subport_uregbase;
168 /* An array of pages for the eager receive buffers * N */
169 void *subport_rcvegrbuf;
170 /* An array of pages for the eager header queue entries * N */
171 void *subport_rcvhdr_base;
172 /* The version of the library which opened this port */
174 /* Bitmask of active slaves */
176 /* Type of packets or conditions we want to poll for */
178 /* port rcvhdrq head offset */
180 /* receive packet sequence counter */
185 struct ipath_sge_state;
186 struct ipath_verbs_txreq;
189 * control information for layered drivers
191 struct _ipath_layer {
195 struct ipath_skbinfo {
200 struct ipath_sdma_txreq {
204 struct scatterlist *sg;
207 void (*callback)(void *, int);
208 void *callback_cookie;
210 u16 start_idx; /* sdma private */
211 u16 next_descq_idx; /* sdma private */
212 struct list_head list; /* sdma private */
215 struct ipath_sdma_desc {
219 #define IPATH_SDMA_TXREQ_F_USELARGEBUF 0x1
220 #define IPATH_SDMA_TXREQ_F_HEADTOHOST 0x2
221 #define IPATH_SDMA_TXREQ_F_INTREQ 0x4
222 #define IPATH_SDMA_TXREQ_F_FREEBUF 0x8
223 #define IPATH_SDMA_TXREQ_F_FREEDESC 0x10
224 #define IPATH_SDMA_TXREQ_F_VL15 0x20
226 #define IPATH_SDMA_TXREQ_S_OK 0
227 #define IPATH_SDMA_TXREQ_S_SENDERROR 1
228 #define IPATH_SDMA_TXREQ_S_ABORTED 2
229 #define IPATH_SDMA_TXREQ_S_SHUTDOWN 3
231 /* max dwords in small buffer packet */
232 #define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
235 * Possible IB config parameters for ipath_f_get/set_ib_cfg()
237 #define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
238 #define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
239 #define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
240 #define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
241 #define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
242 #define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
243 #define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
244 #define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
245 #define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
246 #define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
247 #define IPATH_IB_CFG_LINKLATENCY 8 /* Get link latency */
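/*
 * Usage sketch (hedged; real call sites are chip-specific): these selectors
 * are passed to the per-chip ipath_f_get_ib_cfg/ipath_f_set_ib_cfg hooks
 * declared in struct ipath_devdata below, e.g.:
 *
 *	int lwid = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID);
 *	dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, IPATH_IB_HRTBT_ON);
 */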
250 struct ipath_devdata {
251 struct list_head ipath_list;
253 struct ipath_kregs const *ipath_kregs;
254 struct ipath_cregs const *ipath_cregs;
256 /* mem-mapped pointer to base of chip regs */
257 u64 __iomem *ipath_kregbase;
258 /* end of mem-mapped chip space; range checking */
259 u64 __iomem *ipath_kregend;
260 /* physical address of chip for io_remap, etc. */
261 unsigned long ipath_physaddr;
262 /* base of memory allocated for ipath_kregbase, kept so it can be freed */
263 u64 *ipath_kregalloc;
264 /* ipath_cfgports pointers */
265 struct ipath_portdata **ipath_pd;
266 /* sk_buffs used by port 0 eager receive queue */
267 struct ipath_skbinfo *ipath_port0_skbinfo;
268 /* kvirt address of 1st 2k pio buffer */
269 void __iomem *ipath_pio2kbase;
270 /* kvirt address of 1st 4k pio buffer */
271 void __iomem *ipath_pio4kbase;
273 * points to area where PIOavail registers will be DMA'ed.
274 * Has to be on a page of its own, because the page will be
275 * mapped into user program space. This copy is *ONLY* ever
276 * written by DMA, not by the driver! Need a copy per device
277 * when we get to multiple devices
279 volatile __le64 *ipath_pioavailregs_dma;
280 /* physical address where updates occur */
281 dma_addr_t ipath_pioavailregs_phys;
282 struct _ipath_layer ipath_layer;
284 int (*ipath_f_intrsetup)(struct ipath_devdata *);
285 /* fallback to alternate interrupt type if possible */
286 int (*ipath_f_intr_fallback)(struct ipath_devdata *);
287 /* setup on-chip bus config */
288 int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
289 /* hard reset chip */
290 int (*ipath_f_reset)(struct ipath_devdata *);
291 int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
293 void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
294 void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
296 void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
297 int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
298 int (*ipath_f_early_init)(struct ipath_devdata *);
299 void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
300 void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem*,
302 void (*ipath_f_tidtemplate)(struct ipath_devdata *);
303 void (*ipath_f_cleanup)(struct ipath_devdata *);
304 void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
305 /* fill out chip-specific fields */
306 int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
308 void (*ipath_f_free_irq)(struct ipath_devdata *);
309 struct ipath_message_header *(*ipath_f_get_msgheader)
310 (struct ipath_devdata *, __le32 *);
311 void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
312 int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
313 int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
314 void (*ipath_f_config_jint)(struct ipath_devdata *, u16 , u16);
315 void (*ipath_f_read_counters)(struct ipath_devdata *,
316 struct infinipath_counters *);
317 void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
318 /* per chip actions needed for IB Link up/down changes */
319 int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
321 unsigned ipath_lastegr_idx;
322 struct ipath_ibdev *verbs_dev;
323 struct timer_list verbs_timer;
324 /* total dwords sent (summed from counter) */
326 /* total dwords rcvd (summed from counter) */
328 /* total packets sent (summed from counter) */
330 /* total packets rcvd (summed from counter) */
332 /* ipath_statusp initially points to this. */
334 /* GUID for this interface, in network order */
337 * aggregate of error bits reported since last cleared, for
338 * limiting of error reporting
340 ipath_err_t ipath_lasterror;
342 * aggregate of error bits reported since last cleared, for
343 * limiting of hwerror reporting
345 ipath_err_t ipath_lasthwerror;
346 /* errors masked because they occur too fast */
347 ipath_err_t ipath_maskederrs;
348 u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
349 /* time in jiffies at which to re-enable maskederrs */
350 unsigned long ipath_unmasktime;
351 /* count of egrfull errors, combined for all ports */
352 u64 ipath_last_tidfull;
353 /* for ipath_qcheck() */
354 u64 ipath_lastport0rcv_cnt;
355 /* template for writing TIDs */
356 u64 ipath_tidtemplate;
357 /* value to write to free TIDs */
358 u64 ipath_tidinvalid;
359 /* IBA6120 rcv interrupt setup */
360 u64 ipath_rhdrhead_intr_off;
362 /* size of memory at ipath_kregbase */
364 /* number of registers used for pioavail */
366 /* IPATH_POLL, etc. */
368 /* ipath_flags driver is waiting for */
369 u32 ipath_state_wanted;
370 /* last buffer for user use, first buf for kernel use is this
372 u32 ipath_lastport_piobuf;
373 /* is a stats timer active */
374 u32 ipath_stats_timer_active;
375 /* number of interrupts for this device -- saturates... */
376 u32 ipath_int_counter;
377 /* dwords sent read from counter */
379 /* dwords received read from counter */
381 /* sent packets read from counter */
383 /* received packets read from counter */
385 /* pio bufs allocated per port */
387 u32 ipath_pioupd_thresh; /* update threshold, some chips */
389 * maximum number of ports to configure; zero means use the number the
390 * chip supports, and configuring fewer gives more pio bufs/port, etc.
393 /* count of port 0 hdrqfull errors */
394 u32 ipath_p0_hdrqfull;
395 /* port 0 number of receive eager buffers */
396 u32 ipath_p0_rcvegrcnt;
399 * index of last piobuffer we used. Speeds up searching, by
400 * starting at this point. It doesn't matter if multiple CPUs use and
401 * update it; the last updater's write is the only one that matters.
402 * Whenever it wraps, we update the shadow copies. Need a copy per
403 * device when we get to multiple devices
405 u32 ipath_lastpioindex;
406 u32 ipath_lastpioindexl;
407 /* max length of freezemsg */
410 * consecutive times we wanted a PIO buffer but were unable to
413 u32 ipath_consec_nopiobuf;
415 * hint that we should update ipath_pioavailshadow before
416 * looking for a PIO buffer
418 u32 ipath_upd_pio_shadow;
419 /* so we can rewrite it after a chip reset */
421 /* so we can rewrite it after a chip reset */
423 u32 ipath_x1_fix_tries;
424 u32 ipath_autoneg_tries;
425 u32 serdes_first_init_done;
427 struct ipath_relock {
428 atomic_t ipath_relock_timer_active;
429 struct timer_list ipath_relock_timer;
430 unsigned int ipath_relock_interval; /* in jiffies */
431 } ipath_relock_singleton;
433 /* interrupt number */
435 /* HT/PCI Vendor ID (here for NodeInfo) */
437 /* HT/PCI Device ID (here for NodeInfo) */
439 /* offset in HT config space of slave/primary interface block */
440 u8 ipath_ht_slave_off;
441 /* for write combining settings */
442 unsigned long ipath_wc_cookie;
443 unsigned long ipath_wc_base;
444 unsigned long ipath_wc_len;
445 /* ref count for each pkey */
446 atomic_t ipath_pkeyrefs[4];
447 /* shadow copy of struct page *'s for exp tid pages */
448 struct page **ipath_pageshadow;
449 /* shadow copy of dma handles for exp tid pages */
450 dma_addr_t *ipath_physshadow;
451 u64 __iomem *ipath_egrtidbase;
452 /* lock to workaround chip bug 9437 and others */
453 spinlock_t ipath_kernel_tid_lock;
454 spinlock_t ipath_user_tid_lock;
455 spinlock_t ipath_sendctrl_lock;
459 * this address is mapped readonly into user processes so they can
460 * get status cheaply, whenever they want.
463 /* freeze msg if hw error put chip in freeze */
464 char *ipath_freezemsg;
465 /* pci access data structure */
466 struct pci_dev *pcidev;
467 struct cdev *user_cdev;
468 struct cdev *diag_cdev;
469 struct class_device *user_class_dev;
470 struct class_device *diag_class_dev;
471 /* timer used to prevent stats overflow, error throttling, etc. */
472 struct timer_list ipath_stats_timer;
473 /* timer to verify interrupts work, and fallback if possible */
474 struct timer_list ipath_intrchk_timer;
475 void *ipath_dummy_hdrq; /* used after port close */
476 dma_addr_t ipath_dummy_hdrq_phys;
478 /* SendDMA related entries */
479 spinlock_t ipath_sdma_lock;
480 u64 ipath_sdma_status;
481 unsigned long ipath_sdma_abort_jiffies;
482 unsigned long ipath_sdma_abort_intr_timeout;
483 unsigned long ipath_sdma_buf_jiffies;
484 struct ipath_sdma_desc *ipath_sdma_descq;
485 u64 ipath_sdma_descq_added;
486 u64 ipath_sdma_descq_removed;
487 int ipath_sdma_desc_nreserved;
488 u16 ipath_sdma_descq_cnt;
489 u16 ipath_sdma_descq_tail;
490 u16 ipath_sdma_descq_head;
491 u16 ipath_sdma_next_intr;
492 u16 ipath_sdma_reset_wait;
493 u8 ipath_sdma_generation;
494 struct tasklet_struct ipath_sdma_abort_task;
495 struct tasklet_struct ipath_sdma_notify_task;
496 struct list_head ipath_sdma_activelist;
497 struct list_head ipath_sdma_notifylist;
498 atomic_t ipath_sdma_vl15_count;
499 struct timer_list ipath_sdma_vl15_timer;
501 dma_addr_t ipath_sdma_descq_phys;
502 volatile __le64 *ipath_sdma_head_dma;
503 dma_addr_t ipath_sdma_head_phys;
505 unsigned long ipath_ureg_align; /* user register alignment */
507 struct delayed_work ipath_autoneg_work;
508 wait_queue_head_t ipath_autoneg_wait;
510 /* HoL blocking / user app forward-progress state */
511 unsigned ipath_hol_state;
512 unsigned ipath_hol_next;
513 struct timer_list ipath_hol_timer;
516 * Shadow copies of registers; size indicates read access size.
517 * Most of them are readonly, but some are write-only registers,
518 * where we manipulate the bits in the shadow copy, and then write
519 * the shadow copy to infinipath.
521 * We deliberately make most of these 32 bits, since they have
522 * restricted range. For any that we read, we want to generate 32
523 * bit accesses, since Opteron will generate 2 separate 32 bit HT
524 * transactions for a 64 bit read, and we want to avoid unnecessary
528 /* This is the 64 bit group */
531 * shadow of pioavail, check to be sure it's large enough at
534 unsigned long ipath_pioavailshadow[8];
535 /* bitmap of send buffers available for the kernel to use with PIO. */
536 unsigned long ipath_pioavailkernel[8];
537 /* shadow of kr_gpio_out, for rmw ops */
539 /* shadow the gpio mask register */
541 /* shadow the gpio output enable, etc... */
543 /* kr_revision shadow */
546 * shadow of ibcctrl, for interrupt handling of link changes,
551 * last ibcstatus, to suppress "duplicate" status change messages,
554 u64 ipath_lastibcstat;
555 /* hwerrmask shadow */
556 ipath_err_t ipath_hwerrmask;
557 ipath_err_t ipath_errormask; /* errormask shadow */
558 /* interrupt config reg shadow */
560 /* kr_sendpiobufbase value */
561 u64 ipath_piobufbase;
562 /* kr_ibcddrctrl shadow */
563 u64 ipath_ibcddrctrl;
565 /* these are the "32 bit" regs */
568 * number of GUIDs in the flash for this interface; may need some
569 * rethinking for setting on other ifaces
573 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
574 * all expect bit fields to be "unsigned long"
576 /* shadow kr_rcvctrl */
577 unsigned long ipath_rcvctrl;
578 /* shadow kr_sendctrl */
579 unsigned long ipath_sendctrl;
580 /* to not count armlaunch after cancel */
581 unsigned long ipath_lastcancel;
582 /* count cases where special trigger was needed (double write) */
583 unsigned long ipath_spectriggerhit;
585 /* value we put in kr_rcvhdrcnt */
587 /* value we put in kr_rcvhdrsize */
588 u32 ipath_rcvhdrsize;
589 /* value we put in kr_rcvhdrentsize */
590 u32 ipath_rcvhdrentsize;
591 /* offset of last entry in rcvhdrq */
593 /* kr_portcnt value */
595 /* kr_pagealign value */
597 /* number of "2KB" PIO buffers */
599 /* size in bytes of "2KB" PIO buffers */
601 /* number of "4KB" PIO buffers */
603 /* size in bytes of "4KB" PIO buffers */
605 u32 ipath_pioreserved; /* pio buffers reserved for special in-kernel use */
606 /* kr_rcvegrbase value */
607 u32 ipath_rcvegrbase;
608 /* kr_rcvegrcnt value */
610 /* kr_rcvtidbase value */
611 u32 ipath_rcvtidbase;
612 /* kr_rcvtidcnt value */
618 /* kr_counterregbase */
620 /* shadow the control register contents */
622 /* PCI revision register (HTC rev on FPGA) */
625 /* chip address space used by 4k pio buffers */
627 /* The MTU programmed for this unit */
630 * The max size IB packet, including IB headers, that we can send.
631 * Starts same as ipath_piosize, but is affected when ibmtu is
632 * changed, or by size of eager buffers
636 * ibmaxlen at init time, limited by chip and by receive buffer
637 * size. Not changed after init.
639 u32 ipath_init_ibmaxlen;
640 /* size of each rcvegrbuffer */
641 u32 ipath_rcvegrbufsize;
642 /* localbus width (1, 2, 4, 8, 16, 32) from config space */
643 u32 ipath_lbus_width;
644 /* localbus speed (HT: 200,400,800,1000; PCIe 2500) */
645 u32 ipath_lbus_speed;
647 * number of sequential ibcstatus changes for polling active/quiet
648 * (i.e., link not coming up).
651 /* low and high portions of MSI capability/vector */
653 /* saved after PCIe init for restore after reset */
655 /* MSI data (vector) saved for restore */
657 /* MLID programmed for this instance */
659 /* LID programmed for this instance */
661 /* list of pkeys programmed; 0 if not set */
664 * ASCII serial number, from flash, large enough for the original
665 * all-digit strings and the longer QLogic serial number format
668 /* human readable board version */
669 u8 ipath_boardversion[96];
670 u8 ipath_lbus_info[32]; /* human readable localbus info */
671 /* chip major rev, from ipath_revision */
673 /* chip minor rev, from ipath_revision */
675 /* board rev, from ipath_revision */
677 /* saved for restore after reset */
678 u8 ipath_pci_cacheline;
679 /* LID mask control */
681 /* link width supported */
682 u8 ipath_link_width_supported;
683 /* link speed supported */
684 u8 ipath_link_speed_supported;
685 u8 ipath_link_width_enabled;
686 u8 ipath_link_speed_enabled;
687 u8 ipath_link_width_active;
688 u8 ipath_link_speed_active;
689 /* Rx Polarity inversion (compensate for ~tx on partner) */
692 u8 ipath_r_portenable_shift;
693 u8 ipath_r_intravail_shift;
694 u8 ipath_r_tailupd_shift;
695 u8 ipath_r_portcfg_shift;
697 /* unit # of this chip, if present */
700 /* local link integrity counter */
701 u32 ipath_lli_counter;
702 /* local link integrity errors */
703 u32 ipath_lli_errors;
705 * Above counts only cases where _successive_ LocalLinkIntegrity
706 * errors were seen in the receive headers of kern-packets.
707 * Below are the three (monotonically increasing) counters
708 * maintained via GPIO interrupts on iba6120-rev2.
710 u32 ipath_rxfc_unsupvl_errs;
711 u32 ipath_overrun_thresh_errs;
715 * Not all devices managed by a driver instance are the same
716 * type, so these fields must be per-device.
718 u64 ipath_i_bitsextant;
719 ipath_err_t ipath_e_bitsextant;
720 ipath_err_t ipath_hwe_bitsextant;
723 * Below should be computable from number of ports,
724 * since they are never modified.
726 u64 ipath_i_rcvavail_mask;
727 u64 ipath_i_rcvurg_mask;
728 u16 ipath_i_rcvurg_shift;
729 u16 ipath_i_rcvavail_shift;
732 * Register bits for selecting i2c direction and values, used for
735 u8 ipath_gpio_sda_num;
736 u8 ipath_gpio_scl_num;
737 u8 ipath_i2c_chain_type;
741 /* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
742 spinlock_t ipath_gpio_lock;
745 * IB link and linktraining states and masks that vary per chip in
746 * some way. Set at init, to avoid each IB status change interrupt
755 u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
758 * shift/mask for linkcmd, linkinitcmd, maxpktlen in ibccontrol
759 * reg. Changes for IBA7220
761 u8 ibcc_lic_mask; /* LinkInitCmd */
762 u8 ibcc_lc_shift; /* LinkCmd */
763 u8 ibcc_mpl_shift; /* Maxpktlen */
767 /* used to override LED behavior */
768 u8 ipath_led_override; /* Substituted for normal value, if non-zero */
769 u16 ipath_led_override_timeoff; /* delta to next timer event */
770 u8 ipath_led_override_vals[2]; /* Alternates per blink-frame */
771 u8 ipath_led_override_phase; /* Just counts, LSB picks from vals[] */
772 atomic_t ipath_led_override_timer_active;
773 /* Used to flash LEDs in override mode */
774 struct timer_list ipath_led_override_timer;
776 /* Support (including locks) for EEPROM logging of errors and time */
777 /* control access to actual counters, timer */
778 spinlock_t ipath_eep_st_lock;
779 /* control high-level access to EEPROM */
780 struct mutex ipath_eep_lock;
781 /* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
782 uint64_t ipath_traffic_wds;
783 /* active time is kept in seconds, but logged in hours */
784 atomic_t ipath_active_time;
785 /* Below are nominal shadow of EEPROM, new since last EEPROM update */
786 uint8_t ipath_eep_st_errs[IPATH_EEP_LOG_CNT];
787 uint8_t ipath_eep_st_new_errs[IPATH_EEP_LOG_CNT];
788 uint16_t ipath_eep_hrs;
790 * masks for which bits of errs, hwerrs that cause
791 * each of the counters to increment.
793 struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
795 /* interrupt mitigation reload register info */
796 u16 ipath_jint_idle_ticks; /* idle clock ticks */
797 u16 ipath_jint_max_packets; /* max packets across all ports */
800 * lock for access to SerDes, and flags to sequence preset
801 * versus steady-state. 7220-only at the moment.
803 spinlock_t ipath_sdepb_lock;
804 u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
807 /* ipath_hol_state values (stopping/starting user proc, send flushing) */
808 #define IPATH_HOL_UP 0
809 #define IPATH_HOL_DOWN 1
810 /* ipath_hol_next toggle values, used when hol_state IPATH_HOL_DOWN */
811 #define IPATH_HOL_DOWNSTOP 0
812 #define IPATH_HOL_DOWNCONT 1
814 /* bit positions for sdma_status */
815 #define IPATH_SDMA_ABORTING 0
816 #define IPATH_SDMA_DISARMED 1
817 #define IPATH_SDMA_DISABLED 2
818 #define IPATH_SDMA_LAYERBUF 3
819 #define IPATH_SDMA_RUNNING 62
820 #define IPATH_SDMA_SHUTDOWN 63
822 /* bit combinations that correspond to abort states */
823 #define IPATH_SDMA_ABORT_NONE 0
824 #define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
825 #define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
826 (1UL << IPATH_SDMA_DISARMED))
827 #define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
828 (1UL << IPATH_SDMA_DISABLED))
829 #define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
830 (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
831 #define IPATH_SDMA_ABORT_MASK ((1UL<<IPATH_SDMA_ABORTING) | \
832 (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
834 #define IPATH_SDMA_BUF_NONE 0
835 #define IPATH_SDMA_BUF_MASK (1UL<<IPATH_SDMA_LAYERBUF)
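/*
 * Example (sketch): abort-state tests compare ipath_sdma_status against
 * the bit combinations above, with ipath_sdma_lock held, e.g.:
 *
 *	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
 *	if ((dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK) ==
 *	    IPATH_SDMA_ABORT_ABORTED)
 *		restart_ok = 1;		(disarm and disable both completed)
 *	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
 *
 * "restart_ok" is only an illustrative local variable.
 */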
837 /* Private data for file operations */
838 struct ipath_filedata {
839 struct ipath_portdata *pd;
842 struct ipath_user_sdma_queue *pq;
844 extern struct list_head ipath_dev_list;
845 extern spinlock_t ipath_devs_lock;
846 extern struct ipath_devdata *ipath_lookup(int unit);
848 int ipath_init_chip(struct ipath_devdata *, int);
849 int ipath_enable_wc(struct ipath_devdata *dd);
850 void ipath_disable_wc(struct ipath_devdata *dd);
851 int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
852 void ipath_shutdown_device(struct ipath_devdata *);
853 void ipath_clear_freeze(struct ipath_devdata *);
855 struct file_operations;
856 int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
857 struct cdev **cdevp, struct class_device **class_devp);
858 void ipath_cdev_cleanup(struct cdev **cdevp,
859 struct class_device **class_devp);
861 int ipath_diag_add(struct ipath_devdata *);
862 void ipath_diag_remove(struct ipath_devdata *);
864 extern wait_queue_head_t ipath_state_wait;
866 int ipath_user_add(struct ipath_devdata *dd);
867 void ipath_user_remove(struct ipath_devdata *dd);
869 struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
871 extern int ipath_diag_inuse;
873 irqreturn_t ipath_intr(int irq, void *devid);
874 int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
876 #if __IPATH_INFO || __IPATH_DBG
877 extern const char *ipath_ibcstatus_str[];
880 /* clean up any per-chip chip-specific stuff */
881 void ipath_chip_cleanup(struct ipath_devdata *);
882 /* clean up any chip type-specific stuff */
883 void ipath_chip_done(void);
885 /* check to see if we have to force ordering for write combining */
886 int ipath_unordered_wc(void);
888 void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
890 void ipath_cancel_sends(struct ipath_devdata *, int);
892 int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
893 void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
895 int ipath_parse_ushort(const char *str, unsigned short *valp);
897 void ipath_kreceive(struct ipath_portdata *);
898 int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
899 int ipath_reset_device(int);
900 void ipath_get_faststats(unsigned long);
901 int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
902 int ipath_set_linkstate(struct ipath_devdata *, u8);
903 int ipath_set_mtu(struct ipath_devdata *, u16);
904 int ipath_set_lid(struct ipath_devdata *, u32, u8);
905 int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
906 void ipath_enable_armlaunch(struct ipath_devdata *);
907 void ipath_disable_armlaunch(struct ipath_devdata *);
908 void ipath_hol_down(struct ipath_devdata *);
909 void ipath_hol_up(struct ipath_devdata *);
910 void ipath_hol_event(unsigned long);
911 void ipath_toggle_rclkrls(struct ipath_devdata *);
912 void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
913 void ipath_set_relock_poll(struct ipath_devdata *, int);
914 void ipath_shutdown_relock_poll(struct ipath_devdata *);
916 /* for use in system calls, where we want to know device type, etc. */
917 #define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
918 #define subport_fp(fp) \
919 ((struct ipath_filedata *)(fp)->private_data)->subport
920 #define tidcursor_fp(fp) \
921 ((struct ipath_filedata *)(fp)->private_data)->tidcursor
922 #define user_sdma_queue_fp(fp) \
923 ((struct ipath_filedata *)(fp)->private_data)->pq
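/*
 * Usage sketch: the per-open state hangs off fp->private_data, so a
 * (hypothetical) file-operation handler might start with:
 *
 *	struct ipath_portdata *pd = port_fp(fp);
 *	unsigned subport = subport_fp(fp);
 *	struct ipath_devdata *dd = pd->port_dd;
 */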
926 * values for ipath_flags
928 /* chip can report link latency (IB 1.2) */
929 #define IPATH_HAS_LINK_LATENCY 0x1
930 /* The chip is up and initted */
931 #define IPATH_INITTED 0x2
932 /* set if any user code has set kr_rcvhdrsize */
933 #define IPATH_RCVHDRSZ_SET 0x4
934 /* The chip is present and valid for accesses */
935 #define IPATH_PRESENT 0x8
936 /* HT link0 is only 8 bits wide, ignore upper byte crc
938 #define IPATH_8BIT_IN_HT0 0x10
939 /* HT link1 is only 8 bits wide, ignore upper byte crc
941 #define IPATH_8BIT_IN_HT1 0x20
942 /* The link is down */
943 #define IPATH_LINKDOWN 0x40
944 /* The link level is up (0x11) */
945 #define IPATH_LINKINIT 0x80
946 /* The link is in the armed (0x21) state */
947 #define IPATH_LINKARMED 0x100
948 /* The link is in the active (0x31) state */
949 #define IPATH_LINKACTIVE 0x200
950 /* link current state is unknown */
951 #define IPATH_LINKUNK 0x400
952 /* Write combining flush needed for PIO */
953 #define IPATH_PIO_FLUSH_WC 0x1000
954 /* rcvhdrq tail register is not DMA'ed to memory by the chip */
955 #define IPATH_NODMA_RTAIL 0x2000
956 /* no IB cable, or no device on IB cable */
957 #define IPATH_NOCABLE 0x4000
958 /* Supports port zero per-packet receive interrupts via GPIO */
960 #define IPATH_GPIO_INTR 0x8000
961 /* uses the coded 4-byte TID, not 8-byte */
962 #define IPATH_4BYTE_TID 0x10000
963 /* packet/word counters are 32 bit, else those 4 counters
965 #define IPATH_32BITCOUNTERS 0x20000
966 /* Interrupt register is 64 bits */
967 #define IPATH_INTREG_64 0x40000
968 /* can miss port0 rx interrupts */
969 #define IPATH_DISABLED 0x80000 /* administratively disabled */
970 /* Use GPIO interrupts for new counters */
971 #define IPATH_GPIO_ERRINTRS 0x100000
972 #define IPATH_SWAP_PIOBUFS 0x200000
973 /* Supports Send DMA */
974 #define IPATH_HAS_SEND_DMA 0x400000
975 /* Supports Send Count (not just word count) in PBC */
976 #define IPATH_HAS_PBC_CNT 0x800000
977 /* Suppress heartbeat, even if turning off loopback */
978 #define IPATH_NO_HRTBT 0x1000000
979 #define IPATH_HAS_THRESH_UPDATE 0x4000000
980 #define IPATH_HAS_MULT_IB_SPEED 0x8000000
981 #define IPATH_IB_AUTONEG_INPROG 0x10000000
982 #define IPATH_IB_AUTONEG_FAILED 0x20000000
983 /* Link intentionally disabled (linkdown); do not attempt to bring it up */
984 #define IPATH_IB_LINK_DISABLED 0x40000000
985 #define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
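/*
 * ipath_flags is tested with plain bitwise ANDs; a sketch of a typical
 * guard before touching chip registers or acting on link state:
 *
 *	if (!(dd->ipath_flags & IPATH_PRESENT))
 *		return;			(chip regs not accessible)
 *	if (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))
 *		...link is at least armed...
 */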
987 /* Bits in GPIO for the added interrupts */
988 #define IPATH_GPIO_PORT0_BIT 2
989 #define IPATH_GPIO_RXUVL_BIT 3
990 #define IPATH_GPIO_OVRUN_BIT 4
991 #define IPATH_GPIO_LLI_BIT 5
992 #define IPATH_GPIO_ERRINTR_MASK 0x38
994 /* portdata flag bit offsets */
995 /* waiting for a packet to arrive */
996 #define IPATH_PORT_WAITING_RCV 2
997 /* master has not finished initializing */
998 #define IPATH_PORT_MASTER_UNINIT 4
999 /* waiting for an urgent packet to arrive */
1000 #define IPATH_PORT_WAITING_URG 5
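/*
 * These are bit numbers used with the atomic bitops on pd->port_flag;
 * a simplified (hypothetical) sketch of the wait/wakeup handshake:
 *
 *	set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
 *	...sleep on pd->port_wait...
 *	(later, from the receive path:)
 *	if (test_and_clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag))
 *		wake_up_interruptible(&pd->port_wait);
 */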
1002 /* free up any allocated data when a port is closed */
1003 void ipath_free_data(struct ipath_portdata *dd);
1004 u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
1005 void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1006 unsigned len, int avail);
1007 void ipath_init_iba7220_funcs(struct ipath_devdata *);
1008 void ipath_init_iba6120_funcs(struct ipath_devdata *);
1009 void ipath_init_iba6110_funcs(struct ipath_devdata *);
1010 void ipath_get_eeprom_info(struct ipath_devdata *);
1011 int ipath_update_eeprom_log(struct ipath_devdata *dd);
1012 void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
1013 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
1014 void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
1015 void ipath_force_pio_avail_update(struct ipath_devdata *);
1016 void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
1019 * Set LED override, only the two LSBs have "public" meaning, but
1020 * any non-zero value substitutes them for the Link and LinkTrain
1023 #define IPATH_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
1024 #define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */
1025 void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
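/*
 * Example (sketch): force both LEDs on for identification, using only the
 * two "public" bits defined above:
 *
 *	ipath_set_led_override(dd, IPATH_LED_PHYS | IPATH_LED_LOG);
 */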
1027 /* send dma routines */
1028 int setup_sdma(struct ipath_devdata *);
1029 void teardown_sdma(struct ipath_devdata *);
1030 void ipath_restart_sdma(struct ipath_devdata *);
1031 void ipath_sdma_intr(struct ipath_devdata *);
1032 int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
1033 u32, struct ipath_verbs_txreq *);
1034 /* ipath_sdma_lock should be locked before calling this. */
1035 int ipath_sdma_make_progress(struct ipath_devdata *dd);
1037 /* must be called under ipath_sdma_lock */
1038 static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
1040 return dd->ipath_sdma_descq_cnt -
1041 (dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
1042 1 - dd->ipath_sdma_desc_nreserved;
1045 static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
1047 dd->ipath_sdma_desc_nreserved += cnt;
1050 static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
1052 dd->ipath_sdma_desc_nreserved -= cnt;
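/*
 * Usage sketch (hypothetical caller): the helpers above assume
 * ipath_sdma_lock is held, so reserving ring space might look like:
 *
 *	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
 *	if (ipath_sdma_descq_freecnt(dd) < ndesc)
 *		ret = -EBUSY;		(ring too full; caller retries later)
 *	else
 *		ipath_sdma_desc_reserve(dd, ndesc);
 *	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
 */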
1056 * number of words used for protocol header if not set by ipath_userinit();
1058 #define IPATH_DFLT_RCVHDRSIZE 9
1060 int ipath_get_user_pages(unsigned long, size_t, struct page **);
1061 void ipath_release_user_pages(struct page **, size_t);
1062 void ipath_release_user_pages_on_close(struct page **, size_t);
1063 int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
1064 int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
1065 int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
1066 int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
1068 /* these are used for the registers that vary with port */
1069 void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
1073 * We could have a single register get/put routine that takes a group type,
1074 * but this is somewhat clearer and cleaner. It also gives us some error
1075 * checking. 64 bit register reads should always work, but are inefficient
1076 * on Opteron (the northbridge always generates 2 separate HT 32 bit reads),
1077 * so we use kreg32 wherever possible. User register and counter register
1078 * reads are always 32 bit reads, so only one form of those routines.
1082 * At the moment, none of the s-registers are writable, so no
1083 * ipath_write_sreg().
1087 * ipath_read_ureg32 - read 32-bit virtualized per-port register
1089 * @regno: register number
1090 * @port: port number
1092 * Return the contents of a register that is virtualized to be per port.
1093 * Returns -1 on errors (not distinguishable from valid contents at
1094 * runtime; we may add a separate error variable at some point).
1096 static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
1097 ipath_ureg regno, int port)
1099 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1102 return readl(regno + (u64 __iomem *)
1103 (dd->ipath_uregbase +
1104 (char __iomem *)dd->ipath_kregbase +
1105 dd->ipath_ureg_align * port));
1109 * ipath_write_ureg - write virtualized per-port register
1111 * @regno: register number
1115 * Write the contents of a register that is virtualized to be per port.
1117 static inline void ipath_write_ureg(const struct ipath_devdata *dd,
1118 ipath_ureg regno, u64 value, int port)
1120 u64 __iomem *ubase = (u64 __iomem *)
1121 (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
1122 dd->ipath_ureg_align * port);
1123 if (dd->ipath_kregbase)
1124 writeq(value, &ubase[regno]);
1127 static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
1130 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1132 return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
1135 static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
1138 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1141 return readq(&dd->ipath_kregbase[regno]);
1144 static inline void ipath_write_kreg(const struct ipath_devdata *dd,
1145 ipath_kreg regno, u64 value)
1147 if (dd->ipath_kregbase)
1148 writeq(value, &dd->ipath_kregbase[regno]);
1151 static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
1154 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1157 return readq(regno + (u64 __iomem *)
1158 (dd->ipath_cregbase +
1159 (char __iomem *)dd->ipath_kregbase));
1162 static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
1165 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1167 return readl(regno + (u64 __iomem *)
1168 (dd->ipath_cregbase +
1169 (char __iomem *)dd->ipath_kregbase));
1172 static inline void ipath_write_creg(const struct ipath_devdata *dd,
1173 ipath_creg regno, u64 value)
1175 if (dd->ipath_kregbase)
1176 writeq(value, regno + (u64 __iomem *)
1177 (dd->ipath_cregbase +
1178 (char __iomem *)dd->ipath_kregbase));
1181 static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
1183 *((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
1186 static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
1188 return (u32) le64_to_cpu(*((volatile __le64 *)
1189 pd->port_rcvhdrtail_kvaddr));
1192 static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
1194 const struct ipath_devdata *dd = pd->port_dd;
1197 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1201 rhf_addr = (__le32 *) pd->port_rcvhdrq +
1202 pd->port_head + dd->ipath_rhf_offset;
1203 seq = ipath_hdrget_seq(rhf_addr);
1204 hdrqtail = pd->port_head;
1205 if (seq == pd->port_seq_cnt)
1208 hdrqtail = ipath_get_rcvhdrtail(pd);
1213 static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
1215 return (dd->ipath_flags & IPATH_INTREG_64) ?
1216 ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
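/*
 * Sketch of how the IPATH_INTREG_64 abstraction is typically used: the
 * interrupt path reads and clears the interrupt status, with register
 * names coming from ipath_registers.h, roughly:
 *
 *	u64 istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
 *	if (istat)
 *		ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 */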
1220 * from contents of IBCStatus (or a saved copy), return linkstate
1221 * Report ACTIVE_DEFER as ACTIVE, because we treat them the same
1222 * everywhere, anyway (and should be, for almost all purposes).
1224 static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
1226 u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
1227 INFINIPATH_IBCS_LINKSTATE_MASK;
1228 if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
1229 state = INFINIPATH_IBCS_L_STATE_ACTIVE;
1233 /* from contents of IBCStatus (or a saved copy), return linktrainingstate */
1234 static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
1236 return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1241 * from contents of IBCStatus (or a saved copy), return logical link state
1242 * combination of link state and linktraining state (down, active, init,
1245 static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
1248 ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1251 (INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
1259 struct device_driver;
1261 extern const char ib_ipath_version[];
1263 extern struct attribute_group *ipath_driver_attr_groups[];
1265 int ipath_device_create_group(struct device *, struct ipath_devdata *);
1266 void ipath_device_remove_group(struct device *, struct ipath_devdata *);
1267 int ipath_expose_reset(struct device *);
1269 int ipath_init_ipathfs(void);
1270 void ipath_exit_ipathfs(void);
1271 int ipathfs_add_device(struct ipath_devdata *);
1272 int ipathfs_remove_device(struct ipath_devdata *);
1275 * dma_addr wrappers - all 0's invalid for hw
1277 dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
1279 dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
1280 const char *ipath_get_unit_name(int unit);
1283 * Flush write combining store buffers (if present) and perform a write barrier.
1286 #if defined(CONFIG_X86_64)
1287 #define ipath_flush_wc() asm volatile("sfence" ::: "memory")
1289 #define ipath_flush_wc() wmb()
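/*
 * Sketch of the usual PIO send sequence that relies on ipath_flush_wc()
 * (piobuf as returned by ipath_getpiobuf(); local names are illustrative):
 *
 *	writeq(pbc, piobuf);			(PBC word first)
 *	ipath_flush_wc();			(make sure the PBC lands first)
 *	__iowrite32_copy(piobuf + 2, hdr, hdrwords);
 *	ipath_flush_wc();			(drain the last WC store buffer)
 */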
1292 extern unsigned ipath_debug; /* debugging bit mask */
1293 extern unsigned ipath_linkrecovery;
1294 extern unsigned ipath_mtu4096;
1295 extern struct mutex ipath_mutex;
1297 #define IPATH_DRV_NAME "ib_ipath"
1298 #define IPATH_MAJOR 233
1299 #define IPATH_USER_MINOR_BASE 0
1300 #define IPATH_DIAGPKT_MINOR 127
1301 #define IPATH_DIAG_MINOR_BASE 129
1302 #define IPATH_NMINORS 255
1304 #define ipath_dev_err(dd,fmt,...) \
1306 const struct ipath_devdata *__dd = (dd); \
1308 dev_err(&__dd->pcidev->dev, "%s: " fmt, \
1309 ipath_get_unit_name(__dd->ipath_unit), \
1312 printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
1313 ipath_get_unit_name(__dd->ipath_unit), \
1317 #if _IPATH_DEBUGGING
1319 # define __IPATH_DBG_WHICH(which,fmt,...) \
1321 if (unlikely(ipath_debug & (which))) \
1322 printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
1323 __func__,##__VA_ARGS__); \
1326 # define ipath_dbg(fmt,...) \
1327 __IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
1328 # define ipath_cdbg(which,fmt,...) \
1329 __IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
1331 #else /* ! _IPATH_DEBUGGING */
1333 # define ipath_dbg(fmt,...)
1334 # define ipath_cdbg(which,fmt,...)
1336 #endif /* _IPATH_DEBUGGING */
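/*
 * Usage sketch for the debug macros (compiled out unless _IPATH_DEBUGGING,
 * and gated at runtime by the ipath_debug bitmask):
 *
 *	ipath_dbg("unit %u link went down\n", dd->ipath_unit);
 *	ipath_cdbg(VERBOSE, "intstatus %llx\n", (unsigned long long) istat);
 */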
1339 * this is used for formatting hw error messages...
1341 struct ipath_hwerror_msgs {
1346 #define INFINIPATH_HWE_MSG(a, b) { .mask = INFINIPATH_HWE_##a, .msg = b }
1348 /* in ipath_intr.c... */
1349 void ipath_format_hwerrors(u64 hwerrs,
1350 const struct ipath_hwerror_msgs *hwerrmsgs,
1352 char *msg, size_t lmsg);
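/*
 * Usage sketch: callers build a table of mask/message pairs with
 * INFINIPATH_HWE_MSG and hand it to ipath_format_hwerrors(); the entry
 * below is only illustrative (INFINIPATH_HWE_MEMBISTFAILED comes from
 * ipath_registers.h):
 *
 *	static const struct ipath_hwerror_msgs msgs[] = {
 *		INFINIPATH_HWE_MSG(MEMBISTFAILED, "memory BIST failed"),
 *	};
 *	char msg[512];
 *	ipath_format_hwerrors(hwerrs, msgs, ARRAY_SIZE(msgs), msg, sizeof msg);
 */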
1354 #endif /* _IPATH_KERNEL_H */