#ifndef _IPATH_KERNEL_H
#define _IPATH_KERNEL_H
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * This header file is the base header file for infinipath kernel code;
 * ipath_user.h serves a similar purpose for user code.
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/io.h>

#include "ipath_common.h"
#include "ipath_debug.h"
#include "ipath_registers.h"
/* only s/w major version of InfiniPath we can handle */
#define IPATH_CHIP_VERS_MAJ 2U

/* don't care about this except printing */
#define IPATH_CHIP_VERS_MIN 0U

/* temporary, maybe always */
extern struct infinipath_stats ipath_stats;

#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
struct ipath_portdata {
        void **port_rcvegrbuf;
        dma_addr_t *port_rcvegrbuf_phys;
        /* rcvhdrq base, needs mmap before useful */
        /* kernel virtual address where hdrqtail is updated */
        u64 *port_rcvhdrtail_kvaddr;
        /* page * used for uaddr */
        struct page *port_rcvhdrtail_pagep;
        /*
         * temp buffer for expected send setup, allocated at open, instead
         * of allocating it on each setup request
         */
        void *port_tid_pg_list;
        /* when waiting for rcv or pioavail */
        wait_queue_head_t port_wait;
        /*
         * rcvegr bufs base, physical; must fit in 44 bits
         * (so 32 bit programs' mmap64 of 44 bits works)
         */
        dma_addr_t port_rcvegr_phys;
        /* mmap of hdrq, must fit in 44 bits */
        dma_addr_t port_rcvhdrq_phys;
        /*
         * the actual user address that we ipath_mlock'ed, so we can
         * ipath_munlock it at close
         */
        unsigned long port_rcvhdrtail_uaddr;
        /*
         * number of opens on this instance (0 or 1; ignoring forks, dup,
         * etc.)
         */
        /*
         * how much space to leave at start of eager TID entries for
         * protocol use, on each TID
         */
        /* instead of calculating it */
        /* chip offset of PIO buffers for this port */
        /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
        u32 port_rcvegrbuf_chunks;
        /* how many egrbufs per chunk */
        u32 port_rcvegrbufs_perchunk;
        /* order for port_rcvegrbuf_pages */
        size_t port_rcvegrbuf_size;
        /* rcvhdrq size (for freeing) */
        size_t port_rcvhdrq_size;
        /* next expected TID to check when looking for free */
        /* next expected TID to check */
        unsigned long port_flag;
        /* WAIT_RCV that timed out, no interrupt */
        /* WAIT_PIO that timed out, no interrupt */
        /* WAIT_RCV already happened, no wait */
        /* WAIT_PIO already happened, no wait */
        /* total number of rcvhdrqfull errors */
        /* pid of process using this port */
        /* same size as task_struct .comm[] */
        /* pkeys set by this use of this port */
        /* so file ops can get at unit */
        struct ipath_devdata *port_dd;
};
/*
 * control information for layered drivers
 */
struct _ipath_layer {
        void *l_arg;
};

/* Verbs layer interface */
struct _verbs_layer {
        void *l_arg;
        struct timer_list l_timer;
};
struct ipath_devdata {
        struct list_head ipath_list;

        struct ipath_kregs const *ipath_kregs;
        struct ipath_cregs const *ipath_cregs;

        /* mem-mapped pointer to base of chip regs */
        u64 __iomem *ipath_kregbase;
        /* end of mem-mapped chip space; range checking */
        u64 __iomem *ipath_kregend;
        /* physical address of chip for io_remap, etc. */
        unsigned long ipath_physaddr;
        /* base of memory allocated for ipath_kregbase, for free */
        u64 *ipath_kregalloc;
        /*
         * version of kregbase that doesn't have high bits set (for 32 bit
         * programs, so mmap64 44 bit works)
         */
        u64 __iomem *ipath_kregvirt;
        /*
         * virtual address where port0 rcvhdrqtail is updated for this unit.
         * only written to by the chip, not the driver.
         */
        volatile __le64 *ipath_hdrqtailptr;
        dma_addr_t ipath_dma_addr;
        /* ipath_cfgports pointers */
        struct ipath_portdata **ipath_pd;
        /* sk_buffs used by port 0 eager receive queue */
        struct sk_buff **ipath_port0_skbs;
        /* kvirt address of 1st 2k pio buffer */
        void __iomem *ipath_pio2kbase;
        /* kvirt address of 1st 4k pio buffer */
        void __iomem *ipath_pio4kbase;
        /*
         * points to area where PIOavail registers will be DMA'ed.
         * Has to be on a page of its own, because the page will be
         * mapped into user program space. This copy is *ONLY* ever
         * written by DMA, not by the driver! Need a copy per device
         * when we get to multiple devices.
         */
        volatile __le64 *ipath_pioavailregs_dma;
        /* physical address where updates occur */
        dma_addr_t ipath_pioavailregs_phys;
        struct _ipath_layer ipath_layer;
        /* setup interrupts */
        int (*ipath_f_intrsetup)(struct ipath_devdata *);
        /* setup on-chip bus config */
        int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
        /* hard reset chip */
        int (*ipath_f_reset)(struct ipath_devdata *);
        int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
                                     size_t);
        void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
        void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
                                        size_t);
        void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
        int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
        int (*ipath_f_early_init)(struct ipath_devdata *);
        void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
        void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem *,
                                u32, unsigned long);
        void (*ipath_f_tidtemplate)(struct ipath_devdata *);
        void (*ipath_f_cleanup)(struct ipath_devdata *);
        void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
        /* fill out chip-specific fields */
        int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
        struct _verbs_layer verbs_layer;
        /* total dwords sent (summed from counter) */
        /* total dwords rcvd (summed from counter) */
        /* total packets sent (summed from counter) */
        /* total packets rcvd (summed from counter) */
        /* ipath_statusp initially points to this. */
        /* GUID for this interface, in network order */
        /*
         * aggregate of error bits reported since last cleared, for
         * limiting of error reporting
         */
        ipath_err_t ipath_lasterror;
        /*
         * aggregate of error bits reported since last cleared, for
         * limiting of hwerror reporting
         */
        ipath_err_t ipath_lasthwerror;
        /*
         * errors masked because they occur too fast, also includes errors
         * that are always ignored (ipath_ignorederrs)
         */
        ipath_err_t ipath_maskederrs;
        /* time in jiffies at which to re-enable maskederrs */
        unsigned long ipath_unmasktime;
        /*
         * errors always ignored (masked), at least for a given
         * chip/device, because they are wrong or not useful
         */
        ipath_err_t ipath_ignorederrs;
        /* count of egrfull errors, combined for all ports */
        u64 ipath_last_tidfull;
        /* for ipath_qcheck() */
        u64 ipath_lastport0rcv_cnt;
        /* template for writing TIDs */
        u64 ipath_tidtemplate;
        /* value to write to free TIDs */
        u64 ipath_tidinvalid;
        /* PE-800 rcv interrupt setup */
        u64 ipath_rhdrhead_intr_off;

        /* size of memory at ipath_kregbase */
        /* number of registers used for pioavail */
        /* IPATH_POLL, etc. */
        /* ipath_flags sma is waiting for */
        u32 ipath_sma_state_wanted;
        /*
         * last buffer for user use; first buf for kernel use is this
         * index
         */
        u32 ipath_lastport_piobuf;
        /* is a stats timer active */
        u32 ipath_stats_timer_active;
        /* dwords sent read from counter */
        /* dwords received read from counter */
        /* sent packets read from counter */
        /* received packets read from counter */
        /* pio bufs allocated per port */
        /*
         * number of ports configured as max; zero is set to number chip
         * supports, less gives more pio bufs/port, etc.
         */
        /* port0 rcvhdrq head offset */
        /* count of port 0 hdrqfull errors */
        u32 ipath_p0_hdrqfull;

        /*
         * (*cfgports) used to suppress multiple instances of same
         * port staying stuck at same point
         */
        u32 *ipath_lastrcvhdrqtails;
        /*
         * (*cfgports) used to suppress multiple instances of same
         * port staying stuck at same point
         */
        u32 *ipath_lastegrheads;
        /*
         * index of last piobuffer we used. Speeds up searching, by
         * starting at this point. Doesn't matter if multiple CPUs use and
         * update it; the last update is the only write that matters.
         * Whenever it wraps, we update shadow copies. Need a copy per
         * device when we get to multiple devices.
         */
        u32 ipath_lastpioindex;
        /* max length of freezemsg */
        /*
         * consecutive times we wanted a PIO buffer but were unable to
         * get one that was available
         */
        u32 ipath_consec_nopiobuf;
        /*
         * hint that we should update ipath_pioavailshadow before
         * looking for a PIO buffer
         */
        u32 ipath_upd_pio_shadow;
        /* so we can rewrite it after a chip reset */
        /* so we can rewrite it after a chip reset */
        /* sequential tries for SMA send and no bufs */
        u32 ipath_nosma_bufs;
        /* duration (seconds) ipath_nosma_bufs set */
        u32 ipath_nosma_secs;
        /* HT/PCI Vendor ID (here for NodeInfo) */
        /* HT/PCI Device ID (here for NodeInfo) */
        /* offset in HT config space of slave/primary interface block */
        u8 ipath_ht_slave_off;
        /* for write combining settings */
        unsigned long ipath_wc_cookie;
        /* ref count for each pkey */
        atomic_t ipath_pkeyrefs[4];
        /* shadow copy of all exptids physaddr; used only by funcsim */
        u64 *ipath_tidsimshadow;
        /* shadow copy of struct page *'s for exp tid pages */
        struct page **ipath_pageshadow;
        /* lock to workaround chip bug 9437 */
        spinlock_t ipath_tid_lock;

        /*
         * this address is mapped readonly into user processes so they can
         * get status cheaply, whenever they want.
         */
        /* freeze msg if hw error put chip in freeze */
        char *ipath_freezemsg;
        /* pci access data structure */
        struct pci_dev *pcidev;
        struct cdev *user_cdev;
        struct cdev *diag_cdev;
        struct class_device *user_class_dev;
        struct class_device *diag_class_dev;
        /* timer used to prevent stats overflow, error throttling, etc. */
        struct timer_list ipath_stats_timer;
        /* check for stale messages in rcv queue */
        /* only allow one intr at a time. */
        unsigned long ipath_rcv_pending;
        /*
         * Shadow copies of registers; size indicates read access size.
         * Most of them are readonly, but some are write-only registers,
         * where we manipulate the bits in the shadow copy, and then write
         * the shadow copy to infinipath.
         *
         * We deliberately make most of these 32 bits, since they have
         * restricted range. For any that we read, we want to generate 32
         * bit accesses, since Opteron will generate 2 separate 32 bit HT
         * transactions for a 64 bit read, and we want to avoid unnecessary
         * HT transactions.
         */

        /* This is the 64 bit group */

        /*
         * shadow of pioavail, check to be sure it's large enough at
         * init time
         */
        unsigned long ipath_pioavailshadow[8];
        /* shadow of kr_gpio_out, for rmw ops */
        /* kr_revision shadow */
        /*
         * shadow of ibcctrl, for interrupt handling of link changes, etc.
         */
        /*
         * last ibcstatus, to suppress "duplicate" status change messages
         */
        u64 ipath_lastibcstat;
        /* hwerrmask shadow */
        ipath_err_t ipath_hwerrmask;
        /* interrupt config reg shadow */
        /* kr_sendpiobufbase value */
        u64 ipath_piobufbase;
        /* these are the "32 bit" regs */

        /*
         * number of GUIDs in the flash for this interface; may need some
         * rethinking for setting on other ifaces
         */
        /*
         * the following two are 32-bit bitmasks, but {test,clear,set}_bit
         * all expect bit fields to be "unsigned long"
         */
        /* shadow kr_rcvctrl */
        unsigned long ipath_rcvctrl;
        /* shadow kr_sendctrl */
        unsigned long ipath_sendctrl;

        /* value we put in kr_rcvhdrcnt */
        /* value we put in kr_rcvhdrsize */
        u32 ipath_rcvhdrsize;
        /* value we put in kr_rcvhdrentsize */
        u32 ipath_rcvhdrentsize;
        /* offset of last entry in rcvhdrq */
        /* kr_portcnt value */
        /* kr_pagealign value */
        /* number of "2KB" PIO buffers */
        /* size in bytes of "2KB" PIO buffers */
        /* number of "4KB" PIO buffers */
        /* size in bytes of "4KB" PIO buffers */
        /* kr_rcvegrbase value */
        u32 ipath_rcvegrbase;
        /* kr_rcvegrcnt value */
        /* kr_rcvtidbase value */
        u32 ipath_rcvtidbase;
        /* kr_rcvtidcnt value */
        /* kr_counterregbase */
        /* shadow the control register contents */
        /* shadow the gpio output contents */
        /* PCI revision register (HTC rev on FPGA) */
        /* chip address space used by 4k pio buffers */
        /* The MTU programmed for this unit */
        /*
         * The max size IB packet, including IB headers, that we can send.
         * Starts same as ipath_piosize, but is affected when ibmtu is
         * changed, or by size of eager buffers
         */
        /*
         * ibmaxlen at init time, limited by chip and by receive buffer
         * size. Not changed after init.
         */
        u32 ipath_init_ibmaxlen;
        /* size of each rcvegrbuffer */
        u32 ipath_rcvegrbufsize;
        /* width (2,4,8,16,32) from HT config reg */
        /* HT speed (200,400,800,1000) from HT config */
        /* ports waiting for PIOavail intr */
        unsigned long ipath_portpiowait;
        /*
         * number of sequential ibcstatus changes for polling active/quiet
         * (i.e., link not coming up).
         */
        /* low and high portions of MSI capability/vector */
        /* saved after PCIe init for restore after reset */
        /* MSI data (vector) saved for restore */
        /* MLID programmed for this instance */
        /* LID programmed for this instance */
        /* list of pkeys programmed; 0 if not set */
        /* ASCII serial number, from flash */
        /* human readable board version */
        u8 ipath_boardversion[80];
        /* chip major rev, from ipath_revision */
        /* chip minor rev, from ipath_revision */
        /* board rev, from ipath_revision */
        /* unit # of this chip, if present */
        /* saved for restore after reset */
        u8 ipath_pci_cacheline;
        /* LID mask control */
};
extern volatile __le64 *ipath_port0_rcvhdrtail;
extern dma_addr_t ipath_port0_rcvhdrtail_dma;

#define IPATH_PORT0_RCVHDRTAIL_SIZE PAGE_SIZE

extern struct list_head ipath_dev_list;
extern spinlock_t ipath_devs_lock;
extern struct ipath_devdata *ipath_lookup(int unit);
extern u16 ipath_layer_rcv_opcode;
extern int __ipath_layer_intr(struct ipath_devdata *, u32);
extern int ipath_layer_intr(struct ipath_devdata *, u32);
extern int __ipath_layer_rcv(struct ipath_devdata *, void *,
                             struct sk_buff *);
extern int __ipath_layer_rcv_lid(struct ipath_devdata *, void *);
extern int __ipath_verbs_piobufavail(struct ipath_devdata *);
extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32);
void ipath_layer_add(struct ipath_devdata *);
void ipath_layer_remove(struct ipath_devdata *);

int ipath_init_chip(struct ipath_devdata *, int);
int ipath_enable_wc(struct ipath_devdata *dd);
void ipath_disable_wc(struct ipath_devdata *dd);
int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp);
void ipath_shutdown_device(struct ipath_devdata *);
struct file_operations;
int ipath_cdev_init(int minor, char *name, struct file_operations *fops,
                    struct cdev **cdevp, struct class_device **class_devp);
void ipath_cdev_cleanup(struct cdev **cdevp,
                        struct class_device **class_devp);
int ipath_diag_add(struct ipath_devdata *);
void ipath_diag_remove(struct ipath_devdata *);
void ipath_diag_bringup_link(struct ipath_devdata *);

extern wait_queue_head_t ipath_sma_state_wait;

int ipath_user_add(struct ipath_devdata *dd);
void ipath_user_remove(struct ipath_devdata *dd);

struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);

extern int ipath_diag_inuse;

irqreturn_t ipath_intr(int irq, void *devid, struct pt_regs *regs);
void ipath_decode_err(char *buf, size_t blen, ipath_err_t err);
#if __IPATH_INFO || __IPATH_DBG
extern const char *ipath_ibcstatus_str[];
#endif

/* clean up any per-chip chip-specific stuff */
void ipath_chip_cleanup(struct ipath_devdata *);
/* clean up any chip type-specific stuff */
void ipath_chip_done(void);
/* check to see if we have to force ordering for write combining */
int ipath_unordered_wc(void);

void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
                          unsigned cnt);

int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
void ipath_free_pddata(struct ipath_devdata *, u32, int);

int ipath_parse_ushort(const char *str, unsigned short *valp);

int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
void ipath_set_ib_lstate(struct ipath_devdata *, int);
void ipath_kreceive(struct ipath_devdata *);
int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
int ipath_reset_device(int);
void ipath_get_faststats(unsigned long);

/* for use in system calls, where we want to know device type, etc. */
#define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data)
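
/*
 * Usage sketch (illustrative only; example_poll is a hypothetical handler,
 * not part of this driver): the char device file ops store the port pointer
 * in file->private_data at open time, so later operations can recover both
 * the port and its device through port_fp():
 *
 *	static unsigned int example_poll(struct file *fp, poll_table *pt)
 *	{
 *		struct ipath_portdata *pd = port_fp(fp);
 *		struct ipath_devdata *dd = pd->port_dd;
 *
 *		poll_wait(fp, &pd->port_wait, pt);
 *		return (dd->ipath_flags & IPATH_PRESENT) ? 0 : POLLERR;
 *	}
 */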
/*
 * values for ipath_flags
 */
/* The chip is up and initted */
#define IPATH_INITTED 0x2
/* set if any user code has set kr_rcvhdrsize */
#define IPATH_RCVHDRSZ_SET 0x4
/* The chip is present and valid for accesses */
#define IPATH_PRESENT 0x8
/* HT link0 is only 8 bits wide, ignore upper byte crc errors, etc. */
#define IPATH_8BIT_IN_HT0 0x10
/* HT link1 is only 8 bits wide, ignore upper byte crc errors, etc. */
#define IPATH_8BIT_IN_HT1 0x20
/* The link is down */
#define IPATH_LINKDOWN 0x40
/* The link level is up (0x11) */
#define IPATH_LINKINIT 0x80
/* The link is in the armed (0x21) state */
#define IPATH_LINKARMED 0x100
/* The link is in the active (0x31) state */
#define IPATH_LINKACTIVE 0x200
/* link current state is unknown */
#define IPATH_LINKUNK 0x400
/* no IB cable, or no device on IB cable */
#define IPATH_NOCABLE 0x4000
/* Supports port zero per packet receive interrupts via GPIO */
#define IPATH_GPIO_INTR 0x8000
/* uses the coded 4byte TID, not 8 byte */
#define IPATH_4BYTE_TID 0x10000
/* packet/word counters are 32 bit, else those 4 counters are 64 bit */
#define IPATH_32BITCOUNTERS 0x20000
/* can miss port0 rx interrupts */
#define IPATH_POLL_RX_INTR 0x40000
#define IPATH_DISABLED 0x80000 /* administratively disabled */
/* portdata flag bit offsets */
/* waiting for a packet to arrive */
#define IPATH_PORT_WAITING_RCV 2
/* waiting for a PIO buffer to be available */
#define IPATH_PORT_WAITING_PIO 3
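
/*
 * These are bit numbers, not masks; they are meant for the atomic bitops on
 * pd->port_flag. Sketch of the assumed wait/wake pattern (not quoted from
 * the driver):
 *
 *	set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
 *	wait_event_interruptible(pd->port_wait,
 *		!test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag));
 */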
/* free up any allocated data at closes */
void ipath_free_data(struct ipath_portdata *dd);
int ipath_waitfor_mdio_cmdready(struct ipath_devdata *);
int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *);
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
/* init PE-800-specific func */
void ipath_init_pe800_funcs(struct ipath_devdata *);
/* init HT-400-specific func */
void ipath_init_ht400_funcs(struct ipath_devdata *);
void ipath_get_eeprom_info(struct ipath_devdata *);
u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);

/*
 * number of words used for protocol header if not set by ipath_userinit()
 */
#define IPATH_DFLT_RCVHDRSIZE 9

#define IPATH_MDIO_CMD_WRITE 1
#define IPATH_MDIO_CMD_READ 2
#define IPATH_MDIO_CLD_DIV 25 /* to get 2.5 MHz mdio clock */
#define IPATH_MDIO_CMDVALID 0x40000000 /* bit 30 */
#define IPATH_MDIO_DATAVALID 0x80000000 /* bit 31 */
#define IPATH_MDIO_CTRL_STD 0x0
static inline u64 ipath_mdio_req(int cmd, int dev, int reg, int data)
{
	/*
	 * Pack the clock divider and the cmd/dev/reg/data fields; the
	 * field offsets below (data[15:0], reg[20:16], dev[25:21], cmd
	 * above that) are reconstructed and should be checked against
	 * the chip documentation.
	 */
	return (((u64) IPATH_MDIO_CLD_DIV) << 32) |
		(cmd << 26) |
		(dev << 21) |
		(reg << 16) |
		(data & 0xFFFF);
}
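
/*
 * Example (illustrative only): build a request to read XGXS register 8 in
 * bank 31; the caller would then write the returned value to the chip's
 * MDIO control register and poll for IPATH_MDIO_DATAVALID.
 *
 *	u64 req = ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
 *				 IPATH_MDIO_CTRL_XGXS_REG_8, 0);
 */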
/* signal and fifo status, in bank 31 */
#define IPATH_MDIO_CTRL_XGXS_REG_8 0x8
/* controls loopback, redundancy */
#define IPATH_MDIO_CTRL_8355_REG_1 0x10
/* premph, encdec, etc. */
#define IPATH_MDIO_CTRL_8355_REG_2 0x11
#define IPATH_MDIO_CTRL_8355_REG_6 0x15
#define IPATH_MDIO_CTRL_8355_REG_9 0x18
#define IPATH_MDIO_CTRL_8355_REG_10 0x1D
int ipath_get_user_pages(unsigned long, size_t, struct page **);
int ipath_get_user_pages_nocopy(unsigned long, struct page **);
void ipath_release_user_pages(struct page **, size_t);
void ipath_release_user_pages_on_close(struct page **, size_t);
int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);

/* these are used for the registers that vary with port */
void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
                           unsigned, u64);
u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
                           unsigned);
/*
 * We could have a single register get/put routine that takes a group type,
 * but this is somewhat clearer and cleaner. It also gives us some error
 * checking. 64 bit register reads should always work, but are inefficient
 * on Opteron (the northbridge always generates 2 separate HT 32 bit reads),
 * so we use kreg32 wherever possible. User register and counter register
 * reads are always 32 bit reads, so only one form of those routines.
 */

/*
 * At the moment, none of the s-registers are writable, so no
 * ipath_write_sreg(), and none of the c-registers are writable, so no
 * ipath_write_creg().
 */

/**
 * ipath_read_ureg32 - read 32-bit virtualized per-port register
 * @dd: device
 * @regno: register number
 * @port: port number
 *
 * Return the contents of a register that is virtualized to be per port.
 * Returns -1 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
                                    ipath_ureg regno, int port)
{
        if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
                return 0;

        return readl(regno + (u64 __iomem *)
                     (dd->ipath_uregbase +
                      (char __iomem *)dd->ipath_kregbase +
                      dd->ipath_palign * port));
}
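
/*
 * Example (illustrative; ur_rcvhdrhead is assumed to be one of the
 * ipath_ureg names defined in ipath_registers.h):
 *
 *	u32 head = ipath_read_ureg32(dd, ur_rcvhdrhead, port);
 */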
/**
 * ipath_write_ureg - write 32-bit virtualized per-port register
 * @dd: device
 * @regno: register number
 * @value: value to write
 * @port: port number
 *
 * Write the contents of a register that is virtualized to be per port.
 */
static inline void ipath_write_ureg(const struct ipath_devdata *dd,
                                    ipath_ureg regno, u64 value, int port)
{
        u64 __iomem *ubase = (u64 __iomem *)
                (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
                 dd->ipath_palign * port);
        if (dd->ipath_kregbase)
                writeq(value, &ubase[regno]);
}
static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
                                    ipath_kreg regno)
{
        if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
                return 0;

        return readl((u32 __iomem *) &dd->ipath_kregbase[regno]);
}

static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
                                    ipath_kreg regno)
{
        if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
                return 0;

        return readq(&dd->ipath_kregbase[regno]);
}

static inline void ipath_write_kreg(const struct ipath_devdata *dd,
                                    ipath_kreg regno, u64 value)
{
        if (dd->ipath_kregbase)
                writeq(value, &dd->ipath_kregbase[regno]);
}

static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
                                  ipath_creg regno)
{
        if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
                return 0;

        return readq(regno + (u64 __iomem *)
                     (dd->ipath_cregbase +
                      (char __iomem *)dd->ipath_kregbase));
}

static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
                                    ipath_creg regno)
{
        if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
                return 0;

        return readl(regno + (u64 __iomem *)
                     (dd->ipath_cregbase +
                      (char __iomem *)dd->ipath_kregbase));
}
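
/*
 * Typical read-modify-write pattern for the write-only control registers
 * described in the shadow-copy comment in struct ipath_devdata (sketch;
 * kr_rcvctrl and port_enable_bit are assumed names, not quoted from the
 * driver): update the shadow, then write the whole shadow back rather than
 * reading the chip.
 *
 *	dd->ipath_rcvctrl |= 1UL << port_enable_bit;
 *	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
 */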
struct device_driver;

extern const char ipath_core_version[];

int ipath_driver_create_group(struct device_driver *);
void ipath_driver_remove_group(struct device_driver *);

int ipath_device_create_group(struct device *, struct ipath_devdata *);
void ipath_device_remove_group(struct device *, struct ipath_devdata *);
int ipath_expose_reset(struct device *);

int ipath_init_ipathfs(void);
void ipath_exit_ipathfs(void);
int ipathfs_add_device(struct ipath_devdata *);
int ipathfs_remove_device(struct ipath_devdata *);
/*
 * Flush write combining store buffers (if present) and perform a write
 * barrier.
 */
#if defined(CONFIG_X86_64)
#define ipath_flush_wc() asm volatile("sfence" ::: "memory")
#else
#define ipath_flush_wc() wmb()
#endif
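
/*
 * Sketch of the intended use (assumed, based on the comment above): after
 * copying a packet into a write-combining mapped PIO buffer (piobuf, hdr
 * and hdrwords are hypothetical here), flush the WC buffers so the stores
 * reach the chip in order before any subsequent trigger write:
 *
 *	__iowrite32_copy(piobuf, hdr, hdrwords);
 *	ipath_flush_wc();
 */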
extern unsigned ipath_debug; /* debugging bit mask */

const char *ipath_get_unit_name(int unit);

extern struct mutex ipath_mutex;

#define IPATH_DRV_NAME "ipath_core"
#define IPATH_MAJOR 233
#define IPATH_USER_MINOR_BASE 0
#define IPATH_SMA_MINOR 128
#define IPATH_DIAG_MINOR_BASE 129
#define IPATH_NMINORS 255
#define ipath_dev_err(dd,fmt,...) \
        do { \
                const struct ipath_devdata *__dd = (dd); \
                if (__dd->pcidev) \
                        dev_err(&__dd->pcidev->dev, "%s: " fmt, \
                                ipath_get_unit_name(__dd->ipath_unit), \
                                ##__VA_ARGS__); \
                else \
                        printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
                               ipath_get_unit_name(__dd->ipath_unit), \
                               ##__VA_ARGS__); \
        } while (0)
#ifdef _IPATH_DEBUGGING

# define __IPATH_DBG_WHICH(which,fmt,...) \
        do { \
                if (unlikely(ipath_debug & (which))) \
                        printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
                               __func__, ##__VA_ARGS__); \
        } while (0)

# define ipath_dbg(fmt,...) \
        __IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
# define ipath_cdbg(which,fmt,...) \
        __IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
#else /* ! _IPATH_DEBUGGING */

# define ipath_dbg(fmt,...)
# define ipath_cdbg(which,fmt,...)

#endif /* _IPATH_DEBUGGING */
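
/*
 * Usage sketch (messages are made up for illustration): ipath_dev_err()
 * always prints, while ipath_dbg()/ipath_cdbg() compile away unless
 * _IPATH_DEBUGGING is defined and the corresponding class bit is set in
 * the ipath_debug mask (class names usable with ipath_cdbg() come from
 * ipath_debug.h).
 *
 *	ipath_dev_err(dd, "chip reset failed: %d\n", ret);
 *	ipath_dbg("unit %u link went down\n", dd->ipath_unit);
 */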
#endif /* _IPATH_KERNEL_H */