2 $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
4 A FORE Systems 200E-series driver for ATM on Linux.
5 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
7 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
9 This driver simultaneously supports PCA-200E and SBA-200E adapters
10 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/config.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/init.h>
32 #include <linux/capability.h>
33 #include <linux/sched.h>
34 #include <linux/interrupt.h>
35 #include <linux/bitops.h>
36 #include <linux/pci.h>
37 #include <linux/module.h>
38 #include <linux/atmdev.h>
39 #include <linux/sonet.h>
40 #include <linux/atm_suni.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/delay.h>
44 #include <asm/string.h>
48 #include <asm/byteorder.h>
49 #include <asm/uaccess.h>
50 #include <asm/atomic.h>
52 #ifdef CONFIG_ATM_FORE200E_SBA
53 #include <asm/idprom.h>
55 #include <asm/openprom.h>
56 #include <asm/oplib.h>
57 #include <asm/pgtable.h>
60 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
61 #define FORE200E_USE_TASKLET
64 #if 0 /* enable the debugging code of the buffer supply queues */
65 #define FORE200E_BSQ_DEBUG
68 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
69 #define FORE200E_52BYTE_AAL0_SDU
75 #define FORE200E_VERSION "0.3e"
77 #define FORE200E "fore200e: "
79 #if 0 /* override .config */
80 #define CONFIG_ATM_FORE200E_DEBUG 1
82 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
83 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
84 printk(FORE200E format, ##args); } while (0)
86 #define DPRINTK(level, format, args...) do {} while (0)
90 #define FORE200E_ALIGN(addr, alignment) \
91 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
93 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
95 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
97 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
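/* illustration (hypothetical ring of 8 entries): each host-resident queue is kept
   as two parallel arrays -- one reached through virtual addresses, the other through
   bus (DMA) addresses -- plus a ring index, so
       FORE200E_INDEX(virt_addr, enum status, 5)     addresses the 6th status word,
       FORE200E_DMA_INDEX(dma_addr, enum status, 5)  yields its bus address, and
       FORE200E_NEXT_ENTRY(head, 8)                  advances head from 5 to 6, or wraps 7 back to 0. */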
100 #define ASSERT(expr) if (!(expr)) { \
101 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
102 __FUNCTION__, __LINE__, #expr); \
103 panic(FORE200E "%s", __FUNCTION__); \
106 #define ASSERT(expr) do {} while (0)
110 static const struct atmdev_ops fore200e_ops;
111 static const struct fore200e_bus fore200e_bus[];
113 static LIST_HEAD(fore200e_boards);
116 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
117 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
118 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
121 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
122 { BUFFER_S1_NBR, BUFFER_L1_NBR },
123 { BUFFER_S2_NBR, BUFFER_L2_NBR }
126 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
127 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
128 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
132 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
133 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
137 #if 0 /* currently unused */
139 fore200e_fore2atm_aal(enum fore200e_aal aal)
142 case FORE200E_AAL0: return ATM_AAL0;
143 case FORE200E_AAL34: return ATM_AAL34;
144 case FORE200E_AAL5: return ATM_AAL5;
152 static enum fore200e_aal
153 fore200e_atm2fore_aal(int aal)
156 case ATM_AAL0: return FORE200E_AAL0;
157 case ATM_AAL34: return FORE200E_AAL34;
160 case ATM_AAL5: return FORE200E_AAL5;
168 fore200e_irq_itoa(int irq)
170 #if defined(__sparc_v9__)
171 return __irq_itoa(irq);
174 sprintf(str, "%d", irq);
181 fore200e_kmalloc(int size, unsigned int __nocast flags)
183 void *chunk = kzalloc(size, flags);
186 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
193 fore200e_kfree(void* chunk)
199 /* allocate and align a chunk of memory intended to hold the data being exchanged
200 between the driver and the adapter (using streaming DVMA) */
203 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
205 unsigned long offset = 0;
207 if (alignment <= sizeof(int))
210 chunk->alloc_size = size + alignment;
211 chunk->align_size = size;
212 chunk->direction = direction;
214 chunk->alloc_addr = fore200e_kmalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
215 if (chunk->alloc_addr == NULL)
219 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
221 chunk->align_addr = chunk->alloc_addr + offset;
223 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
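/* worked example (hypothetical addresses): with size = 1024 and alignment = 32,
   alloc_size is 1056; if fore200e_kmalloc() happened to return 0x1004, then
   FORE200E_ALIGN(0x1004, 32) = ((0x1004 + 31) & ~31) - 0x1004 = 0x1020 - 0x1004 = 0x1c,
   so align_addr = alloc_addr + 28 is 32-byte aligned and the requested 1024 bytes
   still fit inside the 1056-byte allocation. */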
229 /* free a chunk of memory */
232 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
234 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
236 fore200e_kfree(chunk->alloc_addr);
241 fore200e_spin(int msecs)
243 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
244 while (time_before(jiffies, timeout));
249 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
251 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
256 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
259 } while (time_before(jiffies, timeout));
263 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
273 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
275 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
279 if ((ok = (fore200e->bus->read(addr) == val)))
282 } while (time_before(jiffies, timeout));
286 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
287 fore200e->bus->read(addr), val);
296 fore200e_free_rx_buf(struct fore200e* fore200e)
298 int scheme, magn, nbr;
299 struct buffer* buffer;
301 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
302 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
304 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
306 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
308 struct chunk* data = &buffer[ nbr ].data;
310 if (data->alloc_addr != NULL)
311 fore200e_chunk_free(fore200e, data);
320 fore200e_uninit_bs_queue(struct fore200e* fore200e)
324 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
325 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
327 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
328 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
330 if (status->alloc_addr)
331 fore200e->bus->dma_chunk_free(fore200e, status);
333 if (rbd_block->alloc_addr)
334 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
341 fore200e_reset(struct fore200e* fore200e, int diag)
345 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
347 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
349 fore200e->bus->reset(fore200e);
352 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
355 printk(FORE200E "device %s self-test failed\n", fore200e->name);
359 printk(FORE200E "device %s self-test passed\n", fore200e->name);
361 fore200e->state = FORE200E_STATE_RESET;
369 fore200e_shutdown(struct fore200e* fore200e)
371 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
372 fore200e->name, fore200e->phys_base,
373 fore200e_irq_itoa(fore200e->irq));
375 if (fore200e->state > FORE200E_STATE_RESET) {
376 /* first, reset the board to prevent further interrupts or data transfers */
377 fore200e_reset(fore200e, 0);
380 /* then, release all allocated resources */
381 switch(fore200e->state) {
383 case FORE200E_STATE_COMPLETE:
384 kfree(fore200e->stats);
386 case FORE200E_STATE_IRQ:
387 free_irq(fore200e->irq, fore200e->atm_dev);
389 case FORE200E_STATE_ALLOC_BUF:
390 fore200e_free_rx_buf(fore200e);
392 case FORE200E_STATE_INIT_BSQ:
393 fore200e_uninit_bs_queue(fore200e);
395 case FORE200E_STATE_INIT_RXQ:
396 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
397 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
399 case FORE200E_STATE_INIT_TXQ:
400 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
401 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
403 case FORE200E_STATE_INIT_CMDQ:
404 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
406 case FORE200E_STATE_INITIALIZE:
407 /* nothing to do for that state */
409 case FORE200E_STATE_START_FW:
410 /* nothing to do for that state */
412 case FORE200E_STATE_LOAD_FW:
413 /* nothing to do for that state */
415 case FORE200E_STATE_RESET:
416 /* nothing to do for that state */
418 case FORE200E_STATE_MAP:
419 fore200e->bus->unmap(fore200e);
421 case FORE200E_STATE_CONFIGURE:
422 /* nothing to do for that state */
424 case FORE200E_STATE_REGISTER:
425 /* XXX shouldn't we *start* by deregistering the device? */
426 atm_dev_deregister(fore200e->atm_dev);
428 case FORE200E_STATE_BLANK:
429 /* nothing to do for that state */
435 #ifdef CONFIG_ATM_FORE200E_PCA
437 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
439 /* on big-endian hosts, the board is configured to convert
440 the endianness of slave RAM accesses */
441 return le32_to_cpu(readl(addr));
445 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
447 /* on big-endian hosts, the board is configured to convert
448 the endianness of slave RAM accesses */
449 writel(cpu_to_le32(val), addr);
454 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
456 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
458 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
459 virt_addr, size, direction, dma_addr);
466 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
468 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
469 dma_addr, size, direction);
471 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
476 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
478 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
480 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
484 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
486 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
488 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
492 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
493 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
496 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
497 int size, int nbr, int alignment)
499 /* returned chunks are page-aligned */
500 chunk->alloc_size = size * nbr;
501 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
505 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
508 chunk->align_addr = chunk->alloc_addr;
514 /* free a DMA consistent chunk of memory */
517 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
519 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
527 fore200e_pca_irq_check(struct fore200e* fore200e)
529 /* this is a 1-bit register */
530 int irq_posted = readl(fore200e->regs.pca.psr);
532 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
533 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
534 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
543 fore200e_pca_irq_ack(struct fore200e* fore200e)
545 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
550 fore200e_pca_reset(struct fore200e* fore200e)
552 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
554 writel(0, fore200e->regs.pca.hcr);
559 fore200e_pca_map(struct fore200e* fore200e)
561 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
563 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
565 if (fore200e->virt_base == NULL) {
566 printk(FORE200E "can't map device %s\n", fore200e->name);
570 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
572 /* gain access to the PCA specific registers */
573 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
574 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
575 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
577 fore200e->state = FORE200E_STATE_MAP;
583 fore200e_pca_unmap(struct fore200e* fore200e)
585 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
587 if (fore200e->virt_base != NULL)
588 iounmap(fore200e->virt_base);
593 fore200e_pca_configure(struct fore200e* fore200e)
595 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
596 u8 master_ctrl, latency;
598 DPRINTK(2, "device %s being configured\n", fore200e->name);
600 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
601 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
605 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
607 master_ctrl = master_ctrl
608 #if defined(__BIG_ENDIAN)
609 /* request the PCA board to convert the endianness of slave RAM accesses */
610 | PCA200E_CTRL_CONVERT_ENDIAN
613 | PCA200E_CTRL_DIS_CACHE_RD
614 | PCA200E_CTRL_DIS_WRT_INVAL
615 | PCA200E_CTRL_ENA_CONT_REQ_MODE
616 | PCA200E_CTRL_2_CACHE_WRT_INVAL
618 | PCA200E_CTRL_LARGE_PCI_BURSTS;
620 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
622 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
623 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
624 this may impact the performance of other PCI devices on the same bus, though */
626 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
628 fore200e->state = FORE200E_STATE_CONFIGURE;
634 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
636 struct host_cmdq* cmdq = &fore200e->host_cmdq;
637 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
638 struct prom_opcode opcode;
642 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
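/* command submission follows the same host/cp handshake used throughout the driver
   (sketched here, partly inferred from the code): fill the opcode block, DMA-map the
   result buffer and write its bus address into the cp-resident entry, mark the host
   status word STATUS_PENDING, write the opcode word last -- apparently the write the
   cp uses as its doorbell -- then poll the status word until the cp turns it into
   STATUS_COMPLETE or flags STATUS_ERROR. */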
644 opcode.opcode = OPCODE_GET_PROM;
647 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
649 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
651 *entry->status = STATUS_PENDING;
653 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
655 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
657 *entry->status = STATUS_FREE;
659 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
662 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
666 #if defined(__BIG_ENDIAN)
668 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
670 /* MAC address is stored as little-endian */
671 swap_here(&prom->mac_addr[0]);
672 swap_here(&prom->mac_addr[4]);
680 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
682 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
684 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
685 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
688 #endif /* CONFIG_ATM_FORE200E_PCA */
691 #ifdef CONFIG_ATM_FORE200E_SBA
694 fore200e_sba_read(volatile u32 __iomem *addr)
696 return sbus_readl(addr);
701 fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
703 sbus_writel(val, addr);
708 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
710 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
712 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
713 virt_addr, size, direction, dma_addr);
720 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
722 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
723 dma_addr, size, direction);
725 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
730 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
732 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
734 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
738 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
740 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
742 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
746 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
747 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
750 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
751 int size, int nbr, int alignment)
753 chunk->alloc_size = chunk->align_size = size * nbr;
755 /* returned chunks are page-aligned */
756 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
760 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
763 chunk->align_addr = chunk->alloc_addr;
769 /* free a DVMA consistent chunk of memory */
772 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
774 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
782 fore200e_sba_irq_enable(struct fore200e* fore200e)
784 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
785 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
790 fore200e_sba_irq_check(struct fore200e* fore200e)
792 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
797 fore200e_sba_irq_ack(struct fore200e* fore200e)
799 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
800 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
805 fore200e_sba_reset(struct fore200e* fore200e)
807 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
809 fore200e->bus->write(0, fore200e->regs.sba.hcr);
814 fore200e_sba_map(struct fore200e* fore200e)
816 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
819 /* gain access to the SBA specific registers */
820 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
821 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
822 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
823 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
825 if (fore200e->virt_base == NULL) {
826 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
830 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
832 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
834 /* get the supported DVMA burst sizes */
835 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
837 if (sbus_can_dma_64bit(sbus_dev))
838 sbus_set_sbus64(sbus_dev, bursts);
840 fore200e->state = FORE200E_STATE_MAP;
846 fore200e_sba_unmap(struct fore200e* fore200e)
848 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
849 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
850 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
851 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
856 fore200e_sba_configure(struct fore200e* fore200e)
858 fore200e->state = FORE200E_STATE_CONFIGURE;
863 static struct fore200e* __init
864 fore200e_sba_detect(const struct fore200e_bus* bus, int index)
866 struct fore200e* fore200e;
867 struct sbus_bus* sbus_bus;
868 struct sbus_dev* sbus_dev = NULL;
870 unsigned int count = 0;
872 for_each_sbus (sbus_bus) {
873 for_each_sbusdev (sbus_dev, sbus_bus) {
874 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
884 if (sbus_dev->num_registers != 4) {
885 printk(FORE200E "this %s device has %d instead of 4 registers\n",
886 bus->model_name, sbus_dev->num_registers);
890 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
891 if (fore200e == NULL)
895 fore200e->bus_dev = sbus_dev;
896 fore200e->irq = sbus_dev->irqs[ 0 ];
898 fore200e->phys_base = (unsigned long)sbus_dev;
900 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
907 fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
909 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
912 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
916 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
920 prom_getproperty(sbus_dev->prom_node, "serialnumber",
921 (char*)&prom->serial_number, sizeof(prom->serial_number));
923 prom_getproperty(sbus_dev->prom_node, "promversion",
924 (char*)&prom->hw_revision, sizeof(prom->hw_revision));
931 fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
933 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
935 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
937 #endif /* CONFIG_ATM_FORE200E_SBA */
941 fore200e_tx_irq(struct fore200e* fore200e)
943 struct host_txq* txq = &fore200e->host_txq;
944 struct host_txq_entry* entry;
946 struct fore200e_vc_map* vc_map;
948 if (fore200e->host_txq.txing == 0)
953 entry = &txq->host_entry[ txq->tail ];
955 if ((*entry->status & STATUS_COMPLETE) == 0) {
959 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
960 entry, txq->tail, entry->vc_map, entry->skb);
962 /* free copy of misaligned data */
965 /* remove DMA mapping */
966 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
969 vc_map = entry->vc_map;
971 /* vcc closed since the time the entry was submitted for tx? */
972 if ((vc_map->vcc == NULL) ||
973 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
975 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
976 fore200e->atm_dev->number);
978 dev_kfree_skb_any(entry->skb);
983 /* vcc closed then immediately re-opened? */
984 if (vc_map->incarn != entry->incarn) {
986 /* when a vcc is closed, some PDUs may still be pending in the tx queue.
987 if the same vcc is immediately re-opened, those pending PDUs must
988 not be popped after the completion of their emission, as they refer
989 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
990 would be decremented by the size of the (unrelated) skb, possibly
991 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
992 we thus bind the tx entry to the current incarnation of the vcc
993 when the entry is submitted for tx. when the tx later completes,
994 if the incarnation number of the tx entry does not match that of the
995 vcc, the vcc has been closed and re-opened in the meantime, so
996 we simply drop the skb here. */
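/* illustration (hypothetical sequence): opening VC 0.0.42 sets vc_map->incarn = 7
   and a PDU is queued with entry->incarn = 7; the VC is then closed and immediately
   re-opened, bumping vc_map->incarn to 8; when the old PDU completes, 7 != 8, so its
   skb is dropped here instead of being popped against the new incarnation. */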
998 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
999 fore200e->atm_dev->number);
1001 dev_kfree_skb_any(entry->skb);
1007 /* notify tx completion */
1009 vcc->pop(vcc, entry->skb);
1012 dev_kfree_skb_any(entry->skb);
1015 /* race fixed by the above incarnation mechanism, but... */
1016 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
1017 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
1020 /* check error condition */
1021 if (*entry->status & STATUS_ERROR)
1022 atomic_inc(&vcc->stats->tx_err);
1024 atomic_inc(&vcc->stats->tx);
1028 *entry->status = STATUS_FREE;
1030 fore200e->host_txq.txing--;
1032 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
1037 #ifdef FORE200E_BSQ_DEBUG
1038 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
1040 struct buffer* buffer;
1043 buffer = bsq->freebuf;
1046 if (buffer->supplied) {
1047 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
1048 where, scheme, magn, buffer->index);
1051 if (buffer->magn != magn) {
1052 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
1053 where, scheme, magn, buffer->index, buffer->magn);
1056 if (buffer->scheme != scheme) {
1057 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
1058 where, scheme, magn, buffer->index, buffer->scheme);
1061 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
1062 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
1063 where, scheme, magn, buffer->index);
1067 buffer = buffer->next;
1070 if (count != bsq->freebuf_count) {
1071 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
1072 where, scheme, magn, count, bsq->freebuf_count);
1080 fore200e_supply(struct fore200e* fore200e)
1082 int scheme, magn, i;
1084 struct host_bsq* bsq;
1085 struct host_bsq_entry* entry;
1086 struct buffer* buffer;
1088 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1089 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1091 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1093 #ifdef FORE200E_BSQ_DEBUG
1094 bsq_audit(1, bsq, scheme, magn);
1096 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1098 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1099 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1101 entry = &bsq->host_entry[ bsq->head ];
1103 for (i = 0; i < RBD_BLK_SIZE; i++) {
1105 /* take the first buffer in the free buffer list */
1106 buffer = bsq->freebuf;
1108 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1109 scheme, magn, bsq->freebuf_count);
1112 bsq->freebuf = buffer->next;
1114 #ifdef FORE200E_BSQ_DEBUG
1115 if (buffer->supplied)
1116 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1117 scheme, magn, buffer->index);
1118 buffer->supplied = 1;
1120 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1121 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
1124 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1126 /* decrease the number of free rx buffers accordingly */
1127 bsq->freebuf_count -= RBD_BLK_SIZE;
1129 *entry->status = STATUS_PENDING;
1130 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
1138 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1140 struct sk_buff* skb;
1141 struct buffer* buffer;
1142 struct fore200e_vcc* fore200e_vcc;
1144 #ifdef FORE200E_52BYTE_AAL0_SDU
1145 u32 cell_header = 0;
1150 fore200e_vcc = FORE200E_VCC(vcc);
1151 ASSERT(fore200e_vcc);
1153 #ifdef FORE200E_52BYTE_AAL0_SDU
1154 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1156 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1157 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1158 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1159 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1160 rpd->atm_header.clp;
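/* the reconstructed word is the 4-byte ATM cell header (HEC excluded), laid out as
   GFC(4) | VPI(8) | VCI(16) | PTI(3) | CLP(1); e.g. a cell on VPI 0 / VCI 42 with
   PTI 0 and CLP 0 yields 42 << ATM_HDR_VCI_SHIFT = 0x000002a0. prepending it to the
   48-byte cell payload gives the 52-byte AAL0 SDU delivered to the AAL0 socket. */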
1165 /* compute total PDU length */
1166 for (i = 0; i < rpd->nseg; i++)
1167 pdu_len += rpd->rsd[ i ].length;
1169 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1171 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1173 atomic_inc(&vcc->stats->rx_drop);
1177 __net_timestamp(skb);
1179 #ifdef FORE200E_52BYTE_AAL0_SDU
1181 *((u32*)skb_put(skb, 4)) = cell_header;
1185 /* reassemble segments */
1186 for (i = 0; i < rpd->nseg; i++) {
1188 /* rebuild rx buffer address from rsd handle */
1189 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1191 /* Make device DMA transfer visible to CPU. */
1192 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1194 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1196 /* Now let the device get at it again. */
1197 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1200 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1202 if (pdu_len < fore200e_vcc->rx_min_pdu)
1203 fore200e_vcc->rx_min_pdu = pdu_len;
1204 if (pdu_len > fore200e_vcc->rx_max_pdu)
1205 fore200e_vcc->rx_max_pdu = pdu_len;
1206 fore200e_vcc->rx_pdu++;
1209 if (atm_charge(vcc, skb->truesize) == 0) {
1211 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1212 vcc->itf, vcc->vpi, vcc->vci);
1214 dev_kfree_skb_any(skb);
1216 atomic_inc(&vcc->stats->rx_drop);
1220 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1222 vcc->push(vcc, skb);
1223 atomic_inc(&vcc->stats->rx);
1225 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1232 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1234 struct host_bsq* bsq;
1235 struct buffer* buffer;
1238 for (i = 0; i < rpd->nseg; i++) {
1240 /* rebuild rx buffer address from rsd handle */
1241 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1243 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1245 #ifdef FORE200E_BSQ_DEBUG
1246 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1248 if (buffer->supplied == 0)
1249 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1250 buffer->scheme, buffer->magn, buffer->index);
1251 buffer->supplied = 0;
1254 /* re-insert the buffer into the free buffer list */
1255 buffer->next = bsq->freebuf;
1256 bsq->freebuf = buffer;
1258 /* then increment the number of free rx buffers */
1259 bsq->freebuf_count++;
1265 fore200e_rx_irq(struct fore200e* fore200e)
1267 struct host_rxq* rxq = &fore200e->host_rxq;
1268 struct host_rxq_entry* entry;
1269 struct atm_vcc* vcc;
1270 struct fore200e_vc_map* vc_map;
1274 entry = &rxq->host_entry[ rxq->head ];
1276 /* no more received PDUs */
1277 if ((*entry->status & STATUS_COMPLETE) == 0)
1280 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1282 if ((vc_map->vcc == NULL) ||
1283 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1285 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1286 fore200e->atm_dev->number,
1287 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1293 if ((*entry->status & STATUS_ERROR) == 0) {
1295 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1298 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1299 fore200e->atm_dev->number,
1300 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1301 atomic_inc(&vcc->stats->rx_err);
1305 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1307 fore200e_collect_rpd(fore200e, entry->rpd);
1309 /* rewrite the rpd address to ack the received PDU */
1310 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1311 *entry->status = STATUS_FREE;
1313 fore200e_supply(fore200e);
1318 #ifndef FORE200E_USE_TASKLET
1320 fore200e_irq(struct fore200e* fore200e)
1322 unsigned long flags;
1324 spin_lock_irqsave(&fore200e->q_lock, flags);
1325 fore200e_rx_irq(fore200e);
1326 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1328 spin_lock_irqsave(&fore200e->q_lock, flags);
1329 fore200e_tx_irq(fore200e);
1330 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1336 fore200e_interrupt(int irq, void* dev, struct pt_regs* regs)
1338 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1340 if (fore200e->bus->irq_check(fore200e) == 0) {
1342 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1345 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1347 #ifdef FORE200E_USE_TASKLET
1348 tasklet_schedule(&fore200e->tx_tasklet);
1349 tasklet_schedule(&fore200e->rx_tasklet);
1351 fore200e_irq(fore200e);
1354 fore200e->bus->irq_ack(fore200e);
1359 #ifdef FORE200E_USE_TASKLET
1361 fore200e_tx_tasklet(unsigned long data)
1363 struct fore200e* fore200e = (struct fore200e*) data;
1364 unsigned long flags;
1366 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1368 spin_lock_irqsave(&fore200e->q_lock, flags);
1369 fore200e_tx_irq(fore200e);
1370 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1375 fore200e_rx_tasklet(unsigned long data)
1377 struct fore200e* fore200e = (struct fore200e*) data;
1378 unsigned long flags;
1380 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1382 spin_lock_irqsave(&fore200e->q_lock, flags);
1383 fore200e_rx_irq((struct fore200e*) data);
1384 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1390 fore200e_select_scheme(struct atm_vcc* vcc)
1392 /* fairly balance the VCs over (identical) buffer schemes */
1393 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1395 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1396 vcc->itf, vcc->vpi, vcc->vci, scheme);
1403 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1405 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1406 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1407 struct activate_opcode activ_opcode;
1408 struct deactivate_opcode deactiv_opcode;
1411 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1413 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1416 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1418 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1419 activ_opcode.aal = aal;
1420 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1421 activ_opcode.pad = 0;
1424 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1425 deactiv_opcode.pad = 0;
1428 vpvc.vci = vcc->vci;
1429 vpvc.vpi = vcc->vpi;
1431 *entry->status = STATUS_PENDING;
1435 #ifdef FORE200E_52BYTE_AAL0_SDU
1438 /* the MTU is not used by the cp, except in the case of AAL0 */
1439 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1440 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1441 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1444 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1445 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1448 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1450 *entry->status = STATUS_FREE;
1453 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1454 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1458 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1459 activate ? "open" : "clos");
1465 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1468 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1470 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1472 /* compute the data cells to idle cells ratio from the tx PCR */
1473 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1474 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
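/* e.g. (assuming ATM_OC3_PCR is roughly 353207 cells/s) a CBR VC asking for
   max_pcr = 176603 gets data_cells = 176603 * 255 / 353207 = 127 and
   idle_cells = 255 - 127 = 128, i.e. the cp inserts about one idle cell after
   every data cell, roughly halving the cell rate as requested. */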
1477 /* disable rate control */
1478 rate->data_cells = rate->idle_cells = 0;
1484 fore200e_open(struct atm_vcc *vcc)
1486 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1487 struct fore200e_vcc* fore200e_vcc;
1488 struct fore200e_vc_map* vc_map;
1489 unsigned long flags;
1491 short vpi = vcc->vpi;
1493 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1494 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1496 spin_lock_irqsave(&fore200e->q_lock, flags);
1498 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1501 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1503 printk(FORE200E "VC %d.%d.%d already in use\n",
1504 fore200e->atm_dev->number, vpi, vci);
1511 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1513 fore200e_vcc = fore200e_kmalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1514 if (fore200e_vcc == NULL) {
1519 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1520 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1521 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1522 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1523 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1524 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1525 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1527 /* pseudo-CBR bandwidth requested? */
1528 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1530 down(&fore200e->rate_sf);
1531 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1532 up(&fore200e->rate_sf);
1534 fore200e_kfree(fore200e_vcc);
1539 /* reserve bandwidth */
1540 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1541 up(&fore200e->rate_sf);
1544 vcc->itf = vcc->dev->number;
1546 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1547 set_bit(ATM_VF_ADDR, &vcc->flags);
1549 vcc->dev_data = fore200e_vcc;
1551 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1555 clear_bit(ATM_VF_ADDR, &vcc->flags);
1556 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1558 vcc->dev_data = NULL;
1560 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1562 fore200e_kfree(fore200e_vcc);
1566 /* compute rate control parameters */
1567 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1569 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1570 set_bit(ATM_VF_HASQOS, &vcc->flags);
1572 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1573 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1574 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1575 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1578 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1579 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1580 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1582 /* new incarnation of the vcc */
1583 vc_map->incarn = ++fore200e->incarn_count;
1585 /* VC unusable before this flag is set */
1586 set_bit(ATM_VF_READY, &vcc->flags);
1593 fore200e_close(struct atm_vcc* vcc)
1595 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1596 struct fore200e_vcc* fore200e_vcc;
1597 struct fore200e_vc_map* vc_map;
1598 unsigned long flags;
1601 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1602 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1604 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1606 clear_bit(ATM_VF_READY, &vcc->flags);
1608 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1610 spin_lock_irqsave(&fore200e->q_lock, flags);
1612 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1614 /* the vc is no longer considered "in use" by fore200e_open() */
1617 vcc->itf = vcc->vci = vcc->vpi = 0;
1619 fore200e_vcc = FORE200E_VCC(vcc);
1620 vcc->dev_data = NULL;
1622 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1624 /* release reserved bandwidth, if any */
1625 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1627 down(&fore200e->rate_sf);
1628 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1629 up(&fore200e->rate_sf);
1631 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1634 clear_bit(ATM_VF_ADDR, &vcc->flags);
1635 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1637 ASSERT(fore200e_vcc);
1638 fore200e_kfree(fore200e_vcc);
1643 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1645 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1646 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1647 struct fore200e_vc_map* vc_map;
1648 struct host_txq* txq = &fore200e->host_txq;
1649 struct host_txq_entry* entry;
1651 struct tpd_haddr tpd_haddr;
1652 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1654 int tx_len = skb->len;
1655 u32* cell_header = NULL;
1656 unsigned char* skb_data;
1658 unsigned char* data;
1659 unsigned long flags;
1662 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1664 ASSERT(fore200e_vcc);
1666 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1667 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1668 dev_kfree_skb_any(skb);
1672 #ifdef FORE200E_52BYTE_AAL0_SDU
1673 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1674 cell_header = (u32*) skb->data;
1675 skb_data = skb->data + 4; /* skip 4-byte cell header */
1676 skb_len = tx_len = skb->len - 4;
1678 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1683 skb_data = skb->data;
1687 if (((unsigned long)skb_data) & 0x3) {
1689 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1694 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1696 /* this simply NUKES the PCA board */
1697 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1699 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1703 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1709 dev_kfree_skb_any(skb);
1714 memcpy(data, skb_data, skb_len);
1715 if (skb_len < tx_len)
1716 memset(data + skb_len, 0x00, tx_len - skb_len);
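/* e.g. an AAL0 PDU of skb_len = 100 bytes is not a whole number of cells
   (100 % 48 != 0), so tx_len was rounded up to ((100 / 48) + 1) * 48 = 144
   above and the copy is padded here with 44 zero bytes to fill the last cell. */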
1722 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1723 ASSERT(vc_map->vcc == vcc);
1727 spin_lock_irqsave(&fore200e->q_lock, flags);
1729 entry = &txq->host_entry[ txq->head ];
1731 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1733 /* try to free completed tx queue entries */
1734 fore200e_tx_irq(fore200e);
1736 if (*entry->status != STATUS_FREE) {
1738 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1740 /* retry once again? */
1746 atomic_inc(&vcc->stats->tx_err);
1749 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1750 fore200e->name, fore200e->cp_queues->heartbeat);
1755 dev_kfree_skb_any(skb);
1765 entry->incarn = vc_map->incarn;
1766 entry->vc_map = vc_map;
1768 entry->data = tx_copy ? data : NULL;
1771 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1772 tpd->tsd[ 0 ].length = tx_len;
1774 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1777 /* The dma_map call above implies a dma_sync so the device can use it,
1778 * thus no explicit dma_sync call is necessary here.
1781 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1782 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1783 tpd->tsd[0].length, skb_len);
1785 if (skb_len < fore200e_vcc->tx_min_pdu)
1786 fore200e_vcc->tx_min_pdu = skb_len;
1787 if (skb_len > fore200e_vcc->tx_max_pdu)
1788 fore200e_vcc->tx_max_pdu = skb_len;
1789 fore200e_vcc->tx_pdu++;
1791 /* set tx rate control information */
1792 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1793 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1796 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1797 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1798 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1799 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1800 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1803 /* set the ATM header, common to all cells conveying the PDU */
1804 tpd->atm_header.clp = 0;
1805 tpd->atm_header.plt = 0;
1806 tpd->atm_header.vci = vcc->vci;
1807 tpd->atm_header.vpi = vcc->vpi;
1808 tpd->atm_header.gfc = 0;
1811 tpd->spec.length = tx_len;
1813 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1816 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1818 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
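/* illustration (hypothetical values): with 32-byte blocks (1 << TPD_HADDR_SHIFT == 32)
   and, say, sizeof(struct tpd) == 64, size would be 2; a 32-byte aligned tpd_dma of
   0x12345680 packs into the bitfield as haddr = 0x12345680 >> 5 = 0x0091a2b4. */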
1820 *entry->status = STATUS_PENDING;
1821 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1823 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1830 fore200e_getstats(struct fore200e* fore200e)
1832 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1833 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1834 struct stats_opcode opcode;
1838 if (fore200e->stats == NULL) {
1839 fore200e->stats = fore200e_kmalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1840 if (fore200e->stats == NULL)
1844 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1845 sizeof(struct stats), DMA_FROM_DEVICE);
1847 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1849 opcode.opcode = OPCODE_GET_STATS;
1852 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1854 *entry->status = STATUS_PENDING;
1856 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1858 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1860 *entry->status = STATUS_FREE;
1862 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1865 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1874 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1876 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1878 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1879 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1886 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1888 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1890 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1891 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1897 #if 0 /* currently unused */
1899 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1901 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1902 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1903 struct oc3_opcode opcode;
1905 u32 oc3_regs_dma_addr;
1907 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1909 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1911 opcode.opcode = OPCODE_GET_OC3;
1916 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1918 *entry->status = STATUS_PENDING;
1920 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1922 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1924 *entry->status = STATUS_FREE;
1926 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1929 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1939 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1941 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1942 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1943 struct oc3_opcode opcode;
1946 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1948 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1950 opcode.opcode = OPCODE_SET_OC3;
1952 opcode.value = value;
1955 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1957 *entry->status = STATUS_PENDING;
1959 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1961 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1963 *entry->status = STATUS_FREE;
1966 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1975 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1977 u32 mct_value, mct_mask;
1980 if (!capable(CAP_NET_ADMIN))
1983 switch (loop_mode) {
1987 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1990 case ATM_LM_LOC_PHY:
1991 mct_value = mct_mask = SUNI_MCT_DLE;
1994 case ATM_LM_RMT_PHY:
1995 mct_value = mct_mask = SUNI_MCT_LLE;
2002 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
2004 fore200e->loop_mode = loop_mode;
2010 static inline unsigned int
2011 fore200e_swap(unsigned int in)
2013 #if defined(__LITTLE_ENDIAN)
2022 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
2024 struct sonet_stats tmp;
2026 if (fore200e_getstats(fore200e) < 0)
2029 tmp.section_bip = fore200e_swap(fore200e->stats->oc3.section_bip8_errors);
2030 tmp.line_bip = fore200e_swap(fore200e->stats->oc3.line_bip24_errors);
2031 tmp.path_bip = fore200e_swap(fore200e->stats->oc3.path_bip8_errors);
2032 tmp.line_febe = fore200e_swap(fore200e->stats->oc3.line_febe_errors);
2033 tmp.path_febe = fore200e_swap(fore200e->stats->oc3.path_febe_errors);
2034 tmp.corr_hcs = fore200e_swap(fore200e->stats->oc3.corr_hcs_errors);
2035 tmp.uncorr_hcs = fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors);
2036 tmp.tx_cells = fore200e_swap(fore200e->stats->aal0.cells_transmitted) +
2037 fore200e_swap(fore200e->stats->aal34.cells_transmitted) +
2038 fore200e_swap(fore200e->stats->aal5.cells_transmitted);
2039 tmp.rx_cells = fore200e_swap(fore200e->stats->aal0.cells_received) +
2040 fore200e_swap(fore200e->stats->aal34.cells_received) +
2041 fore200e_swap(fore200e->stats->aal5.cells_received);
2044 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
2051 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2053 struct fore200e* fore200e = FORE200E_DEV(dev);
2055 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2060 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
2063 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
2066 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2069 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2072 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2075 return -ENOSYS; /* not implemented */
2080 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2082 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2083 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2085 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2086 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
2090 DPRINTK(2, "change_qos %d.%d.%d, "
2091 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2092 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2093 "available_cell_rate = %u",
2094 vcc->itf, vcc->vpi, vcc->vci,
2095 fore200e_traffic_class[ qos->txtp.traffic_class ],
2096 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2097 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2098 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2099 flags, fore200e->available_cell_rate);
2101 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2103 down(&fore200e->rate_sf);
2104 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2105 up(&fore200e->rate_sf);
2109 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2110 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2112 up(&fore200e->rate_sf);
2114 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2116 /* update rate control parameters */
2117 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2119 set_bit(ATM_VF_HASQOS, &vcc->flags);
2129 fore200e_irq_request(struct fore200e* fore200e)
2131 if (request_irq(fore200e->irq, fore200e_interrupt, SA_SHIRQ, fore200e->name, fore200e->atm_dev) < 0) {
2133 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2134 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2138 printk(FORE200E "IRQ %s reserved for device %s\n",
2139 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2141 #ifdef FORE200E_USE_TASKLET
2142 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2143 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2146 fore200e->state = FORE200E_STATE_IRQ;
2152 fore200e_get_esi(struct fore200e* fore200e)
2154 struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2160 ok = fore200e->bus->prom_read(fore200e, prom);
2162 fore200e_kfree(prom);
2166 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2168 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2169 prom->serial_number & 0xFFFF,
2170 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2171 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2173 for (i = 0; i < ESI_LEN; i++) {
2174 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2177 fore200e_kfree(prom);
2184 fore200e_alloc_rx_buf(struct fore200e* fore200e)
2186 int scheme, magn, nbr, size, i;
2188 struct host_bsq* bsq;
2189 struct buffer* buffer;
2191 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2192 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2194 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2196 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2197 size = fore200e_rx_buf_size[ scheme ][ magn ];
2199 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2201 /* allocate the array of receive buffers */
2202 buffer = bsq->buffer = fore200e_kmalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2207 bsq->freebuf = NULL;
2209 for (i = 0; i < nbr; i++) {
2211 buffer[ i ].scheme = scheme;
2212 buffer[ i ].magn = magn;
2213 #ifdef FORE200E_BSQ_DEBUG
2214 buffer[ i ].index = i;
2215 buffer[ i ].supplied = 0;
2218 /* allocate the receive buffer body */
2219 if (fore200e_chunk_alloc(fore200e,
2220 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2221 DMA_FROM_DEVICE) < 0) {
2224 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2225 fore200e_kfree(buffer);
2230 /* insert the buffer into the free buffer list */
2231 buffer[ i ].next = bsq->freebuf;
2232 bsq->freebuf = &buffer[ i ];
2234 /* all the buffers are free, initially */
2235 bsq->freebuf_count = nbr;
2237 #ifdef FORE200E_BSQ_DEBUG
2238 bsq_audit(3, bsq, scheme, magn);
2243 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2249 fore200e_init_bs_queue(struct fore200e* fore200e)
2251 int scheme, magn, i;
2253 struct host_bsq* bsq;
2254 struct cp_bsq_entry __iomem * cp_entry;
2256 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2257 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2259 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2261 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2263 /* allocate and align the array of status words */
2264 if (fore200e->bus->dma_chunk_alloc(fore200e,
2266 sizeof(enum status),
2268 fore200e->bus->status_alignment) < 0) {
2272 /* allocate and align the array of receive buffer descriptors */
2273 if (fore200e->bus->dma_chunk_alloc(fore200e,
2275 sizeof(struct rbd_block),
2277 fore200e->bus->descr_alignment) < 0) {
2279 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2283 /* get the base address of the cp resident buffer supply queue entries */
2284 cp_entry = fore200e->virt_base +
2285 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2287 /* fill the host resident and cp resident buffer supply queue entries */
2288 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2290 bsq->host_entry[ i ].status =
2291 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2292 bsq->host_entry[ i ].rbd_block =
2293 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2294 bsq->host_entry[ i ].rbd_block_dma =
2295 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2296 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2298 *bsq->host_entry[ i ].status = STATUS_FREE;
2300 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2301 &cp_entry[ i ].status_haddr);
2306 fore200e->state = FORE200E_STATE_INIT_BSQ;
2312 fore200e_init_rx_queue(struct fore200e* fore200e)
2314 struct host_rxq* rxq = &fore200e->host_rxq;
2315 struct cp_rxq_entry __iomem * cp_entry;
2318 DPRINTK(2, "receive queue is being initialized\n");
2320 /* allocate and align the array of status words */
2321 if (fore200e->bus->dma_chunk_alloc(fore200e,
2323 sizeof(enum status),
2325 fore200e->bus->status_alignment) < 0) {
2329 /* allocate and align the array of receive PDU descriptors */
2330 if (fore200e->bus->dma_chunk_alloc(fore200e,
2334 fore200e->bus->descr_alignment) < 0) {
2336 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2340 /* get the base address of the cp resident rx queue entries */
2341 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2343 /* fill the host resident and cp resident rx entries */
2344 for (i=0; i < QUEUE_SIZE_RX; i++) {
2346 rxq->host_entry[ i ].status =
2347 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2348 rxq->host_entry[ i ].rpd =
2349 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2350 rxq->host_entry[ i ].rpd_dma =
2351 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2352 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2354 *rxq->host_entry[ i ].status = STATUS_FREE;
2356 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2357 &cp_entry[ i ].status_haddr);
2359 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2360 &cp_entry[ i ].rpd_haddr);
2363 /* set the head entry of the queue */
2366 fore200e->state = FORE200E_STATE_INIT_RXQ;
2372 fore200e_init_tx_queue(struct fore200e* fore200e)
2374 struct host_txq* txq = &fore200e->host_txq;
2375 struct cp_txq_entry __iomem * cp_entry;
2378 DPRINTK(2, "transmit queue is being initialized\n");
2380 /* allocate and align the array of status words */
2381 if (fore200e->bus->dma_chunk_alloc(fore200e,
2383 sizeof(enum status),
2385 fore200e->bus->status_alignment) < 0) {
2389 /* allocate and align the array of transmit PDU descriptors */
2390 if (fore200e->bus->dma_chunk_alloc(fore200e,
2394 fore200e->bus->descr_alignment) < 0) {
2396 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2400 /* get the base address of the cp resident tx queue entries */
2401 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2403 /* fill the host resident and cp resident tx entries */
2404 for (i=0; i < QUEUE_SIZE_TX; i++) {
2406 txq->host_entry[ i ].status =
2407 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2408 txq->host_entry[ i ].tpd =
2409 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2410 txq->host_entry[ i ].tpd_dma =
2411 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2412 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2414 *txq->host_entry[ i ].status = STATUS_FREE;
2416 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2417 &cp_entry[ i ].status_haddr);
2419 /* although there is a one-to-one mapping of tx queue entries and tpds,
2420 the DMA (physical) base address of each tpd is deliberately not written
2421 into its cp resident entry here: the cp relies on that very write
2422 operation to detect that a new pdu has been submitted for tx */
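/* for illustration only, a simplified sketch of the tx submission step that
   eventually performs that write (see fore200e_send() earlier in this file;
   the actual code likely packs the tpd size together with the address before
   writing, so this is not the literal statement used there):

       entry = &txq->host_entry[ txq->head ];
       *entry->status = STATUS_PENDING;
       fore200e->bus->write(entry->tpd_dma, &entry->cp_entry->tpd_haddr);

   i.e. posting the tpd DMA address into the cp resident entry is what tells
   the cp that a new pdu is ready for transmission */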
2425 /* set the head and tail entries of the queue */
2429 fore200e->state = FORE200E_STATE_INIT_TXQ;
2435 fore200e_init_cmd_queue(struct fore200e* fore200e)
2437 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2438 struct cp_cmdq_entry __iomem * cp_entry;
2441 DPRINTK(2, "command queue is being initialized\n");
2443 /* allocate and align the array of status words */
2444 if (fore200e->bus->dma_chunk_alloc(fore200e,
2446 sizeof(enum status),
2448 fore200e->bus->status_alignment) < 0) {
2452 /* get the base address of the cp resident cmd queue entries */
2453 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2455 /* fill the host resident and cp resident cmd entries */
2456 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2458 cmdq->host_entry[ i ].status =
2459 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2460 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2462 *cmdq->host_entry[ i ].status = STATUS_FREE;
2464 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2465 &cp_entry[ i ].status_haddr);
2468 /* set the head entry of the queue */
2471 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2477 fore200e_param_bs_queue(struct fore200e* fore200e,
2478 enum buffer_scheme scheme, enum buffer_magn magn,
2479 int queue_length, int pool_size, int supply_blksize)
2481 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2483 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2484 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2485 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2486 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2491 fore200e_initialize(struct fore200e* fore200e)
2493 struct cp_queues __iomem * cpq;
2494 int ok, scheme, magn;
2496 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2498 init_MUTEX(&fore200e->rate_sf);
2499 spin_lock_init(&fore200e->q_lock);
2501 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2503 /* enable cp to host interrupts */
2504 fore200e->bus->write(1, &cpq->imask);
2506 if (fore200e->bus->irq_enable)
2507 fore200e->bus->irq_enable(fore200e);
2509 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2511 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2512 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2513 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2515 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2516 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2518 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2519 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2520 fore200e_param_bs_queue(fore200e, scheme, magn,
2522 fore200e_rx_buf_nbr[ scheme ][ magn ],
2525 /* issue the initialize command */
2526 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2527 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
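/* the cp picks up the OPCODE_INITIALIZE command together with the parameters
   written into its init area above, then acknowledges by changing init.status
   from STATUS_PENDING to STATUS_COMPLETE; the poll below waits for that
   transition */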
2529 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2531 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2535 printk(FORE200E "device %s initialized\n", fore200e->name);
2537 fore200e->state = FORE200E_STATE_INITIALIZE;
2543 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2545 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2550 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2555 fore200e_monitor_getc(struct fore200e* fore200e)
2557 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2558 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2561 while (time_before(jiffies, timeout)) {
2563 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2565 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2567 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2569 printk("%c", c & 0xFF);
2580 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2584 /* the i960 monitor doesn't accept any new character if it has something to say */
2585 while (fore200e_monitor_getc(fore200e) >= 0);
2587 fore200e_monitor_putc(fore200e, *str++);
2590 while (fore200e_monitor_getc(fore200e) >= 0);
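/* summary of the soft UART handshake implemented above: the host transmits a
   character by writing it into soft_uart.send with the
   FORE200E_CP_MONITOR_UART_AVAIL bit set; the i960 monitor flags pending
   output by setting the same bit in soft_uart.recv, which the host
   acknowledges by writing FORE200E_CP_MONITOR_UART_FREE back.  a typical use
   is the firmware start command issued by fore200e_start_fw() below, e.g.

       fore200e_monitor_puts(fore200e, "\rgo 10400\r");

   where the hexadecimal offset comes from the firmware header (the value
   shown here is only a placeholder) */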
2595 fore200e_start_fw(struct fore200e* fore200e)
2599 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;
2601 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2603 #if defined(__sparc_v9__)
2604 /* reported to be required by SBA cards on some sparc64 hosts */
2608 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2610 fore200e_monitor_puts(fore200e, cmd);
2612 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
2614 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2618 printk(FORE200E "device %s firmware started\n", fore200e->name);
2620 fore200e->state = FORE200E_STATE_START_FW;
2626 fore200e_load_fw(struct fore200e* fore200e)
2628 u32* fw_data = (u32*) fore200e->bus->fw_data;
2629 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
2631 struct fw_header* fw_header = (struct fw_header*) fw_data;
2633 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2635 DPRINTK(2, "device %s firmware being loaded at 0x%p (%u words)\n",
2636 fore200e->name, load_addr, fw_size);
2638 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2639 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2643 for (; fw_size--; fw_data++, load_addr++)
2644 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2646 fore200e->state = FORE200E_STATE_LOAD_FW;
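/* for reference, the firmware image handled above begins with a small header
   (authoritatively defined in the driver's header; the layout sketched here
   is only inferred from the fields used in this file, and any other members
   are omitted):

       struct fw_header {
           u32 magic;            -- checked against FW_HEADER_MAGIC
           ...
           u32 load_offset;      -- where to copy the image in board memory
           u32 start_offset;     -- entry point passed to the monitor's "go" command
       };

   these words are stored little endian in the image, hence the le32_to_cpu()
   conversions used above and in fore200e_start_fw() */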
2652 fore200e_register(struct fore200e* fore200e)
2654 struct atm_dev* atm_dev;
2656 DPRINTK(2, "device %s being registered\n", fore200e->name);
2658 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2660 if (atm_dev == NULL) {
2661 printk(FORE200E "unable to register device %s\n", fore200e->name);
2665 atm_dev->dev_data = fore200e;
2666 fore200e->atm_dev = atm_dev;
2668 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2669 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2671 fore200e->available_cell_rate = ATM_OC3_PCR;
2673 fore200e->state = FORE200E_STATE_REGISTER;
2679 fore200e_init(struct fore200e* fore200e)
2681 if (fore200e_register(fore200e) < 0)
2684 if (fore200e->bus->configure(fore200e) < 0)
2687 if (fore200e->bus->map(fore200e) < 0)
2690 if (fore200e_reset(fore200e, 1) < 0)
2693 if (fore200e_load_fw(fore200e) < 0)
2696 if (fore200e_start_fw(fore200e) < 0)
2699 if (fore200e_initialize(fore200e) < 0)
2702 if (fore200e_init_cmd_queue(fore200e) < 0)
2705 if (fore200e_init_tx_queue(fore200e) < 0)
2708 if (fore200e_init_rx_queue(fore200e) < 0)
2711 if (fore200e_init_bs_queue(fore200e) < 0)
2714 if (fore200e_alloc_rx_buf(fore200e) < 0)
2717 if (fore200e_get_esi(fore200e) < 0)
2720 if (fore200e_irq_request(fore200e) < 0)
2723 fore200e_supply(fore200e);
2725 /* all done, board initialization is now complete */
2726 fore200e->state = FORE200E_STATE_COMPLETE;
2731 static int __devinit
2732 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2734 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2735 struct fore200e* fore200e;
2737 static int index = 0;
2739 if (pci_enable_device(pci_dev)) {
2744 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
2745 if (fore200e == NULL) {
2750 fore200e->bus = bus;
2751 fore200e->bus_dev = pci_dev;
2752 fore200e->irq = pci_dev->irq;
2753 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2755 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2757 pci_set_master(pci_dev);
2759 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2760 fore200e->bus->model_name,
2761 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2765 err = fore200e_init(fore200e);
2767 fore200e_shutdown(fore200e);
2772 pci_set_drvdata(pci_dev, fore200e);
2780 pci_disable_device(pci_dev);
2785 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2787 struct fore200e *fore200e;
2789 fore200e = pci_get_drvdata(pci_dev);
2791 fore200e_shutdown(fore200e);
2793 pci_disable_device(pci_dev);
2797 #ifdef CONFIG_ATM_FORE200E_PCA
2798 static struct pci_device_id fore200e_pca_tbl[] = {
2799 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2800 0, 0, (unsigned long) &fore200e_bus[0] },
2804 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2806 static struct pci_driver fore200e_pca_driver = {
2807 .name = "fore_200e",
2808 .probe = fore200e_pca_detect,
2809 .remove = __devexit_p(fore200e_pca_remove_one),
2810 .id_table = fore200e_pca_tbl,
2816 fore200e_module_init(void)
2818 const struct fore200e_bus* bus;
2819 struct fore200e* fore200e;
2822 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2824 /* for each configured bus interface */
2825 for (bus = fore200e_bus; bus->model_name; bus++) {
2827 /* detect all boards present on that bus */
2828 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {
2830 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2831 fore200e->bus->model_name,
2832 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2834 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2836 if (fore200e_init(fore200e) < 0) {
2838 fore200e_shutdown(fore200e);
2842 list_add(&fore200e->entry, &fore200e_boards);
2846 #ifdef CONFIG_ATM_FORE200E_PCA
2847 if (!pci_register_driver(&fore200e_pca_driver))
2851 if (!list_empty(&fore200e_boards))
2859 fore200e_module_cleanup(void)
2861 struct fore200e *fore200e, *next;
2863 #ifdef CONFIG_ATM_FORE200E_PCA
2864 pci_unregister_driver(&fore200e_pca_driver);
2867 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
2868 fore200e_shutdown(fore200e);
2871 DPRINTK(1, "module being removed\n");
2876 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2878 struct fore200e* fore200e = FORE200E_DEV(dev);
2879 struct fore200e_vcc* fore200e_vcc;
2880 struct atm_vcc* vcc;
2881 int i, len, left = *pos;
2882 unsigned long flags;
2886 if (fore200e_getstats(fore200e) < 0)
2889 len = sprintf(page,"\n"
2891 " internal name:\t\t%s\n", fore200e->name);
2893 /* print bus-specific information */
2894 if (fore200e->bus->proc_read)
2895 len += fore200e->bus->proc_read(fore200e, page + len);
2897 len += sprintf(page + len,
2898 " interrupt line:\t\t%s\n"
2899 " physical base address:\t0x%p\n"
2900 " virtual base address:\t0x%p\n"
2901 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2902 " board serial number:\t\t%d\n\n",
2903 fore200e_irq_itoa(fore200e->irq),
2904 (void*)fore200e->phys_base,
2905 fore200e->virt_base,
2906 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2907 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
2908 fore200e->esi[4] * 256 + fore200e->esi[5]);
2914 return sprintf(page,
2915 " free small bufs, scheme 1:\t%d\n"
2916 " free large bufs, scheme 1:\t%d\n"
2917 " free small bufs, scheme 2:\t%d\n"
2918 " free large bufs, scheme 2:\t%d\n",
2919 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2920 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2921 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2922 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2925 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2927 len = sprintf(page,"\n\n"
2928 " cell processor:\n"
2929 " heartbeat state:\t\t");
2931 if (hb >> 16 != 0xDEAD)
2932 len += sprintf(page + len, "0x%08x\n", hb);
2934 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2940 static const char* media_name[] = {
2941 "unshielded twisted pair",
2942 "multimode optical fiber ST",
2943 "multimode optical fiber SC",
2944 "single-mode optical fiber ST",
2945 "single-mode optical fiber SC",
2949 static const char* oc3_mode[] = {
2951 "diagnostic loopback",
2956 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2957 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2958 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2959 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2962 if (media_index > 4)  /* media_index is unsigned, so only the upper bound needs checking */
2965 switch (fore200e->loop_mode) {
2966 case ATM_LM_NONE: oc3_index = 0;
2968 case ATM_LM_LOC_PHY: oc3_index = 1;
2970 case ATM_LM_RMT_PHY: oc3_index = 2;
2972 default: oc3_index = 3;
2975 return sprintf(page,
2976 " firmware release:\t\t%d.%d.%d\n"
2977 " monitor release:\t\t%d.%d\n"
2978 " media type:\t\t\t%s\n"
2979 " OC-3 revision:\t\t0x%x\n"
2980 " OC-3 mode:\t\t\t%s",
2981 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2982 mon960_release >> 16, mon960_release << 16 >> 16,
2983 media_name[ media_index ],
2985 oc3_mode[ oc3_index ]);
2989 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2991 return sprintf(page,
2994 " version number:\t\t%d\n"
2995 " boot status word:\t\t0x%08x\n",
2996 fore200e->bus->read(&cp_monitor->mon_version),
2997 fore200e->bus->read(&cp_monitor->bstat));
3001 return sprintf(page,
3003 " device statistics:\n"
3005 " crc_header_errors:\t\t%10u\n"
3006 " framing_errors:\t\t%10u\n",
3007 fore200e_swap(fore200e->stats->phy.crc_header_errors),
3008 fore200e_swap(fore200e->stats->phy.framing_errors));
3011 return sprintf(page, "\n"
3013 " section_bip8_errors:\t%10u\n"
3014 " path_bip8_errors:\t\t%10u\n"
3015 " line_bip24_errors:\t\t%10u\n"
3016 " line_febe_errors:\t\t%10u\n"
3017 " path_febe_errors:\t\t%10u\n"
3018 " corr_hcs_errors:\t\t%10u\n"
3019 " ucorr_hcs_errors:\t\t%10u\n",
3020 fore200e_swap(fore200e->stats->oc3.section_bip8_errors),
3021 fore200e_swap(fore200e->stats->oc3.path_bip8_errors),
3022 fore200e_swap(fore200e->stats->oc3.line_bip24_errors),
3023 fore200e_swap(fore200e->stats->oc3.line_febe_errors),
3024 fore200e_swap(fore200e->stats->oc3.path_febe_errors),
3025 fore200e_swap(fore200e->stats->oc3.corr_hcs_errors),
3026 fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors));
3029 return sprintf(page,"\n"
3030 " ATM:\t\t\t\t cells\n"
3033 " vpi out of range:\t\t%10u\n"
3034 " vpi no conn:\t\t%10u\n"
3035 " vci out of range:\t\t%10u\n"
3036 " vci no conn:\t\t%10u\n",
3037 fore200e_swap(fore200e->stats->atm.cells_transmitted),
3038 fore200e_swap(fore200e->stats->atm.cells_received),
3039 fore200e_swap(fore200e->stats->atm.vpi_bad_range),
3040 fore200e_swap(fore200e->stats->atm.vpi_no_conn),
3041 fore200e_swap(fore200e->stats->atm.vci_bad_range),
3042 fore200e_swap(fore200e->stats->atm.vci_no_conn));
3045 return sprintf(page,"\n"
3046 " AAL0:\t\t\t cells\n"
3049 " dropped:\t\t\t%10u\n",
3050 fore200e_swap(fore200e->stats->aal0.cells_transmitted),
3051 fore200e_swap(fore200e->stats->aal0.cells_received),
3052 fore200e_swap(fore200e->stats->aal0.cells_dropped));
3055 return sprintf(page,"\n"
3057 " SAR sublayer:\t\t cells\n"
3060 " dropped:\t\t\t%10u\n"
3061 " CRC errors:\t\t%10u\n"
3062 " protocol errors:\t\t%10u\n\n"
3063 " CS sublayer:\t\t PDUs\n"
3066 " dropped:\t\t\t%10u\n"
3067 " protocol errors:\t\t%10u\n",
3068 fore200e_swap(fore200e->stats->aal34.cells_transmitted),
3069 fore200e_swap(fore200e->stats->aal34.cells_received),
3070 fore200e_swap(fore200e->stats->aal34.cells_dropped),
3071 fore200e_swap(fore200e->stats->aal34.cells_crc_errors),
3072 fore200e_swap(fore200e->stats->aal34.cells_protocol_errors),
3073 fore200e_swap(fore200e->stats->aal34.cspdus_transmitted),
3074 fore200e_swap(fore200e->stats->aal34.cspdus_received),
3075 fore200e_swap(fore200e->stats->aal34.cspdus_dropped),
3076 fore200e_swap(fore200e->stats->aal34.cspdus_protocol_errors));
3079 return sprintf(page,"\n"
3081 " SAR sublayer:\t\t cells\n"
3084 " dropped:\t\t\t%10u\n"
3085 " congestions:\t\t%10u\n\n"
3086 " CS sublayer:\t\t PDUs\n"
3089 " dropped:\t\t\t%10u\n"
3090 " CRC errors:\t\t%10u\n"
3091 " protocol errors:\t\t%10u\n",
3092 fore200e_swap(fore200e->stats->aal5.cells_transmitted),
3093 fore200e_swap(fore200e->stats->aal5.cells_received),
3094 fore200e_swap(fore200e->stats->aal5.cells_dropped),
3095 fore200e_swap(fore200e->stats->aal5.congestion_experienced),
3096 fore200e_swap(fore200e->stats->aal5.cspdus_transmitted),
3097 fore200e_swap(fore200e->stats->aal5.cspdus_received),
3098 fore200e_swap(fore200e->stats->aal5.cspdus_dropped),
3099 fore200e_swap(fore200e->stats->aal5.cspdus_crc_errors),
3100 fore200e_swap(fore200e->stats->aal5.cspdus_protocol_errors));
3103 return sprintf(page,"\n"
3104 " AUX:\t\t allocation failures\n"
3105 " small b1:\t\t\t%10u\n"
3106 " large b1:\t\t\t%10u\n"
3107 " small b2:\t\t\t%10u\n"
3108 " large b2:\t\t\t%10u\n"
3109 " RX PDUs:\t\t\t%10u\n"
3110 " TX PDUs:\t\t\t%10lu\n",
3111 fore200e_swap(fore200e->stats->aux.small_b1_failed),
3112 fore200e_swap(fore200e->stats->aux.large_b1_failed),
3113 fore200e_swap(fore200e->stats->aux.small_b2_failed),
3114 fore200e_swap(fore200e->stats->aux.large_b2_failed),
3115 fore200e_swap(fore200e->stats->aux.rpd_alloc_failed),
3119 return sprintf(page,"\n"
3120 " receive carrier:\t\t\t%s\n",
3121 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3124 return sprintf(page,"\n"
3125 " VCCs:\n address VPI VCI AAL "
3126 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3129 for (i = 0; i < NBR_CONNECT; i++) {
3131 vcc = fore200e->vc_map[i].vcc;
3136 spin_lock_irqsave(&fore200e->q_lock, flags);
3138 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3140 fore200e_vcc = FORE200E_VCC(vcc);
3141 ASSERT(fore200e_vcc);
3144 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3145 (u32)(unsigned long)vcc,
3146 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3147 fore200e_vcc->tx_pdu,
3148 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3149 fore200e_vcc->tx_max_pdu,
3150 fore200e_vcc->rx_pdu,
3151 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3152 fore200e_vcc->rx_max_pdu);
3154 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3158 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3164 module_init(fore200e_module_init);
3165 module_exit(fore200e_module_cleanup);
3168 static const struct atmdev_ops fore200e_ops =
3170 .open = fore200e_open,
3171 .close = fore200e_close,
3172 .ioctl = fore200e_ioctl,
3173 .getsockopt = fore200e_getsockopt,
3174 .setsockopt = fore200e_setsockopt,
3175 .send = fore200e_send,
3176 .change_qos = fore200e_change_qos,
3177 .proc_read = fore200e_proc_read,
3178 .owner = THIS_MODULE
3182 #ifdef CONFIG_ATM_FORE200E_PCA
3183 extern const unsigned char _fore200e_pca_fw_data[];
3184 extern const unsigned int _fore200e_pca_fw_size;
3186 #ifdef CONFIG_ATM_FORE200E_SBA
3187 extern const unsigned char _fore200e_sba_fw_data[];
3188 extern const unsigned int _fore200e_sba_fw_size;
3191 static const struct fore200e_bus fore200e_bus[] = {
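/* the entries below use positional initializers: each one apparently lists
   the model name, the /proc name, the three DMA alignment constraints read
   back earlier in this file as descr_alignment, buffer_alignment and
   status_alignment (the middle value being the one that differs between the
   PCI and SBus flavours), then the firmware image and its size, followed by
   the per-bus method pointers; see the struct fore200e_bus definition in the
   driver's header for the authoritative field order */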
3192 #ifdef CONFIG_ATM_FORE200E_PCA
3193 { "PCA-200E", "pca200e", 32, 4, 32,
3194 _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
3197 fore200e_pca_dma_map,
3198 fore200e_pca_dma_unmap,
3199 fore200e_pca_dma_sync_for_cpu,
3200 fore200e_pca_dma_sync_for_device,
3201 fore200e_pca_dma_chunk_alloc,
3202 fore200e_pca_dma_chunk_free,
3204 fore200e_pca_configure,
3207 fore200e_pca_prom_read,
3210 fore200e_pca_irq_check,
3211 fore200e_pca_irq_ack,
3212 fore200e_pca_proc_read,
3215 #ifdef CONFIG_ATM_FORE200E_SBA
3216 { "SBA-200E", "sba200e", 32, 64, 32,
3217 _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
3220 fore200e_sba_dma_map,
3221 fore200e_sba_dma_unmap,
3222 fore200e_sba_dma_sync_for_cpu,
3223 fore200e_sba_dma_sync_for_device,
3224 fore200e_sba_dma_chunk_alloc,
3225 fore200e_sba_dma_chunk_free,
3226 fore200e_sba_detect,
3227 fore200e_sba_configure,
3230 fore200e_sba_prom_read,
3232 fore200e_sba_irq_enable,
3233 fore200e_sba_irq_check,
3234 fore200e_sba_irq_ack,
3235 fore200e_sba_proc_read,
3241 #ifdef MODULE_LICENSE
3242 MODULE_LICENSE("GPL");