2 A FORE Systems 200E-series driver for ATM on Linux.
3 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
5 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
7 This driver simultaneously supports PCA-200E and SBA-200E adapters
8 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/capability.h>
30 #include <linux/interrupt.h>
31 #include <linux/bitops.h>
32 #include <linux/pci.h>
33 #include <linux/module.h>
34 #include <linux/atmdev.h>
35 #include <linux/sonet.h>
36 #include <linux/atm_suni.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/delay.h>
40 #include <asm/string.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
46 #include <asm/atomic.h>
48 #ifdef CONFIG_ATM_FORE200E_SBA
49 #include <asm/idprom.h>
51 #include <asm/openprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pgtable.h>
56 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
57 #define FORE200E_USE_TASKLET
60 #if 0 /* enable the debugging code of the buffer supply queues */
61 #define FORE200E_BSQ_DEBUG
64 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
65 #define FORE200E_52BYTE_AAL0_SDU
71 #define FORE200E_VERSION "0.3e"
73 #define FORE200E "fore200e: "
75 #if 0 /* override .config */
76 #define CONFIG_ATM_FORE200E_DEBUG 1
78 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
79 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
80 printk(FORE200E format, ##args); } while (0)
82 #define DPRINTK(level, format, args...) do {} while (0)
/*
 * Byte offset to add to 'addr' so that (addr + offset) falls on an
 * 'alignment'-byte boundary.  'alignment' must be a power of two.
 * Fix: the original expansion used bare "(alignment - 1)", so an
 * expression argument (e.g. "a | b") would expand incorrectly; every
 * use of the argument is now fully parenthesized.
 */
#define FORE200E_ALIGN(addr, alignment) \
        ((((unsigned long)(addr) + ((alignment) - 1)) & ~((unsigned long)(alignment) - 1)) - (unsigned long)(addr))
/* DMA (bus) address of the 'index'-th element of an array of 'type' whose
   first element lives at bus address 'dma_addr'. */
#define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + sizeof(type) * (index))
/*
 * Host virtual address of the 'index'-th element of an array of 'type'
 * starting at 'virt_addr'.  Fix: 'index' is now parenthesized inside the
 * subscript so that expression arguments expand with the intended
 * precedence (standard macro hygiene).
 */
#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ (index) ])
/*
 * Advance a circular queue index, wrapping at 'modulo'.
 * Fix: the original expansion "index = ++(index) % (modulo)" modified
 * 'index' twice between sequence points, which is undefined behavior in
 * C (the compiler may produce either old or new semantics).  Compute the
 * successor from a single read of 'index' instead; the intended result
 * (increment-then-wrap) is unchanged.
 */
#define FORE200E_NEXT_ENTRY(index, modulo)     (index = ((index) + 1) % (modulo))
/* Debug assertion: log the failing expression and panic.  NOTE(review):
   the "if (...) { ... }" form (rather than do { ... } while (0)) is
   dangling-else prone; the closing brace and the #else/#endif lines of
   this conditional are elided from this excerpt. */
96 #define ASSERT(expr) if (!(expr)) { \
97 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
98 __FUNCTION__, __LINE__, #expr); \
99 panic(FORE200E "%s", __FUNCTION__); \
/* no-op variant when assertions are compiled out */
102 #define ASSERT(expr) do {} while (0)
/* forward declarations of the ATM device ops and per-bus method tables
   defined later in the file */
106 static const struct atmdev_ops fore200e_ops;
107 static const struct fore200e_bus fore200e_bus[];
/* list of all probed boards */
109 static LIST_HEAD(fore200e_boards);
/* module metadata */
112 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
113 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
114 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
/* number of rx buffers per [scheme][magnitude] pair
   (closing brace of the initializer is elided from this excerpt) */
117 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
118 { BUFFER_S1_NBR, BUFFER_L1_NBR },
119 { BUFFER_S2_NBR, BUFFER_L2_NBR }
/* size of rx buffers per [scheme][magnitude] pair
   (closing brace of the initializer is elided from this excerpt) */
122 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
123 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
124 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
/* printable names of traffic classes, used by debug output only */
128 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
129 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
/* map FORE firmware AAL codes to linux-atm AAL codes (compiled out;
   the switch framing and return type are elided from this excerpt) */
133 #if 0 /* currently unused */
135 fore200e_fore2atm_aal(enum fore200e_aal aal)
138 case FORE200E_AAL0: return ATM_AAL0;
139 case FORE200E_AAL34: return ATM_AAL34;
140 case FORE200E_AAL5: return ATM_AAL5;
/* map linux-atm AAL codes to FORE firmware AAL codes
   (switch framing and default/error case are elided from this excerpt) */
148 static enum fore200e_aal
149 fore200e_atm2fore_aal(int aal)
152 case ATM_AAL0: return FORE200E_AAL0;
153 case ATM_AAL34: return FORE200E_AAL34;
156 case ATM_AAL5: return FORE200E_AAL5;
/* format an IRQ number as a decimal string; the buffer declaration and
   return statement are elided from this excerpt */
164 fore200e_irq_itoa(int irq)
167 sprintf(str, "%d", irq);
172 /* allocate and align a chunk of memory intended to hold the data being exchanged
173 between the driver and the adapter (using streaming DVMA) */
/* Allocate a chunk of kernel memory, over-allocating by 'alignment'
   bytes so an aligned sub-region can be carved out, then stream-map the
   aligned region for DMA in the given direction.  Interior lines (the
   small-alignment shortcut and the error return) are elided from this
   excerpt. */
176 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
178 unsigned long offset = 0;
180 if (alignment <= sizeof(int))
183 chunk->alloc_size = size + alignment;
184 chunk->align_size = size;
185 chunk->direction = direction;
/* GFP_DMA: the mapped buffer is handed to the adapter for bus-master DMA */
187 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
188 if (chunk->alloc_addr == NULL)
/* offset of the first 'alignment'-aligned byte within the allocation */
192 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
194 chunk->align_addr = chunk->alloc_addr + offset;
196 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
202 /* free a chunk of memory */
/* Undo fore200e_chunk_alloc: remove the streaming DMA mapping, then free
   the underlying allocation.  NOTE(review): the alloc path maps
   chunk->align_size but this unmaps chunk->dma_size — confirm dma_size is
   set to the mapped length elsewhere (lines elided from this excerpt). */
205 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
207 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
209 kfree(chunk->alloc_addr);
/* busy-wait for 'msecs' milliseconds (burns CPU; no scheduling) */
214 fore200e_spin(int msecs)
216 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
217 while (time_before(jiffies, timeout));
/* Busy-poll a host-memory status word until it equals 'val', an error
   bit (STATUS_ERROR) is raised, or 'msecs' elapse.  The loop framing,
   return value and printk arguments are partly elided from this
   excerpt. */
222 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
224 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
229 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
232 } while (time_before(jiffies, timeout));
236 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
/* Like fore200e_poll, but reads a device register through the bus-specific
   read method instead of plain host memory; no error-bit shortcut here.
   Loop framing and return are partly elided from this excerpt. */
246 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
248 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
252 if ((ok = (fore200e->bus->read(addr) == val)))
255 } while (time_before(jiffies, timeout));
259 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
260 fore200e->bus->read(addr), val);
/* Release every allocated rx data buffer of every buffer-supply queue
   (all scheme/magnitude combinations). */
269 fore200e_free_rx_buf(struct fore200e* fore200e)
271 int scheme, magn, nbr;
272 struct buffer* buffer;
274 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
275 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
277 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
279 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
281 struct chunk* data = &buffer[ nbr ].data;
/* only free buffers whose allocation actually succeeded */
283 if (data->alloc_addr != NULL)
284 fore200e_chunk_free(fore200e, data);
/* Free the DMA-consistent status words and RBD blocks of every
   buffer-supply queue. */
293 fore200e_uninit_bs_queue(struct fore200e* fore200e)
297 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
298 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
300 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
301 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
303 if (status->alloc_addr)
304 fore200e->bus->dma_chunk_free(fore200e, status);
306 if (rbd_block->alloc_addr)
307 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
/* Reset the board and wait (up to 1s) for its self-test to complete.
   'diag' selects whether self-test results are reported; parts of the
   control flow are elided from this excerpt. */
314 fore200e_reset(struct fore200e* fore200e, int diag)
318 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
/* tell the firmware monitor we expect a cold start */
320 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
322 fore200e->bus->reset(fore200e);
325 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
328 printk(FORE200E "device %s self-test failed\n", fore200e->name);
332 printk(FORE200E "device %s self-test passed\n", fore200e->name);
334 fore200e->state = FORE200E_STATE_RESET;
/* Tear down a device.  The switch walks the initialization state machine
   backwards from the state reached at probe time; each case intentionally
   falls through to the next so all resources acquired up to that state are
   released. */
342 fore200e_shutdown(struct fore200e* fore200e)
344 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
345 fore200e->name, fore200e->phys_base,
346 fore200e_irq_itoa(fore200e->irq));
348 if (fore200e->state > FORE200E_STATE_RESET) {
349 /* first, reset the board to prevent further interrupts or data transfers */
350 fore200e_reset(fore200e, 0);
353 /* then, release all allocated resources */
354 switch(fore200e->state) {
356 case FORE200E_STATE_COMPLETE:
357 kfree(fore200e->stats);
/* fallthrough (intentional, here and below) */
359 case FORE200E_STATE_IRQ:
360 free_irq(fore200e->irq, fore200e->atm_dev);
362 case FORE200E_STATE_ALLOC_BUF:
363 fore200e_free_rx_buf(fore200e);
365 case FORE200E_STATE_INIT_BSQ:
366 fore200e_uninit_bs_queue(fore200e);
368 case FORE200E_STATE_INIT_RXQ:
369 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
370 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
372 case FORE200E_STATE_INIT_TXQ:
373 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
374 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
376 case FORE200E_STATE_INIT_CMDQ:
377 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
379 case FORE200E_STATE_INITIALIZE:
380 /* nothing to do for that state */
382 case FORE200E_STATE_START_FW:
383 /* nothing to do for that state */
385 case FORE200E_STATE_LOAD_FW:
386 /* nothing to do for that state */
388 case FORE200E_STATE_RESET:
389 /* nothing to do for that state */
391 case FORE200E_STATE_MAP:
392 fore200e->bus->unmap(fore200e);
394 case FORE200E_STATE_CONFIGURE:
395 /* nothing to do for that state */
397 case FORE200E_STATE_REGISTER:
398 /* XXX shouldn't we *start* by deregistering the device? */
399 atm_dev_deregister(fore200e->atm_dev);
401 case FORE200E_STATE_BLANK:
402 /* nothing to do for that state */
408 #ifdef CONFIG_ATM_FORE200E_PCA
/* PCA-200E register accessors: 32-bit little-endian MMIO reads/writes. */
410 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
412 /* on big-endian hosts, the board is configured to convert
413 the endianness of slave RAM accesses */
414 return le32_to_cpu(readl(addr))
418 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
420 /* on big-endian hosts, the board is configured to convert
421 the endianness of slave RAM accesses */
422 writel(cpu_to_le32(val), addr);
/* PCA-200E streaming-DMA helpers: thin wrappers over the PCI DMA API,
   with debug tracing of every mapping operation. */
427 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
429 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
431 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
432 virt_addr, size, direction, dma_addr);
439 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
441 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
442 dma_addr, size, direction);
444 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
/* make a device-written region visible to the CPU */
449 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
451 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
453 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
/* hand the region back to the device */
457 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
459 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
461 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
465 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
466 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
469 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
470 int size, int nbr, int alignment)
472 /* returned chunks are page-aligned */
473 chunk->alloc_size = size * nbr;
/* NOTE(review): the remaining pci_alloc_consistent arguments (length and
   &chunk->dma_addr) are elided from this excerpt */
474 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
478 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
481 chunk->align_addr = chunk->alloc_addr;
487 /* free a DMA consistent chunk of memory */
490 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
492 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
/* Test whether this board raised the interrupt (PSR is a 1-bit latch). */
500 fore200e_pca_irq_check(struct fore200e* fore200e)
502 /* this is a 1 bit register */
503 int irq_posted = readl(fore200e->regs.pca.psr);
505 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
506 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
507 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
/* acknowledge the posted interrupt via the host control register */
516 fore200e_pca_irq_ack(struct fore200e* fore200e)
518 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
/* hard-reset the board (a settle delay between the two writes is elided
   from this excerpt) */
523 fore200e_pca_reset(struct fore200e* fore200e)
525 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
527 writel(0, fore200e->regs.pca.hcr);
/* Map the board's PCI memory window and locate the PCA-specific
   registers within it. */
532 fore200e_pca_map(struct fore200e* fore200e)
534 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
536 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
538 if (fore200e->virt_base == NULL) {
539 printk(FORE200E "can't map device %s\n", fore200e->name);
543 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
545 /* gain access to the PCA specific registers */
546 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
547 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
548 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
550 fore200e->state = FORE200E_STATE_MAP;
/* undo fore200e_pca_map */
556 fore200e_pca_unmap(struct fore200e* fore200e)
558 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
560 if (fore200e->virt_base != NULL)
561 iounmap(fore200e->virt_base);
/* PCI-side configuration: sanity-check the IRQ, tune the board's master
   control register, and raise the PCI latency timer. */
566 fore200e_pca_configure(struct fore200e* fore200e)
568 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
569 u8 master_ctrl, latency;
571 DPRINTK(2, "device %s being configured\n", fore200e->name);
573 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
574 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
578 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
580 master_ctrl = master_ctrl
581 #if defined(__BIG_ENDIAN)
582 /* request the PCA board to convert the endianness of slave RAM accesses */
583 | PCA200E_CTRL_CONVERT_ENDIAN
586 | PCA200E_CTRL_DIS_CACHE_RD
587 | PCA200E_CTRL_DIS_WRT_INVAL
588 | PCA200E_CTRL_ENA_CONT_REQ_MODE
589 | PCA200E_CTRL_2_CACHE_WRT_INVAL
591 | PCA200E_CTRL_LARGE_PCI_BURSTS;
593 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
595 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
596 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
597 this may impact the performances of other PCI devices on the same bus, though */
/* NOTE(review): the assignment of 'latency' (presumably 192, per the
   comment above) is elided from this excerpt — confirm against line 598 */
599 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
601 fore200e->state = FORE200E_STATE_CONFIGURE;
/* Read the board PROM (MAC address, serial number, ...) by posting a
   GET_PROM command on the command queue and polling for completion.
   Some interior lines (opcode padding, error return) are elided from
   this excerpt. */
607 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
609 struct host_cmdq* cmdq = &fore200e->host_cmdq;
610 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
611 struct prom_opcode opcode;
615 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
617 opcode.opcode = OPCODE_GET_PROM;
/* map the PROM destination buffer so the adapter can DMA into it */
620 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
622 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
624 *entry->status = STATUS_PENDING;
/* writing the opcode kicks off the command on the adapter */
626 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
628 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
630 *entry->status = STATUS_FREE;
632 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
635 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
639 #if defined(__BIG_ENDIAN)
641 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
643 /* MAC address is stored as little-endian */
644 swap_here(&prom->mac_addr[0]);
645 swap_here(&prom->mac_addr[4]);
/* contribute the PCI location line to the /proc output for this device */
653 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
655 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
657 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
658 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
661 #endif /* CONFIG_ATM_FORE200E_PCA */
664 #ifdef CONFIG_ATM_FORE200E_SBA
/* SBA-200E register accessors: 32-bit SBus MMIO reads/writes. */
667 fore200e_sba_read(volatile u32 __iomem *addr)
669 return sbus_readl(addr);
674 fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
676 sbus_writel(val, addr);
/* SBA-200E streaming-DVMA helpers: thin wrappers over the SBus DMA API,
   mirroring the PCA versions above. */
681 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
683 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
685 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
686 virt_addr, size, direction, dma_addr);
693 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
695 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
696 dma_addr, size, direction);
698 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
/* make a device-written region visible to the CPU */
703 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
705 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
707 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
/* hand the region back to the device */
711 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
713 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
715 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
719 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
720 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
723 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
724 int size, int nbr, int alignment)
726 chunk->alloc_size = chunk->align_size = size * nbr;
728 /* returned chunks are page-aligned */
/* NOTE(review): the remaining sbus_alloc_consistent arguments (length and
   &chunk->dma_addr) are elided from this excerpt */
729 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
733 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
736 chunk->align_addr = chunk->alloc_addr;
742 /* free a DVMA consistent chunk of memory */
745 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
747 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
/* Enable interrupt delivery; sticky HCR bits are preserved on rewrite. */
755 fore200e_sba_irq_enable(struct fore200e* fore200e)
757 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
758 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
/* test whether this board raised the interrupt */
763 fore200e_sba_irq_check(struct fore200e* fore200e)
765 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
/* acknowledge a posted interrupt, preserving sticky HCR bits */
770 fore200e_sba_irq_ack(struct fore200e* fore200e)
772 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
773 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
/* hard-reset the board (a settle delay between the two writes is elided
   from this excerpt) */
778 fore200e_sba_reset(struct fore200e* fore200e)
780 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
782 fore200e->bus->write(0, fore200e->regs.sba.hcr);
/* Map the four SBus resources (HCR, BSR, ISR registers and board RAM),
   set the interrupt level, and configure DVMA burst sizes.  Error
   handling for the first three mappings is elided from this excerpt. */
787 fore200e_sba_map(struct fore200e* fore200e)
789 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
792 /* gain access to the SBA specific registers */
793 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
794 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
795 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
796 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
798 if (fore200e->virt_base == NULL) {
799 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
803 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
805 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
807 /* get the supported DVMA burst sizes */
808 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
810 if (sbus_can_dma_64bit(sbus_dev))
811 sbus_set_sbus64(sbus_dev, bursts);
813 fore200e->state = FORE200E_STATE_MAP;
/* undo fore200e_sba_map: release all four SBus mappings */
819 fore200e_sba_unmap(struct fore200e* fore200e)
821 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
822 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
823 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
824 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
/* no SBus-side configuration needed; just advance the state machine */
829 fore200e_sba_configure(struct fore200e* fore200e)
831 fore200e->state = FORE200E_STATE_CONFIGURE;
/* Walk all SBus devices looking for the 'index'-th SBA-200E board and
   allocate/initialize its fore200e descriptor.  Loop-exit logic and the
   final return are elided from this excerpt. */
836 static struct fore200e* __init
837 fore200e_sba_detect(const struct fore200e_bus* bus, int index)
839 struct fore200e* fore200e;
840 struct sbus_bus* sbus_bus;
841 struct sbus_dev* sbus_dev = NULL;
843 unsigned int count = 0;
845 for_each_sbus (sbus_bus) {
846 for_each_sbusdev (sbus_dev, sbus_bus) {
847 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
/* sanity check: an SBA-200E exposes exactly 4 register resources */
857 if (sbus_dev->num_registers != 4) {
858 printk(FORE200E "this %s device has %d instead of 4 registers\n",
859 bus->model_name, sbus_dev->num_registers);
863 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
864 if (fore200e == NULL)
868 fore200e->bus_dev = sbus_dev;
869 fore200e->irq = sbus_dev->irqs[ 0 ];
871 fore200e->phys_base = (unsigned long)sbus_dev;
873 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
/* Fill the prom_data from OpenPROM properties (MAC address halves,
   serial number, hardware revision).  Length checks after each
   prom_getproperty call are elided from this excerpt. */
880 fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
882 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
885 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
889 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
893 prom_getproperty(sbus_dev->prom_node, "serialnumber",
894 (char*)&prom->serial_number, sizeof(prom->serial_number));
896 prom_getproperty(sbus_dev->prom_node, "promversion",
897 (char*)&prom->hw_revision, sizeof(prom->hw_revision));
/* contribute the SBus location line to the /proc output for this device */
904 fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
906 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
908 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
910 #endif /* CONFIG_ATM_FORE200E_SBA */
/* Reap completed tx entries from the tail of the tx queue: unmap the
   DMA buffer, notify (pop) or drop the skb depending on whether the vcc
   is still the same incarnation, update stats, and free the entry.
   The surrounding loop and several closing braces are elided from this
   excerpt. */
914 fore200e_tx_irq(struct fore200e* fore200e)
916 struct host_txq* txq = &fore200e->host_txq;
917 struct host_txq_entry* entry;
919 struct fore200e_vc_map* vc_map;
/* nothing in flight: nothing to reap */
921 if (fore200e->host_txq.txing == 0)
926 entry = &txq->host_entry[ txq->tail ];
/* stop at the first not-yet-completed entry */
928 if ((*entry->status & STATUS_COMPLETE) == 0) {
932 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
933 entry, txq->tail, entry->vc_map, entry->skb);
935 /* free copy of misaligned data */
938 /* remove DMA mapping */
939 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
942 vc_map = entry->vc_map;
944 /* vcc closed since the time the entry was submitted for tx? */
945 if ((vc_map->vcc == NULL) ||
946 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
948 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
949 fore200e->atm_dev->number);
951 dev_kfree_skb_any(entry->skb);
956 /* vcc closed then immediately re-opened? */
957 if (vc_map->incarn != entry->incarn) {
959 /* when a vcc is closed, some PDUs may be still pending in the tx queue.
960 if the same vcc is immediately re-opened, those pending PDUs must
961 not be popped after the completion of their emission, as they refer
962 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
963 would be decremented by the size of the (unrelated) skb, possibly
964 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
965 we thus bind the tx entry to the current incarnation of the vcc
966 when the entry is submitted for tx. When the tx later completes,
967 if the incarnation number of the tx entry does not match the one
968 of the vcc, then this implies that the vcc has been closed then re-opened.
969 we thus just drop the skb here. */
971 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
972 fore200e->atm_dev->number);
974 dev_kfree_skb_any(entry->skb);
980 /* notify tx completion */
982 vcc->pop(vcc, entry->skb);
985 dev_kfree_skb_any(entry->skb);
988 /* race fixed by the above incarnation mechanism, but... */
989 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
990 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
993 /* check error condition */
994 if (*entry->status & STATUS_ERROR)
995 atomic_inc(&vcc->stats->tx_err);
997 atomic_inc(&vcc->stats->tx);
/* recycle the queue entry and advance the tail */
1001 *entry->status = STATUS_FREE;
1003 fore200e->host_txq.txing--;
1005 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
1010 #ifdef FORE200E_BSQ_DEBUG
/* Debug-only consistency check of a buffer-supply free list: every
   buffer must be unsupplied, belong to this scheme/magnitude, have an
   in-range index, and the list length must match freebuf_count.  The
   walk loop framing is elided from this excerpt. */
1011 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
1013 struct buffer* buffer;
1016 buffer = bsq->freebuf;
1019 if (buffer->supplied) {
1020 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
1021 where, scheme, magn, buffer->index);
1024 if (buffer->magn != magn) {
1025 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
1026 where, scheme, magn, buffer->index, buffer->magn);
1029 if (buffer->scheme != scheme) {
1030 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
1031 where, scheme, magn, buffer->index, buffer->scheme);
1034 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
1035 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
1036 where, scheme, magn, buffer->index);
1040 buffer = buffer->next;
1043 if (count != bsq->freebuf_count) {
1044 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
1045 where, scheme, magn, count, bsq->freebuf_count);
/* Replenish the adapter with rx buffers: for each buffer-supply queue,
   while at least RBD_BLK_SIZE free buffers exist, fill an RBD block from
   the free list and hand it to the adapter.  Some closing braces and
   #endif lines are elided from this excerpt. */
1053 fore200e_supply(struct fore200e* fore200e)
1055 int scheme, magn, i;
1057 struct host_bsq* bsq;
1058 struct host_bsq_entry* entry;
1059 struct buffer* buffer;
1061 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1062 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1064 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1066 #ifdef FORE200E_BSQ_DEBUG
1067 bsq_audit(1, bsq, scheme, magn);
1069 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1071 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1072 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1074 entry = &bsq->host_entry[ bsq->head ];
1076 for (i = 0; i < RBD_BLK_SIZE; i++) {
1078 /* take the first buffer in the free buffer list */
1079 buffer = bsq->freebuf;
/* free list shorter than freebuf_count claims: internal inconsistency */
1081 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1082 scheme, magn, bsq->freebuf_count);
1085 bsq->freebuf = buffer->next;
1087 #ifdef FORE200E_BSQ_DEBUG
1088 if (buffer->supplied)
1089 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1090 scheme, magn, buffer->index);
1091 buffer->supplied = 1;
1093 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1094 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
1097 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1099 /* decrease accordingly the number of free rx buffers */
1100 bsq->freebuf_count -= RBD_BLK_SIZE;
/* hand the RBD block to the adapter and mark the entry in flight */
1102 *entry->status = STATUS_PENDING;
1103 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
/* Reassemble a received PDU from its segments into a freshly allocated
   skb and push it up to the vcc, enforcing the socket rx budget via
   atm_charge.  Optionally prepends the 4-byte ATM cell header for
   52-byte AAL0 SDUs.  Declarations of i/pdu_len, returns, and several
   closing braces are elided from this excerpt. */
1111 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1113 struct sk_buff* skb;
1114 struct buffer* buffer;
1115 struct fore200e_vcc* fore200e_vcc;
1117 #ifdef FORE200E_52BYTE_AAL0_SDU
1118 u32 cell_header = 0;
1123 fore200e_vcc = FORE200E_VCC(vcc);
1124 ASSERT(fore200e_vcc);
1126 #ifdef FORE200E_52BYTE_AAL0_SDU
/* atmdump-like apps expect the raw 4-byte cell header in front of AAL0 payload */
1127 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1129 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1130 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1131 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1132 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1133 rpd->atm_header.clp;
1138 /* compute total PDU length */
1139 for (i = 0; i < rpd->nseg; i++)
1140 pdu_len += rpd->rsd[ i ].length;
1142 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1144 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1146 atomic_inc(&vcc->stats->rx_drop);
1150 __net_timestamp(skb);
1152 #ifdef FORE200E_52BYTE_AAL0_SDU
1154 *((u32*)skb_put(skb, 4)) = cell_header;
1158 /* reassemble segments */
1159 for (i = 0; i < rpd->nseg; i++) {
1161 /* rebuild rx buffer address from rsd handle */
1162 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1164 /* Make device DMA transfer visible to CPU. */
1165 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1167 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1169 /* Now let the device get at it again. */
1170 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1173 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
/* per-vcc PDU size statistics */
1175 if (pdu_len < fore200e_vcc->rx_min_pdu)
1176 fore200e_vcc->rx_min_pdu = pdu_len;
1177 if (pdu_len > fore200e_vcc->rx_max_pdu)
1178 fore200e_vcc->rx_max_pdu = pdu_len;
1179 fore200e_vcc->rx_pdu++;
/* charge the socket's rx budget; drop the PDU if it is exhausted */
1182 if (atm_charge(vcc, skb->truesize) == 0) {
1184 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1185 vcc->itf, vcc->vpi, vcc->vci);
1187 dev_kfree_skb_any(skb);
1189 atomic_inc(&vcc->stats->rx_drop);
1193 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1195 vcc->push(vcc, skb);
1196 atomic_inc(&vcc->stats->rx);
1198 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
/* Return every rx buffer referenced by a receive PDU descriptor to the
   free list of its buffer-supply queue. */
1205 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1207 struct host_bsq* bsq;
1208 struct buffer* buffer;
1211 for (i = 0; i < rpd->nseg; i++) {
1213 /* rebuild rx buffer address from rsd handle */
1214 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1216 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1218 #ifdef FORE200E_BSQ_DEBUG
1219 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1221 if (buffer->supplied == 0)
1222 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1223 buffer->scheme, buffer->magn, buffer->index);
1224 buffer->supplied = 0;
1227 /* re-insert the buffer into the free buffer list */
1228 buffer->next = bsq->freebuf;
1229 bsq->freebuf = buffer;
1231 /* then increment the number of free rx buffers */
1232 bsq->freebuf_count++;
/* Drain completed entries from the rx queue head: deliver each PDU to
   its vcc (or count an error/drop), recycle the rx buffers, re-arm the
   queue entry, and finally resupply the adapter with fresh buffers.
   The loop framing and some closing braces are elided from this
   excerpt. */
1238 fore200e_rx_irq(struct fore200e* fore200e)
1240 struct host_rxq* rxq = &fore200e->host_rxq;
1241 struct host_rxq_entry* entry;
1242 struct atm_vcc* vcc;
1243 struct fore200e_vc_map* vc_map;
1247 entry = &rxq->host_entry[ rxq->head ];
1249 /* no more received PDUs */
1250 if ((*entry->status & STATUS_COMPLETE) == 0)
1253 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
/* drop PDUs for which no open, ready vcc exists */
1255 if ((vc_map->vcc == NULL) ||
1256 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1258 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1259 fore200e->atm_dev->number,
1260 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1266 if ((*entry->status & STATUS_ERROR) == 0) {
1268 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1271 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1272 fore200e->atm_dev->number,
1273 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1274 atomic_inc(&vcc->stats->rx_err);
1278 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
/* give the rx buffers of this PDU back to their free lists */
1280 fore200e_collect_rpd(fore200e, entry->rpd);
1282 /* rewrite the rpd address to ack the received PDU */
1283 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1284 *entry->status = STATUS_FREE;
/* top the adapter back up with rx buffers */
1286 fore200e_supply(fore200e);
1291 #ifndef FORE200E_USE_TASKLET
/* Non-tasklet interrupt work: run rx then tx processing, each under the
   queue lock with interrupts disabled. */
1293 fore200e_irq(struct fore200e* fore200e)
1295 unsigned long flags;
1297 spin_lock_irqsave(&fore200e->q_lock, flags);
1298 fore200e_rx_irq(fore200e);
1299 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1301 spin_lock_irqsave(&fore200e->q_lock, flags);
1302 fore200e_tx_irq(fore200e);
1303 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/*
 * fore200e_interrupt - top-level (shared) IRQ handler.
 * @dev is the struct atm_dev registered for this adapter. First asks the
 * bus layer whether this device actually raised the interrupt (the line
 * may be shared); if so, either schedules the rx/tx tasklets or runs
 * fore200e_irq() inline, then acks the interrupt at the device.
 */
1309 fore200e_interrupt(int irq, void* dev)
1311 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1313 if (fore200e->bus->irq_check(fore200e) == 0) {
1315 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1318 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1320 #ifdef FORE200E_USE_TASKLET
1321 tasklet_schedule(&fore200e->tx_tasklet);
1322 tasklet_schedule(&fore200e->rx_tasklet);
1324 fore200e_irq(fore200e);
/* acknowledge the interrupt at the adapter so it can raise the next one */
1327 fore200e->bus->irq_ack(fore200e);
1332 #ifdef FORE200E_USE_TASKLET
/*
 * fore200e_tx_tasklet - deferred tx completion processing.
 * @data is the struct fore200e pointer packed into the tasklet's
 * unsigned long argument. Runs fore200e_tx_irq() under the queue lock.
 */
1334 fore200e_tx_tasklet(unsigned long data)
1336 struct fore200e* fore200e = (struct fore200e*) data;
1337 unsigned long flags;
1339 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1341 spin_lock_irqsave(&fore200e->q_lock, flags);
1342 fore200e_tx_irq(fore200e);
1343 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/*
 * fore200e_rx_tasklet - deferred rx processing; mirror of the tx tasklet.
 * @data is the struct fore200e pointer cast to unsigned long.
 */
1348 fore200e_rx_tasklet(unsigned long data)
1350 struct fore200e* fore200e = (struct fore200e*) data;
1351 unsigned long flags;
1353 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1355 spin_lock_irqsave(&fore200e->q_lock, flags);
/* note: same value as the local 'fore200e'; the cast is redundant here */
1356 fore200e_rx_irq((struct fore200e*) data);
1357 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/*
 * fore200e_select_scheme - pick a receive buffer scheme for a new VC.
 * Odd VCIs get BUFFER_SCHEME_ONE, even VCIs BUFFER_SCHEME_TWO, so VCs
 * are spread evenly across the two (identical) schemes.
 */
1363 fore200e_select_scheme(struct atm_vcc* vcc)
1365 /* fairly balance the VCs over (identical) buffer schemes */
1366 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1368 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1369 vcc->itf, vcc->vpi, vcc->vci, scheme);
/*
 * fore200e_activate_vcin - issue an (DE)ACTIVATE_VCIN command to the cp.
 * @activate: nonzero to open the VC, zero to close it.
 * @mtu: ignored by the cp except for AAL0 VCs (see comment below).
 * Builds the opcode and vpvc words, writes them into the next command
 * queue entry, and polls for completion (up to 400 iterations).
 * NOTE(review): excerpt is missing lines (braces, if/else, return) --
 * the visible polling result 'ok' presumably selects the success or
 * failure path; confirm against the full source.
 */
1376 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1378 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1379 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1380 struct activate_opcode activ_opcode;
1381 struct deactivate_opcode deactiv_opcode;
1384 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1386 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
/* choose the rx buffer scheme for this VC before telling the cp about it */
1389 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1391 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1392 activ_opcode.aal = aal;
1393 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1394 activ_opcode.pad = 0;
1397 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1398 deactiv_opcode.pad = 0;
1401 vpvc.vci = vcc->vci;
1402 vpvc.vpi = vcc->vpi;
1404 *entry->status = STATUS_PENDING;
1408 #ifdef FORE200E_52BYTE_AAL0_SDU
1411 /* the MTU is not used by the cp, except in the case of AAL0 */
1412 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
/* the opcode word is written last: that write is what triggers the cp */
1413 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1414 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1417 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1418 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1421 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1423 *entry->status = STATUS_FREE;
1426 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1427 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1431 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1432 activate ? "open" : "clos");
1438 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
/*
 * fore200e_rate_ctrl - derive tx rate-control parameters from the QoS.
 * For a PCR below line rate, express the rate as a ratio of data cells
 * to idle cells out of FORE200E_MAX_BACK2BACK_CELLS; at or above
 * ATM_OC3_PCR, rate control is disabled (both counts zero).
 */
1441 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1443 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1445 /* compute the data cells to idle cells ratio from the tx PCR */
1446 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1447 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1450 /* disable rate control */
1451 rate->data_cells = rate->idle_cells = 0;
/*
 * fore200e_open - atmdev_ops open handler: bring up a VC.
 * Checks the vpi/vci range, rejects a VC already in use, allocates the
 * per-VC private state, reserves pseudo-CBR bandwidth if requested,
 * activates the VC on the cp, computes rate-control parameters, and
 * finally marks the VC ATM_VF_READY.
 * NOTE(review): excerpt is missing lines (braces, returns, some error
 * paths); the unwind ordering described here covers only visible code.
 */
1457 fore200e_open(struct atm_vcc *vcc)
1459 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1460 struct fore200e_vcc* fore200e_vcc;
1461 struct fore200e_vc_map* vc_map;
1462 unsigned long flags;
1464 short vpi = vcc->vpi;
1466 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1467 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
/* q_lock protects the vc_map table against the irq/tasklet paths */
1469 spin_lock_irqsave(&fore200e->q_lock, flags);
1471 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1474 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1476 printk(FORE200E "VC %d.%d.%d already in use\n",
1477 fore200e->atm_dev->number, vpi, vci);
1484 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* GFP_ATOMIC: presumably to match the atomic context of the caller -- confirm */
1486 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1487 if (fore200e_vcc == NULL) {
1492 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1493 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1494 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1495 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1496 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1497 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1498 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1500 /* pseudo-CBR bandwidth requested? */
1501 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
/* rate_mtx serializes bandwidth accounting with change_qos/close */
1503 mutex_lock(&fore200e->rate_mtx);
1504 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1505 mutex_unlock(&fore200e->rate_mtx);
1507 kfree(fore200e_vcc);
1512 /* reserve bandwidth */
1513 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1514 mutex_unlock(&fore200e->rate_mtx);
1517 vcc->itf = vcc->dev->number;
1519 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1520 set_bit(ATM_VF_ADDR, &vcc->flags);
1522 vcc->dev_data = fore200e_vcc;
1524 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
/* cp refused the VC: unwind flags, bandwidth and private state */
1528 clear_bit(ATM_VF_ADDR, &vcc->flags);
1529 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1531 vcc->dev_data = NULL;
1533 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1535 kfree(fore200e_vcc);
1539 /* compute rate control parameters */
1540 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1542 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1543 set_bit(ATM_VF_HASQOS, &vcc->flags);
1545 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1546 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1547 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1548 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
/* min starts above any possible PDU so the first PDU always lowers it */
1551 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1552 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1553 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1555 /* new incarnation of the vcc */
1556 vc_map->incarn = ++fore200e->incarn_count;
1558 /* VC unusable before this flag is set */
1559 set_bit(ATM_VF_READY, &vcc->flags);
/*
 * fore200e_close - atmdev_ops close handler: tear down a VC.
 * Clears ATM_VF_READY first so no new traffic is accepted, deactivates
 * the VC on the cp, detaches the vc_map and private state under the
 * queue lock, releases any reserved CBR bandwidth, and frees the
 * per-VC structure.
 */
1566 fore200e_close(struct atm_vcc* vcc)
1568 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1569 struct fore200e_vcc* fore200e_vcc;
1570 struct fore200e_vc_map* vc_map;
1571 unsigned long flags;
1574 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1575 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1577 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
/* stop rx delivery and tx submission before talking to the cp */
1579 clear_bit(ATM_VF_READY, &vcc->flags);
1581 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1583 spin_lock_irqsave(&fore200e->q_lock, flags);
1585 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1587 /* the vc is no longer considered as "in use" by fore200e_open() */
1590 vcc->itf = vcc->vci = vcc->vpi = 0;
1592 fore200e_vcc = FORE200E_VCC(vcc);
1593 vcc->dev_data = NULL;
1595 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1597 /* release reserved bandwidth, if any */
1598 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1600 mutex_lock(&fore200e->rate_mtx);
1601 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1602 mutex_unlock(&fore200e->rate_mtx);
1604 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1607 clear_bit(ATM_VF_ADDR, &vcc->flags);
1608 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1610 ASSERT(fore200e_vcc);
1611 kfree(fore200e_vcc);
/*
 * fore200e_send - atmdev_ops send handler: queue one PDU for transmission.
 * Validates the VC is ready, optionally strips the user-supplied 4-byte
 * cell header (52-byte AAL0 mode), copies/pads the payload when it is
 * misaligned or not a whole number of AAL0 cells, claims a tx queue
 * entry under the queue lock, fills the tpd (buffer, rate control, ATM
 * header) and kicks the cp by writing the tpd handle.
 * Fix: the "not ready for tx" debug message printed vcc->vpi twice and
 * never the VCI; the third value is now vcc->vci to match "%d.%d.%d".
 * NOTE(review): excerpt is missing lines (braces, returns, some
 * assignments such as tpd/tx_copy setup); only visible code is altered.
 */
1616 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1618 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1619 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1620 struct fore200e_vc_map* vc_map;
1621 struct host_txq* txq = &fore200e->host_txq;
1622 struct host_txq_entry* entry;
1624 struct tpd_haddr tpd_haddr;
1625 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1627 int tx_len = skb->len;
1628 u32* cell_header = NULL;
1629 unsigned char* skb_data;
1631 unsigned char* data;
1632 unsigned long flags;
1635 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1637 ASSERT(fore200e_vcc);
1639 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1640 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1641 dev_kfree_skb_any(skb);
1645 #ifdef FORE200E_52BYTE_AAL0_SDU
1646 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1647 cell_header = (u32*) skb->data;
1648 skb_data = skb->data + 4; /* skip 4-byte cell header */
1649 skb_len = tx_len = skb->len - 4;
1651 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1656 skb_data = skb->data;
/* the cp requires 32-bit aligned tx buffers; fall back to a bounce copy */
1660 if (((unsigned long)skb_data) & 0x3) {
1662 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1667 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1669 /* this simply NUKES the PCA board */
1670 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
/* round the length up to a whole number of AAL0 cells */
1672 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1676 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1682 dev_kfree_skb_any(skb);
1687 memcpy(data, skb_data, skb_len);
1688 if (skb_len < tx_len)
1689 memset(data + skb_len, 0x00, tx_len - skb_len);
1695 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1696 ASSERT(vc_map->vcc == vcc);
1700 spin_lock_irqsave(&fore200e->q_lock, flags);
1702 entry = &txq->host_entry[ txq->head ];
/* keep two entries slack so head never catches up with the cp's tail */
1704 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1706 /* try to free completed tx queue entries */
1707 fore200e_tx_irq(fore200e);
1709 if (*entry->status != STATUS_FREE) {
1711 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1713 /* retry once again? */
1719 atomic_inc(&vcc->stats->tx_err);
1722 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1723 fore200e->name, fore200e->cp_queues->heartbeat);
1728 dev_kfree_skb_any(skb);
/* record the VC incarnation so a stale completion can be detected */
1738 entry->incarn = vc_map->incarn;
1739 entry->vc_map = vc_map;
1741 entry->data = tx_copy ? data : NULL;
1744 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1745 tpd->tsd[ 0 ].length = tx_len;
1747 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1750 /* The dma_map call above implies a dma_sync so the device can use it,
1751 * thus no explicit dma_sync call is necessary here.
1754 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1755 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1756 tpd->tsd[0].length, skb_len);
1758 if (skb_len < fore200e_vcc->tx_min_pdu)
1759 fore200e_vcc->tx_min_pdu = skb_len;
1760 if (skb_len > fore200e_vcc->tx_max_pdu)
1761 fore200e_vcc->tx_max_pdu = skb_len;
1762 fore200e_vcc->tx_pdu++;
1764 /* set tx rate control information */
1765 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1766 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
/* 52-byte AAL0 mode: unpack the user-supplied cell header into the tpd */
1769 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1770 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1771 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1772 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1773 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1776 /* set the ATM header, common to all cells conveying the PDU */
1777 tpd->atm_header.clp = 0;
1778 tpd->atm_header.plt = 0;
1779 tpd->atm_header.vci = vcc->vci;
1780 tpd->atm_header.vpi = vcc->vpi;
1781 tpd->atm_header.gfc = 0;
1784 tpd->spec.length = tx_len;
1786 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1789 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1791 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
/* writing the tpd handle is what notifies the cp of the new PDU */
1793 *entry->status = STATUS_PENDING;
1794 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1796 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/*
 * fore200e_getstats - fetch the statistics block from the cp.
 * Lazily allocates the DMA-able stats buffer on first use, maps it,
 * issues an OPCODE_GET_STATS command and polls for completion, then
 * unmaps the buffer. The stats remain in fore200e->stats for callers
 * such as fore200e_fetch_stats().
 */
1803 fore200e_getstats(struct fore200e* fore200e)
1805 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1806 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1807 struct stats_opcode opcode;
1811 if (fore200e->stats == NULL) {
/* GFP_DMA: buffer must be reachable by the adapter's DMA engine */
1812 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1813 if (fore200e->stats == NULL)
1817 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1818 sizeof(struct stats), DMA_FROM_DEVICE);
1820 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1822 opcode.opcode = OPCODE_GET_STATS;
1825 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1827 *entry->status = STATUS_PENDING;
/* the opcode write triggers the command on the cp */
1829 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1831 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1833 *entry->status = STATUS_FREE;
1835 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1838 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
/*
 * fore200e_getsockopt - atmdev_ops getsockopt stub; only logs the
 * request. Visible code performs no option handling.
 */
1847 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1849 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1851 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1852 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
/*
 * fore200e_setsockopt - atmdev_ops setsockopt stub; only logs the
 * request. Visible code performs no option handling.
 */
1859 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1861 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1863 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1864 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1870 #if 0 /* currently unused */
/*
 * fore200e_get_oc3 - read the OC-3 (SUNI) register block from the cp.
 * Maps @regs for DMA, issues OPCODE_GET_OC3 and polls for completion.
 * Currently compiled out (#if 0).
 * Fix: the opcode write cast was (u32*) while every other bus->write
 * call site in this file uses (u32 __iomem *); add the __iomem
 * annotation for consistency and to silence sparse address-space
 * warnings if this code is ever re-enabled.
 */
1872 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1874 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1875 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1876 struct oc3_opcode opcode;
1878 u32 oc3_regs_dma_addr;
1880 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1882 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1884 opcode.opcode = OPCODE_GET_OC3;
1889 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1891 *entry->status = STATUS_PENDING;
1893 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1895 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1897 *entry->status = STATUS_FREE;
1899 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1902 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
/*
 * fore200e_set_oc3 - write an OC-3 (SUNI) register via the cp.
 * @reg/@value/@mask select the register and the bits to change.
 * Issues OPCODE_SET_OC3 and polls for completion; used by
 * fore200e_setloop() to configure loopback modes.
 */
1912 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1914 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1915 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1916 struct oc3_opcode opcode;
1919 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1921 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1923 opcode.opcode = OPCODE_SET_OC3;
1925 opcode.value = value;
/* no result buffer for a set operation: hand the cp a null handle */
1928 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1930 *entry->status = STATUS_PENDING;
1932 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1934 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1936 *entry->status = STATUS_FREE;
1939 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
/*
 * fore200e_setloop - set the PHY loopback mode (ATM_SETLOOP ioctl).
 * Requires CAP_NET_ADMIN. Maps the requested ATM loopback mode onto
 * SUNI MCT register bits and applies them with fore200e_set_oc3(); on
 * success the mode is recorded in fore200e->loop_mode.
 */
1948 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1950 u32 mct_value, mct_mask;
1953 if (!capable(CAP_NET_ADMIN))
1956 switch (loop_mode) {
/* visible branch clears both loopback enables (mask set, value presumably 0
 * in a missing line) -- confirm against the full source */
1960 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1963 case ATM_LM_LOC_PHY:
1964 mct_value = mct_mask = SUNI_MCT_DLE;
1967 case ATM_LM_RMT_PHY:
1968 mct_value = mct_mask = SUNI_MCT_LLE;
1975 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1977 fore200e->loop_mode = loop_mode;
/*
 * fore200e_fetch_stats - build a struct sonet_stats from the cp stats
 * and copy it to user space (SONET_GETSTAT ioctl).
 * NOTE(review): cpu_to_be32() here appears to be used as a byte-swap of
 * the cp's big-endian counters (cpu_to_be32 and be32_to_cpu are the
 * same swab on little-endian hosts), so summing the converted values is
 * a host-order sum -- confirm the stats fields' endianness in the
 * header before changing this.
 */
1984 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1986 struct sonet_stats tmp;
1988 if (fore200e_getstats(fore200e) < 0)
1991 tmp.section_bip = cpu_to_be32(fore200e->stats->oc3.section_bip8_errors);
1992 tmp.line_bip = cpu_to_be32(fore200e->stats->oc3.line_bip24_errors);
1993 tmp.path_bip = cpu_to_be32(fore200e->stats->oc3.path_bip8_errors);
1994 tmp.line_febe = cpu_to_be32(fore200e->stats->oc3.line_febe_errors);
1995 tmp.path_febe = cpu_to_be32(fore200e->stats->oc3.path_febe_errors);
1996 tmp.corr_hcs = cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors);
1997 tmp.uncorr_hcs = cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors);
/* cell counts are totalled over the three AAL types */
1998 tmp.tx_cells = cpu_to_be32(fore200e->stats->aal0.cells_transmitted) +
1999 cpu_to_be32(fore200e->stats->aal34.cells_transmitted) +
2000 cpu_to_be32(fore200e->stats->aal5.cells_transmitted);
2001 tmp.rx_cells = cpu_to_be32(fore200e->stats->aal0.cells_received) +
2002 cpu_to_be32(fore200e->stats->aal34.cells_received) +
2003 cpu_to_be32(fore200e->stats->aal5.cells_received);
2006 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
/*
 * fore200e_ioctl - atmdev_ops ioctl dispatcher.
 * Visible cases: SONET stats fetch, a zero put_user (presumably a
 * stats-reset acknowledgement -- confirm the case labels in the full
 * source), set/get loopback mode, and a query of supported loopback
 * modes. Anything else returns -ENOSYS.
 */
2013 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2015 struct fore200e* fore200e = FORE200E_DEV(dev);
2017 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2022 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
2025 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
/* the loopback mode is passed by value inside the pointer argument */
2028 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2031 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2034 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2037 return -ENOSYS; /* not implemented */
/*
 * fore200e_change_qos - atmdev_ops change_qos handler.
 * For a CBR tx class, re-checks bandwidth availability under rate_mtx
 * (crediting back the VC's current reservation before comparing),
 * moves the reservation to the new PCR, installs the new QoS and
 * recomputes the rate-control parameters.
 * Fix: the "not ready" debug message printed vcc->vpi twice and never
 * the VCI; the third value is now vcc->vci to match "%d.%d.%d".
 */
2042 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2044 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2045 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2047 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2048 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
2052 DPRINTK(2, "change_qos %d.%d.%d, "
2053 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2054 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2055 "available_cell_rate = %u",
2056 vcc->itf, vcc->vpi, vcc->vci,
2057 fore200e_traffic_class[ qos->txtp.traffic_class ],
2058 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2059 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2060 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2061 flags, fore200e->available_cell_rate);
2063 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2065 mutex_lock(&fore200e->rate_mtx);
/* compare against free bandwidth plus this VC's own current reservation */
2066 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2067 mutex_unlock(&fore200e->rate_mtx);
/* move the reservation from the old PCR to the new one */
2071 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2072 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2074 mutex_unlock(&fore200e->rate_mtx);
2076 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2078 /* update rate control parameters */
2079 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2081 set_bit(ATM_VF_HASQOS, &vcc->flags);
/*
 * fore200e_irq_request - reserve the adapter's (shared) IRQ line and,
 * when tasklet mode is enabled, initialize the rx/tx tasklets.
 * On success advances the probe state machine to FORE200E_STATE_IRQ.
 */
2090 static int __devinit
2091 fore200e_irq_request(struct fore200e* fore200e)
2093 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2095 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2096 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2100 printk(FORE200E "IRQ %s reserved for device %s\n",
2101 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2103 #ifdef FORE200E_USE_TASKLET
2104 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2105 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2108 fore200e->state = FORE200E_STATE_IRQ;
/*
 * fore200e_get_esi - read the adapter PROM and extract the ESI (MAC).
 * Allocates a DMA-able PROM buffer, asks the bus layer to read the
 * PROM, logs revision/serial/ESI, and copies the 6 ESI bytes (PROM
 * mac_addr[2..7]) into both the driver and the atm_dev.
 */
2113 static int __devinit
2114 fore200e_get_esi(struct fore200e* fore200e)
2116 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2122 ok = fore200e->bus->prom_read(fore200e, prom);
2128 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2130 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2131 prom->serial_number & 0xFFFF,
2132 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2133 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
/* the ESI occupies bytes 2..7 of the PROM's mac_addr field */
2135 for (i = 0; i < ESI_LEN; i++) {
2136 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
/*
 * fore200e_alloc_rx_buf - allocate all receive buffers.
 * For each (scheme, magnitude) supply queue: allocate the descriptor
 * array, then each buffer body (bus-aligned, DMA_FROM_DEVICE), and
 * thread every buffer onto the queue's free list. On partial failure
 * the visible error path frees the bodies allocated so far.
 * Fix: use kcalloc() for the descriptor array instead of
 * kzalloc(nbr * sizeof(...)) -- identical zeroed allocation, but with
 * the multiplication checked for overflow.
 * NOTE(review): excerpt is missing lines (braces, some error-path
 * statements); only the visible allocation call is changed.
 */
2145 static int __devinit
2146 fore200e_alloc_rx_buf(struct fore200e* fore200e)
2148 int scheme, magn, nbr, size, i;
2150 struct host_bsq* bsq;
2151 struct buffer* buffer;
2153 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2154 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2156 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2158 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2159 size = fore200e_rx_buf_size[ scheme ][ magn ];
2161 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2163 /* allocate the array of receive buffers */
2164 buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer), GFP_KERNEL);
2169 bsq->freebuf = NULL;
2171 for (i = 0; i < nbr; i++) {
2173 buffer[ i ].scheme = scheme;
2174 buffer[ i ].magn = magn;
2175 #ifdef FORE200E_BSQ_DEBUG
2176 buffer[ i ].index = i;
2177 buffer[ i ].supplied = 0;
2180 /* allocate the receive buffer body */
2181 if (fore200e_chunk_alloc(fore200e,
2182 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2183 DMA_FROM_DEVICE) < 0) {
/* unwind: free the bodies already allocated for this queue */
2186 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2192 /* insert the buffer into the free buffer list */
2193 buffer[ i ].next = bsq->freebuf;
2194 bsq->freebuf = &buffer[ i ];
2196 /* all the buffers are free, initially */
2197 bsq->freebuf_count = nbr;
2199 #ifdef FORE200E_BSQ_DEBUG
2200 bsq_audit(3, bsq, scheme, magn);
2205 fore200e->state = FORE200E_STATE_ALLOC_BUF;
/*
 * fore200e_init_bs_queue - set up the buffer supply queues.
 * For each (scheme, magnitude) pair: allocate aligned status-word and
 * rbd_block arrays, locate the cp-resident queue entries via the cp
 * queue table, then wire each host entry to its cp entry, mark it
 * STATUS_FREE and publish the status word's DMA address to the cp.
 */
2210 static int __devinit
2211 fore200e_init_bs_queue(struct fore200e* fore200e)
2213 int scheme, magn, i;
2215 struct host_bsq* bsq;
2216 struct cp_bsq_entry __iomem * cp_entry;
2218 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2219 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2221 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2223 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2225 /* allocate and align the array of status words */
2226 if (fore200e->bus->dma_chunk_alloc(fore200e,
2228 sizeof(enum status),
2230 fore200e->bus->status_alignment) < 0) {
2234 /* allocate and align the array of receive buffer descriptors */
2235 if (fore200e->bus->dma_chunk_alloc(fore200e,
2237 sizeof(struct rbd_block),
2239 fore200e->bus->descr_alignment) < 0) {
/* unwind the status-word allocation made just above */
2241 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2245 /* get the base address of the cp resident buffer supply queue entries */
2246 cp_entry = fore200e->virt_base +
2247 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2249 /* fill the host resident and cp resident buffer supply queue entries */
2250 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2252 bsq->host_entry[ i ].status =
2253 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2254 bsq->host_entry[ i ].rbd_block =
2255 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2256 bsq->host_entry[ i ].rbd_block_dma =
2257 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2258 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2260 *bsq->host_entry[ i ].status = STATUS_FREE;
/* tell the cp where each host status word lives */
2262 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2263 &cp_entry[ i ].status_haddr);
2268 fore200e->state = FORE200E_STATE_INIT_BSQ;
/*
 * fore200e_init_rx_queue - set up the host/cp receive queue.
 * Allocates aligned status-word and rpd arrays, locates the cp-resident
 * rx entries, and for each slot publishes the status word and rpd DMA
 * addresses to the cp (unlike the tx queue, rpd addresses are written
 * up front).
 */
2273 static int __devinit
2274 fore200e_init_rx_queue(struct fore200e* fore200e)
2276 struct host_rxq* rxq = &fore200e->host_rxq;
2277 struct cp_rxq_entry __iomem * cp_entry;
2280 DPRINTK(2, "receive queue is being initialized\n");
2282 /* allocate and align the array of status words */
2283 if (fore200e->bus->dma_chunk_alloc(fore200e,
2285 sizeof(enum status),
2287 fore200e->bus->status_alignment) < 0) {
2291 /* allocate and align the array of receive PDU descriptors */
2292 if (fore200e->bus->dma_chunk_alloc(fore200e,
2296 fore200e->bus->descr_alignment) < 0) {
/* unwind the status-word allocation made just above */
2298 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2302 /* get the base address of the cp resident rx queue entries */
2303 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2305 /* fill the host resident and cp resident rx entries */
2306 for (i=0; i < QUEUE_SIZE_RX; i++) {
2308 rxq->host_entry[ i ].status =
2309 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2310 rxq->host_entry[ i ].rpd =
2311 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2312 rxq->host_entry[ i ].rpd_dma =
2313 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2314 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2316 *rxq->host_entry[ i ].status = STATUS_FREE;
2318 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2319 &cp_entry[ i ].status_haddr);
2321 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2322 &cp_entry[ i ].rpd_haddr);
2325 /* set the head entry of the queue */
2328 fore200e->state = FORE200E_STATE_INIT_RXQ;
/*
 * fore200e_init_tx_queue - set up the host/cp transmit queue.
 * Allocates aligned status-word and tpd arrays, locates the cp-resident
 * tx entries, and publishes only the status-word DMA addresses; the
 * tpd address is deliberately written per-PDU at send time (see the
 * comment at the bottom of the loop).
 */
2333 static int __devinit
2334 fore200e_init_tx_queue(struct fore200e* fore200e)
2336 struct host_txq* txq = &fore200e->host_txq;
2337 struct cp_txq_entry __iomem * cp_entry;
2340 DPRINTK(2, "transmit queue is being initialized\n");
2342 /* allocate and align the array of status words */
2343 if (fore200e->bus->dma_chunk_alloc(fore200e,
2345 sizeof(enum status),
2347 fore200e->bus->status_alignment) < 0) {
2351 /* allocate and align the array of transmit PDU descriptors */
2352 if (fore200e->bus->dma_chunk_alloc(fore200e,
2356 fore200e->bus->descr_alignment) < 0) {
/* unwind the status-word allocation made just above */
2358 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2362 /* get the base address of the cp resident tx queue entries */
2363 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2365 /* fill the host resident and cp resident tx entries */
2366 for (i=0; i < QUEUE_SIZE_TX; i++) {
2368 txq->host_entry[ i ].status =
2369 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2370 txq->host_entry[ i ].tpd =
2371 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2372 txq->host_entry[ i ].tpd_dma =
2373 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2374 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2376 *txq->host_entry[ i ].status = STATUS_FREE;
2378 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2379 &cp_entry[ i ].status_haddr);
2381 /* although there is a one-to-one mapping of tx queue entries and tpds,
2382 we do not write here the DMA (physical) base address of each tpd into
2383 the related cp resident entry, because the cp relies on this write
2384 operation to detect that a new pdu has been submitted for tx */
2387 /* set the head and tail entries of the queue */
2391 fore200e->state = FORE200E_STATE_INIT_TXQ;
/*
 * fore200e_init_cmd_queue - set up the host/cp command queue.
 * Allocates the aligned status-word array, locates the cp-resident cmd
 * entries, and publishes each status word's DMA address to the cp.
 * Simpler than rx/tx: command bodies live directly in the cp entries.
 */
2396 static int __devinit
2397 fore200e_init_cmd_queue(struct fore200e* fore200e)
2399 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2400 struct cp_cmdq_entry __iomem * cp_entry;
2403 DPRINTK(2, "command queue is being initialized\n");
2405 /* allocate and align the array of status words */
2406 if (fore200e->bus->dma_chunk_alloc(fore200e,
2408 sizeof(enum status),
2410 fore200e->bus->status_alignment) < 0) {
2414 /* get the base address of the cp resident cmd queue entries */
2415 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2417 /* fill the host resident and cp resident cmd entries */
2418 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2420 cmdq->host_entry[ i ].status =
2421 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2422 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2424 *cmdq->host_entry[ i ].status = STATUS_FREE;
2426 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2427 &cp_entry[ i ].status_haddr);
2430 /* set the head entry of the queue */
2433 fore200e->state = FORE200E_STATE_INIT_CMDQ;
/*
 * fore200e_param_bs_queue - write one buffer supply queue's parameters
 * (queue length, buffer size, pool size, supply block size) into the
 * cp's init area for the given (scheme, magnitude) pair. Called from
 * fore200e_initialize() before the INITIALIZE opcode is issued.
 */
2438 static void __devinit
2439 fore200e_param_bs_queue(struct fore200e* fore200e,
2440 enum buffer_scheme scheme, enum buffer_magn magn,
2441 int queue_length, int pool_size, int supply_blksize)
2443 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2445 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2446 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2447 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2448 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
/*
 * fore200e_initialize - issue the INITIALIZE command to the cp.
 * Initializes host locks, maps the cp queue area, enables cp->host
 * interrupts, fills the init block (connection count, queue lengths,
 * descriptor extensions, buffer supply parameters), then writes the
 * INITIALIZE opcode and polls (up to 3000 iterations) for completion.
 */
2452 static int __devinit
2453 fore200e_initialize(struct fore200e* fore200e)
2455 struct cp_queues __iomem * cpq;
2456 int ok, scheme, magn;
2458 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2460 mutex_init(&fore200e->rate_mtx);
2461 spin_lock_init(&fore200e->q_lock);
2463 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2465 /* enable cp to host interrupts */
2466 fore200e->bus->write(1, &cpq->imask);
2468 if (fore200e->bus->irq_enable)
2469 fore200e->bus->irq_enable(fore200e);
2471 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2473 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2474 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2475 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2477 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2478 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
/* describe every buffer supply queue in the init block */
2480 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2481 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2482 fore200e_param_bs_queue(fore200e, scheme, magn,
2484 fore200e_rx_buf_nbr[ scheme ][ magn ],
2487 /* issue the initialize command */
2488 fore200e->bus->write(STATUS_PENDING, &cpq->init.status)
2489 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2491 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2493 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2497 printk(FORE200E "device %s initialized\n", fore200e->name);
2499 fore200e->state = FORE200E_STATE_INITIALIZE;
/*
 * fore200e_monitor_putc - send one character to the i960 monitor's
 * soft UART; the AVAIL flag ORed into the word signals the cp that a
 * character is pending.
 */
2504 static void __devinit
2505 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2507 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2512 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
/*
 * fore200e_monitor_getc - read one character from the i960 monitor's
 * soft UART, polling for up to 50 ms. A received character is echoed
 * to the kernel log and the UART slot is marked free again.
 * Returns the character (visible path) or, presumably, a negative
 * value on timeout in a missing line -- callers test for >= 0.
 */
2516 static int __devinit
2517 fore200e_monitor_getc(struct fore200e* fore200e)
2519 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2520 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2523 while (time_before(jiffies, timeout)) {
2525 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
/* AVAIL flag set means the cp has placed a character in the low byte */
2527 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2529 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2531 printk("%c", c & 0xFF);
/*
 * fore200e_monitor_puts - send a NUL-terminated string to the i960
 * monitor, draining any pending monitor output before each character
 * and after the whole string.
 */
2541 static void __devinit
2542 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2546 /* the i960 monitor doesn't accept any new character if it has something to say */
2547 while (fore200e_monitor_getc(fore200e) >= 0);
2549 fore200e_monitor_putc(fore200e, *str++);
/* flush whatever the monitor printed in response */
2552 while (fore200e_monitor_getc(fore200e) >= 0);
/* Start the firmware previously loaded onto the adapter: send a
 * "go <start_offset>" command through the i960 monitor UART, then poll the
 * monitor boot-status word until it reports BSTAT_CP_RUNNING.
 * Returns via paths not fully visible in this excerpt (an error return on
 * poll failure, success otherwise). */
2556 static int __devinit
2557 fore200e_start_fw(struct fore200e* fore200e)
2561 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;
2563 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2565 #if defined(__sparc_v9__)
2566 /* reported to be required by SBA cards on some sparc64 hosts */
/* firmware entry point comes from the little-endian image header */
2570 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2572 fore200e_monitor_puts(fore200e, cmd);
/* wait up to 1000 polls for the cell processor to come up */
2574 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
2576 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2580 printk(FORE200E "device %s firmware started\n", fore200e->name);
2582 fore200e->state = FORE200E_STATE_START_FW;
/* Copy the firmware image, word by word, into the adapter's on-board RAM
 * at the load offset taken from the (little-endian) image header.
 * Rejects the image if the header magic does not match FW_HEADER_MAGIC. */
2587 static int __devinit
2588 fore200e_load_fw(struct fore200e* fore200e)
2590 u32* fw_data = (u32*) fore200e->bus->fw_data;
/* image size in bytes, converted to a count of 32-bit words */
2591 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
2593 struct fw_header* fw_header = (struct fw_header*) fw_data;
2595 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2597 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2598 fore200e->name, load_addr, fw_size);
2600 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2601 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
/* word-by-word copy; the header itself is included in the copied image */
2605 for (; fw_size--; fw_data++, load_addr++)
2606 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2608 fore200e->state = FORE200E_STATE_LOAD_FW;
/* Register the board with the Linux ATM layer (atm_dev_register) and link
 * the atm_dev and the fore200e private structure to each other.
 * Also publishes the VPI/VCI connection-identifier ranges and the
 * available cell rate (OC-3 PCR). */
2613 static int __devinit
2614 fore200e_register(struct fore200e* fore200e)
2616 struct atm_dev* atm_dev;
2618 DPRINTK(2, "device %s being registered\n", fore200e->name);
2620 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2622 if (atm_dev == NULL) {
2623 printk(FORE200E "unable to register device %s\n", fore200e->name);
/* cross-link ATM device and driver-private state */
2627 atm_dev->dev_data = fore200e;
2628 fore200e->atm_dev = atm_dev;
2630 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2631 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
/* full link bandwidth is initially available for CBR reservation */
2633 fore200e->available_cell_rate = ATM_OC3_PCR;
2635 fore200e->state = FORE200E_STATE_REGISTER;
/* Bring a board all the way up: register with the ATM layer, run the
 * bus-specific configure/map hooks, reset the board, load and start the
 * firmware, initialize the command/TX/RX/buffer-supply queues, allocate
 * receive buffers, read the ESI (MAC) address, hook the interrupt line and
 * finally supply the buffer queues.  Each step short-circuits on failure;
 * the error-return statements are not visible in this excerpt.  The
 * `state` field is advanced by the individual steps, so fore200e_shutdown()
 * can later unwind exactly as far as initialization got. */
2640 static int __devinit
2641 fore200e_init(struct fore200e* fore200e)
2643 if (fore200e_register(fore200e) < 0)
2646 if (fore200e->bus->configure(fore200e) < 0)
2649 if (fore200e->bus->map(fore200e) < 0)
2652 if (fore200e_reset(fore200e, 1) < 0)
2655 if (fore200e_load_fw(fore200e) < 0)
2658 if (fore200e_start_fw(fore200e) < 0)
2661 if (fore200e_initialize(fore200e) < 0)
2664 if (fore200e_init_cmd_queue(fore200e) < 0)
2667 if (fore200e_init_tx_queue(fore200e) < 0)
2670 if (fore200e_init_rx_queue(fore200e) < 0)
2673 if (fore200e_init_bs_queue(fore200e) < 0)
2676 if (fore200e_alloc_rx_buf(fore200e) < 0)
2679 if (fore200e_get_esi(fore200e) < 0)
2682 if (fore200e_irq_request(fore200e) < 0)
2685 fore200e_supply(fore200e);
2687 /* all done, board initialization is now complete */
2688 fore200e->state = FORE200E_STATE_COMPLETE;
2692 #ifdef CONFIG_ATM_FORE200E_PCA
/* PCI probe callback for PCA-200E boards: enable the PCI device, allocate
 * and fill the per-board fore200e structure, enable bus mastering, run the
 * common fore200e_init() sequence and stash the result in PCI drvdata.
 * Error-unwind paths (kfree, index rollback) are not visible in this
 * excerpt. */
2693 static int __devinit
2694 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
/* bus descriptor was stored in the PCI id table's driver_data */
2696 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2697 struct fore200e* fore200e;
/* monotonically increasing board index used to build the device name */
2699 static int index = 0;
2701 if (pci_enable_device(pci_dev)) {
2706 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2707 if (fore200e == NULL) {
2712 fore200e->bus = bus;
2713 fore200e->bus_dev = pci_dev;
2714 fore200e->irq = pci_dev->irq;
2715 fore200e->phys_base = pci_resource_start(pci_dev, 0);
/* NOTE(review): the name is formatted twice in this excerpt, once with
 * `index - 1` (line 2717) and once with `index` (line 2725) — these two
 * lines look like conflicting duplicates from different revisions; only
 * one of them should survive. Verify which against the full file. */
2717 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
2719 pci_set_master(pci_dev);
2721 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2722 fore200e->bus->model_name,
2723 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2725 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2727 err = fore200e_init(fore200e);
2729 fore200e_shutdown(fore200e);
2734 pci_set_drvdata(pci_dev, fore200e);
/* error path: undo pci_enable_device() */
2742 pci_disable_device(pci_dev);
/* PCI remove callback: shut the board down and disable the PCI device.
 * NOTE(review): no kfree(fore200e) is visible in this excerpt — confirm
 * against the full file that the structure allocated in
 * fore200e_pca_detect() is freed here, otherwise removal leaks it. */
2747 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2749 struct fore200e *fore200e;
2751 fore200e = pci_get_drvdata(pci_dev);
2753 fore200e_shutdown(fore200e);
2755 pci_disable_device(pci_dev);
/* PCI match table: a single entry for the FORE PCA-200E; driver_data
 * carries the corresponding fore200e_bus descriptor (fore200e_bus[0]). */
2759 static struct pci_device_id fore200e_pca_tbl[] = {
2760 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2761 0, 0, (unsigned long) &fore200e_bus[0] },
/* export the table so module autoloading can match the hardware */
2765 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
/* PCI driver glue for the PCA-200E: probe/remove hooks plus the id table. */
2767 static struct pci_driver fore200e_pca_driver = {
2768 .name = "fore_200e",
2769 .probe = fore200e_pca_detect,
2770 .remove = __devexit_p(fore200e_pca_remove_one),
2771 .id_table = fore200e_pca_tbl,
/* Module entry point: walk the configured bus table, probe every
 * non-PCI board via the bus-specific detect() hook, initialize each one
 * and queue it on the global fore200e_boards list; then register the PCI
 * driver for PCA-200E cards.  Return statements and the function's
 * `static int __init` header line are not visible in this excerpt. */
2777 fore200e_module_init(void)
2779 const struct fore200e_bus* bus;
2780 struct fore200e* fore200e;
2783 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2785 /* for each configured bus interface */
2786 for (bus = fore200e_bus; bus->model_name; bus++) {
2788 /* detect all boards present on that bus */
2789 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {
2791 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2792 fore200e->bus->model_name,
2793 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2795 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2797 if (fore200e_init(fore200e) < 0) {
2799 fore200e_shutdown(fore200e);
2803 list_add(&fore200e->entry, &fore200e_boards);
2807 #ifdef CONFIG_ATM_FORE200E_PCA
/* PCI boards are handled through the pci_driver framework instead */
2808 if (!pci_register_driver(&fore200e_pca_driver))
/* succeed if at least one non-PCI board came up */
2812 if (!list_empty(&fore200e_boards))
/* Module exit: unregister the PCI driver (which triggers remove for PCI
 * boards), then shut down every board remaining on the global list.
 * The `static void __exit` header line is not visible in this excerpt. */
2820 fore200e_module_cleanup(void)
2822 struct fore200e *fore200e, *next;
2824 #ifdef CONFIG_ATM_FORE200E_PCA
2825 pci_unregister_driver(&fore200e_pca_driver);
/* _safe variant: fore200e may be unlinked/freed while iterating */
2828 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
2829 fore200e_shutdown(fore200e);
2832 DPRINTK(1, "module being removed\n");
/* /proc read handler for the ATM device: emits one "page" of text per call,
 * selected by *pos — board identity, bus info, buffer-queue state, cell
 * processor heartbeat, firmware/media info, monitor state, a long series of
 * statistics sections (PHY, OC-3, ATM, AAL0, AAL3/4, AAL5, AUX), carrier
 * state, and finally one line per open VCC.  The page-selection
 * `if (!left--)` guards between sections are not visible in this excerpt.
 * Return type line (static int) is also not visible. */
2837 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2839 struct fore200e* fore200e = FORE200E_DEV(dev);
2840 struct fore200e_vcc* fore200e_vcc;
2841 struct atm_vcc* vcc;
2842 int i, len, left = *pos;
2843 unsigned long flags;
/* refresh the statistics snapshot from the board before printing */
2847 if (fore200e_getstats(fore200e) < 0)
2850 len = sprintf(page,"\n"
2852 " internal name:\t\t%s\n", fore200e->name);
2854 /* print bus-specific information */
2855 if (fore200e->bus->proc_read)
2856 len += fore200e->bus->proc_read(fore200e, page + len);
2858 len += sprintf(page + len,
2859 " interrupt line:\t\t%s\n"
2860 " physical base address:\t0x%p\n"
2861 " virtual base address:\t0x%p\n"
2862 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2863 " board serial number:\t\t%d\n\n",
2864 fore200e_irq_itoa(fore200e->irq),
2865 (void*)fore200e->phys_base,
2866 fore200e->virt_base,
2867 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2868 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
/* serial number is encoded in the last two ESI bytes */
2869 fore200e->esi[4] * 256 + fore200e->esi[5]);
/* free-buffer counts per scheme/magnitude of the buffer supply queues */
2875 return sprintf(page,
2876 " free small bufs, scheme 1:\t%d\n"
2877 " free large bufs, scheme 1:\t%d\n"
2878 " free small bufs, scheme 2:\t%d\n"
2879 " free large bufs, scheme 2:\t%d\n",
2880 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2881 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2882 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2883 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
/* cell processor heartbeat: high half == 0xDEAD signals a fatal firmware
 * error, with the error code in the low 16 bits */
2886 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2888 len = sprintf(page,"\n\n"
2889 " cell processor:\n"
2890 " heartbeat state:\t\t");
2892 if (hb >> 16 != 0xDEAD)
2893 len += sprintf(page + len, "0x%08x\n", hb);
2895 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2901 static const char* media_name[] = {
2902 "unshielded twisted pair",
2903 "multimode optical fiber ST",
2904 "multimode optical fiber SC",
2905 "single-mode optical fiber ST",
2906 "single-mode optical fiber SC",
2910 static const char* oc3_mode[] = {
2912 "diagnostic loopback",
2917 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2918 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2919 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2920 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
/* NOTE(review): media_index is u32, so `(media_index < 0)` is always
 * false — this unsigned comparison cannot catch a negative index.  The
 * upstream fix declares the index as a signed int.  Only the `> 4` bound
 * is effective here. */
2923 if ((media_index < 0) || (media_index > 4))
2926 switch (fore200e->loop_mode) {
2927 case ATM_LM_NONE: oc3_index = 0;
2929 case ATM_LM_LOC_PHY: oc3_index = 1;
2931 case ATM_LM_RMT_PHY: oc3_index = 2;
2933 default: oc3_index = 3;
2936 return sprintf(page,
2937 " firmware release:\t\t%d.%d.%d\n"
2938 " monitor release:\t\t%d.%d\n"
2939 " media type:\t\t\t%s\n"
2940 " OC-3 revision:\t\t0x%x\n"
2941 " OC-3 mode:\t\t\t%s",
/* double-shift idiom extracts the middle and low bytes of the release */
2942 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2943 mon960_release >> 16, mon960_release << 16 >> 16,
2944 media_name[ media_index ],
2946 oc3_mode[ oc3_index ]);
2950 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2952 return sprintf(page,
2955 " version number:\t\t%d\n"
2956 " boot status word:\t\t0x%08x\n",
2957 fore200e->bus->read(&cp_monitor->mon_version),
2958 fore200e->bus->read(&cp_monitor->bstat));
/* on-board statistics are stored big-endian; cpu_to_be32 performs the
 * same byte swap as be32_to_cpu, though the latter name would better
 * express the direction of the conversion */
2962 return sprintf(page,
2964 " device statistics:\n"
2966 " crc_header_errors:\t\t%10u\n"
2967 " framing_errors:\t\t%10u\n",
2968 cpu_to_be32(fore200e->stats->phy.crc_header_errors),
2969 cpu_to_be32(fore200e->stats->phy.framing_errors));
2972 return sprintf(page, "\n"
2974 " section_bip8_errors:\t%10u\n"
2975 " path_bip8_errors:\t\t%10u\n"
2976 " line_bip24_errors:\t\t%10u\n"
2977 " line_febe_errors:\t\t%10u\n"
2978 " path_febe_errors:\t\t%10u\n"
2979 " corr_hcs_errors:\t\t%10u\n"
2980 " ucorr_hcs_errors:\t\t%10u\n",
2981 cpu_to_be32(fore200e->stats->oc3.section_bip8_errors),
2982 cpu_to_be32(fore200e->stats->oc3.path_bip8_errors),
2983 cpu_to_be32(fore200e->stats->oc3.line_bip24_errors),
2984 cpu_to_be32(fore200e->stats->oc3.line_febe_errors),
2985 cpu_to_be32(fore200e->stats->oc3.path_febe_errors),
2986 cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors),
2987 cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors));
2990 return sprintf(page,"\n"
2991 " ATM:\t\t\t\t cells\n"
2994 " vpi out of range:\t\t%10u\n"
2995 " vpi no conn:\t\t%10u\n"
2996 " vci out of range:\t\t%10u\n"
2997 " vci no conn:\t\t%10u\n",
2998 cpu_to_be32(fore200e->stats->atm.cells_transmitted),
2999 cpu_to_be32(fore200e->stats->atm.cells_received),
3000 cpu_to_be32(fore200e->stats->atm.vpi_bad_range),
3001 cpu_to_be32(fore200e->stats->atm.vpi_no_conn),
3002 cpu_to_be32(fore200e->stats->atm.vci_bad_range),
3003 cpu_to_be32(fore200e->stats->atm.vci_no_conn));
3006 return sprintf(page,"\n"
3007 " AAL0:\t\t\t cells\n"
3010 " dropped:\t\t\t%10u\n",
3011 cpu_to_be32(fore200e->stats->aal0.cells_transmitted),
3012 cpu_to_be32(fore200e->stats->aal0.cells_received),
3013 cpu_to_be32(fore200e->stats->aal0.cells_dropped));
3016 return sprintf(page,"\n"
3018 " SAR sublayer:\t\t cells\n"
3021 " dropped:\t\t\t%10u\n"
3022 " CRC errors:\t\t%10u\n"
3023 " protocol errors:\t\t%10u\n\n"
3024 " CS sublayer:\t\t PDUs\n"
3027 " dropped:\t\t\t%10u\n"
3028 " protocol errors:\t\t%10u\n",
3029 cpu_to_be32(fore200e->stats->aal34.cells_transmitted),
3030 cpu_to_be32(fore200e->stats->aal34.cells_received),
3031 cpu_to_be32(fore200e->stats->aal34.cells_dropped),
3032 cpu_to_be32(fore200e->stats->aal34.cells_crc_errors),
3033 cpu_to_be32(fore200e->stats->aal34.cells_protocol_errors),
3034 cpu_to_be32(fore200e->stats->aal34.cspdus_transmitted),
3035 cpu_to_be32(fore200e->stats->aal34.cspdus_received),
3036 cpu_to_be32(fore200e->stats->aal34.cspdus_dropped),
3037 cpu_to_be32(fore200e->stats->aal34.cspdus_protocol_errors));
3040 return sprintf(page,"\n"
3042 " SAR sublayer:\t\t cells\n"
3045 " dropped:\t\t\t%10u\n"
3046 " congestions:\t\t%10u\n\n"
3047 " CS sublayer:\t\t PDUs\n"
3050 " dropped:\t\t\t%10u\n"
3051 " CRC errors:\t\t%10u\n"
3052 " protocol errors:\t\t%10u\n",
3053 cpu_to_be32(fore200e->stats->aal5.cells_transmitted),
3054 cpu_to_be32(fore200e->stats->aal5.cells_received),
3055 cpu_to_be32(fore200e->stats->aal5.cells_dropped),
3056 cpu_to_be32(fore200e->stats->aal5.congestion_experienced),
3057 cpu_to_be32(fore200e->stats->aal5.cspdus_transmitted),
3058 cpu_to_be32(fore200e->stats->aal5.cspdus_received),
3059 cpu_to_be32(fore200e->stats->aal5.cspdus_dropped),
3060 cpu_to_be32(fore200e->stats->aal5.cspdus_crc_errors),
3061 cpu_to_be32(fore200e->stats->aal5.cspdus_protocol_errors));
/* NOTE(review): the format string below contains six conversions but only
 * five arguments are visible here — the sixth (TX PDU allocation failures,
 * a host-side counter, hence %lu) is on a line elided from this excerpt.
 * Confirm against the full file. */
3064 return sprintf(page,"\n"
3065 " AUX:\t\t allocation failures\n"
3066 " small b1:\t\t\t%10u\n"
3067 " large b1:\t\t\t%10u\n"
3068 " small b2:\t\t\t%10u\n"
3069 " large b2:\t\t\t%10u\n"
3070 " RX PDUs:\t\t\t%10u\n"
3071 " TX PDUs:\t\t\t%10lu\n",
3072 cpu_to_be32(fore200e->stats->aux.small_b1_failed),
3073 cpu_to_be32(fore200e->stats->aux.large_b1_failed),
3074 cpu_to_be32(fore200e->stats->aux.small_b2_failed),
3075 cpu_to_be32(fore200e->stats->aux.large_b2_failed),
3076 cpu_to_be32(fore200e->stats->aux.rpd_alloc_failed),
3080 return sprintf(page,"\n"
3081 " receive carrier:\t\t\t%s\n",
3082 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3085 return sprintf(page,"\n"
3086 " VCCs:\n address VPI VCI AAL "
3087 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
/* one output line per ready VCC; `left` selects which VCC corresponds to
 * the requested page */
3090 for (i = 0; i < NBR_CONNECT; i++) {
3092 vcc = fore200e->vc_map[i].vcc;
/* the queue lock keeps the vcc from being closed while we print it */
3097 spin_lock_irqsave(&fore200e->q_lock, flags);
3099 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3101 fore200e_vcc = FORE200E_VCC(vcc);
3102 ASSERT(fore200e_vcc);
3105 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3106 (u32)(unsigned long)vcc,
3107 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3108 fore200e_vcc->tx_pdu,
/* min-pdu fields start above 0xFFFF as "not yet seen" sentinels */
3109 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3110 fore200e_vcc->tx_max_pdu,
3111 fore200e_vcc->rx_pdu,
3112 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3113 fore200e_vcc->rx_max_pdu);
3115 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3119 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* module entry/exit hookup */
3125 module_init(fore200e_module_init);
3126 module_exit(fore200e_module_cleanup);
/* ATM device operations vector handed to atm_dev_register(): maps the
 * generic ATM layer entry points onto this driver's implementations. */
3129 static const struct atmdev_ops fore200e_ops =
3131 .open = fore200e_open,
3132 .close = fore200e_close,
3133 .ioctl = fore200e_ioctl,
3134 .getsockopt = fore200e_getsockopt,
3135 .setsockopt = fore200e_setsockopt,
3136 .send = fore200e_send,
3137 .change_qos = fore200e_change_qos,
3138 .proc_read = fore200e_proc_read,
3139 .owner = THIS_MODULE
/* firmware images are linked into the module as binary blobs; these
 * symbols are defined by the generated firmware object files */
3143 #ifdef CONFIG_ATM_FORE200E_PCA
3144 extern const unsigned char _fore200e_pca_fw_data[];
3145 extern const unsigned int _fore200e_pca_fw_size;
3147 #ifdef CONFIG_ATM_FORE200E_SBA
3148 extern const unsigned char _fore200e_sba_fw_data[];
3149 extern const unsigned int _fore200e_sba_fw_size;
3152 static const struct fore200e_bus fore200e_bus[] = {
3153 #ifdef CONFIG_ATM_FORE200E_PCA
3154 { "PCA-200E", "pca200e", 32, 4, 32,
3155 _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
3158 fore200e_pca_dma_map,
3159 fore200e_pca_dma_unmap,
3160 fore200e_pca_dma_sync_for_cpu,
3161 fore200e_pca_dma_sync_for_device,
3162 fore200e_pca_dma_chunk_alloc,
3163 fore200e_pca_dma_chunk_free,
3165 fore200e_pca_configure,
3168 fore200e_pca_prom_read,
3171 fore200e_pca_irq_check,
3172 fore200e_pca_irq_ack,
3173 fore200e_pca_proc_read,
3176 #ifdef CONFIG_ATM_FORE200E_SBA
3177 { "SBA-200E", "sba200e", 32, 64, 32,
3178 _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
3181 fore200e_sba_dma_map,
3182 fore200e_sba_dma_unmap,
3183 fore200e_sba_dma_sync_for_cpu,
3184 fore200e_sba_dma_sync_for_device,
3185 fore200e_sba_dma_chunk_alloc,
3186 fore200e_sba_dma_chunk_free,
3187 fore200e_sba_detect,
3188 fore200e_sba_configure,
3191 fore200e_sba_prom_read,
3193 fore200e_sba_irq_enable,
3194 fore200e_sba_irq_check,
3195 fore200e_sba_irq_ack,
3196 fore200e_sba_proc_read,
3202 #ifdef MODULE_LICENSE
3203 MODULE_LICENSE("GPL");