2 $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
4 A FORE Systems 200E-series driver for ATM on Linux.
5 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
7 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
9 This driver simultaneously supports PCA-200E and SBA-200E adapters
10 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/config.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/init.h>
32 #include <linux/capability.h>
33 #include <linux/sched.h>
34 #include <linux/interrupt.h>
35 #include <linux/bitops.h>
36 #include <linux/pci.h>
37 #include <linux/module.h>
38 #include <linux/atmdev.h>
39 #include <linux/sonet.h>
40 #include <linux/atm_suni.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/delay.h>
44 #include <asm/string.h>
48 #include <asm/byteorder.h>
49 #include <asm/uaccess.h>
50 #include <asm/atomic.h>
52 #ifdef CONFIG_ATM_FORE200E_SBA
53 #include <asm/idprom.h>
55 #include <asm/openprom.h>
56 #include <asm/oplib.h>
57 #include <asm/pgtable.h>
60 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
61 #define FORE200E_USE_TASKLET
64 #if 0 /* enable the debugging code of the buffer supply queues */
65 #define FORE200E_BSQ_DEBUG
68 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
69 #define FORE200E_52BYTE_AAL0_SDU
75 #define FORE200E_VERSION "0.3e"
77 #define FORE200E "fore200e: "
79 #if 0 /* override .config */
80 #define CONFIG_ATM_FORE200E_DEBUG 1
82 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
83 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
84 printk(FORE200E format, ##args); } while (0)
86 #define DPRINTK(level, format, args...) do {} while (0)
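/* DPRINTK(level, fmt, ...) compiles to a printk only when CONFIG_ATM_FORE200E_DEBUG
   is defined and >= level, and to a no-op otherwise; e.g. DPRINTK(2, "device %s mapped\n", name)
   is silent unless the debug level is at least 2 */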
90 #define FORE200E_ALIGN(addr, alignment) \
91 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
93 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
95 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
97 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
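/* the helper macros above: FORE200E_ALIGN() returns the number of padding bytes needed
   to round 'addr' up to the next 'alignment' boundary (a power of two), e.g.
   FORE200E_ALIGN(0x1003, 8) == 5; FORE200E_DMA_INDEX() computes the bus address of the
   i-th element of a DMA-mapped array of 'type', while FORE200E_INDEX() returns a pointer
   to the i-th element of the corresponding host-resident array; FORE200E_NEXT_ENTRY()
   advances a circular queue index, wrapping at 'modulo' */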
100 #define ASSERT(expr) if (!(expr)) { \
101 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
102 __FUNCTION__, __LINE__, #expr); \
103 panic(FORE200E "%s", __FUNCTION__); \
106 #define ASSERT(expr) do {} while (0)
110 static const struct atmdev_ops fore200e_ops;
111 static const struct fore200e_bus fore200e_bus[];
113 static LIST_HEAD(fore200e_boards);
116 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
117 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
118 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
121 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
122 { BUFFER_S1_NBR, BUFFER_L1_NBR },
123 { BUFFER_S2_NBR, BUFFER_L2_NBR }
126 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
127 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
128 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
132 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
133 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
137 #if 0 /* currently unused */
139 fore200e_fore2atm_aal(enum fore200e_aal aal)
142 case FORE200E_AAL0: return ATM_AAL0;
143 case FORE200E_AAL34: return ATM_AAL34;
144 case FORE200E_AAL5: return ATM_AAL5;
152 static enum fore200e_aal
153 fore200e_atm2fore_aal(int aal)
156 case ATM_AAL0: return FORE200E_AAL0;
157 case ATM_AAL34: return FORE200E_AAL34;
160 case ATM_AAL5: return FORE200E_AAL5;
168 fore200e_irq_itoa(int irq)
170 #if defined(__sparc_v9__)
171 return __irq_itoa(irq);
174 sprintf(str, "%d", irq);
181 fore200e_kmalloc(int size, int flags)
183 void* chunk = kmalloc(size, flags);
186 memset(chunk, 0x00, size);
188 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
195 fore200e_kfree(void* chunk)
201 /* allocate and align a chunk of memory intended to hold the data being exchanged
202 between the driver and the adapter (using streaming DVMA) */
205 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
207 unsigned long offset = 0;
209 if (alignment <= sizeof(int))
212 chunk->alloc_size = size + alignment;
213 chunk->align_size = size;
214 chunk->direction = direction;
216 chunk->alloc_addr = fore200e_kmalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
217 if (chunk->alloc_addr == NULL)
221 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
223 chunk->align_addr = chunk->alloc_addr + offset;
225 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
231 /* free a chunk of memory */
234 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
236 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
238 fore200e_kfree(chunk->alloc_addr);
243 fore200e_spin(int msecs)
245 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
246 while (time_before(jiffies, timeout));
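/* busy-poll a host-resident status word written by the cp until it reaches the expected
   value, an error bit is raised, or 'msecs' milliseconds have elapsed */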
251 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
253 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
258 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
261 } while (time_before(jiffies, timeout));
265 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
275 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
277 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
281 if ((ok = (fore200e->bus->read(addr) == val)))
284 } while (time_before(jiffies, timeout));
288 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
289 fore200e->bus->read(addr), val);
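/* release the data chunks of all receive buffers, for every buffer scheme and magnitude */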
298 fore200e_free_rx_buf(struct fore200e* fore200e)
300 int scheme, magn, nbr;
301 struct buffer* buffer;
303 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
304 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
306 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
308 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
310 struct chunk* data = &buffer[ nbr ].data;
312 if (data->alloc_addr != NULL)
313 fore200e_chunk_free(fore200e, data);
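/* release the status words and receive buffer descriptor blocks of every buffer supply queue */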
322 fore200e_uninit_bs_queue(struct fore200e* fore200e)
326 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
327 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
329 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
330 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
332 if (status->alloc_addr)
333 fore200e->bus->dma_chunk_free(fore200e, status);
335 if (rbd_block->alloc_addr)
336 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
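/* cold-start the adapter; the outcome of the on-board self-test is awaited and
   reported when 'diag' is set */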
343 fore200e_reset(struct fore200e* fore200e, int diag)
347 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
349 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
351 fore200e->bus->reset(fore200e);
354 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
357 printk(FORE200E "device %s self-test failed\n", fore200e->name);
361 printk(FORE200E "device %s self-test passed\n", fore200e->name);
363 fore200e->state = FORE200E_STATE_RESET;
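/* tear the device down, releasing resources in the reverse order of their allocation;
   the switch below falls through from the current state down to FORE200E_STATE_BLANK */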
371 fore200e_shutdown(struct fore200e* fore200e)
373 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
374 fore200e->name, fore200e->phys_base,
375 fore200e_irq_itoa(fore200e->irq));
377 if (fore200e->state > FORE200E_STATE_RESET) {
378 /* first, reset the board to prevent further interrupts or data transfers */
379 fore200e_reset(fore200e, 0);
382 /* then, release all allocated resources */
383 switch(fore200e->state) {
385 case FORE200E_STATE_COMPLETE:
387 kfree(fore200e->stats);
389 case FORE200E_STATE_IRQ:
390 free_irq(fore200e->irq, fore200e->atm_dev);
392 case FORE200E_STATE_ALLOC_BUF:
393 fore200e_free_rx_buf(fore200e);
395 case FORE200E_STATE_INIT_BSQ:
396 fore200e_uninit_bs_queue(fore200e);
398 case FORE200E_STATE_INIT_RXQ:
399 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
400 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
402 case FORE200E_STATE_INIT_TXQ:
403 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
404 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
406 case FORE200E_STATE_INIT_CMDQ:
407 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
409 case FORE200E_STATE_INITIALIZE:
410 /* nothing to do for that state */
412 case FORE200E_STATE_START_FW:
413 /* nothing to do for that state */
415 case FORE200E_STATE_LOAD_FW:
416 /* nothing to do for that state */
418 case FORE200E_STATE_RESET:
419 /* nothing to do for that state */
421 case FORE200E_STATE_MAP:
422 fore200e->bus->unmap(fore200e);
424 case FORE200E_STATE_CONFIGURE:
425 /* nothing to do for that state */
427 case FORE200E_STATE_REGISTER:
428 /* XXX shouldn't we *start* by deregistering the device? */
429 atm_dev_deregister(fore200e->atm_dev);
431 case FORE200E_STATE_BLANK:
432 /* nothing to do for that state */
438 #ifdef CONFIG_ATM_FORE200E_PCA
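/* PCA-200E (PCI) bus-dependent operations */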
440 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
442 /* on big-endian hosts, the board is configured to convert
443 the endianness of slave RAM accesses */
444 return le32_to_cpu(readl(addr));
448 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
450 /* on big-endian hosts, the board is configured to convert
451 the endianness of slave RAM accesses */
452 writel(cpu_to_le32(val), addr);
457 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
459 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
461 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
462 virt_addr, size, direction, dma_addr);
469 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
471 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
472 dma_addr, size, direction);
474 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
479 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
481 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
483 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
487 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
489 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
491 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
495 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
496 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
499 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
500 int size, int nbr, int alignment)
502 /* returned chunks are page-aligned */
503 chunk->alloc_size = size * nbr;
504 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
508 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
511 chunk->align_addr = chunk->alloc_addr;
517 /* free a DMA consistent chunk of memory */
520 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
522 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
530 fore200e_pca_irq_check(struct fore200e* fore200e)
532 /* this is a 1-bit register */
533 int irq_posted = readl(fore200e->regs.pca.psr);
535 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
536 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
537 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
546 fore200e_pca_irq_ack(struct fore200e* fore200e)
548 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
553 fore200e_pca_reset(struct fore200e* fore200e)
555 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
557 writel(0, fore200e->regs.pca.hcr);
562 fore200e_pca_map(struct fore200e* fore200e)
564 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
566 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
568 if (fore200e->virt_base == NULL) {
569 printk(FORE200E "can't map device %s\n", fore200e->name);
573 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
575 /* gain access to the PCA specific registers */
576 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
577 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
578 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
580 fore200e->state = FORE200E_STATE_MAP;
586 fore200e_pca_unmap(struct fore200e* fore200e)
588 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
590 if (fore200e->virt_base != NULL)
591 iounmap(fore200e->virt_base);
596 fore200e_pca_configure(struct fore200e* fore200e)
598 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
599 u8 master_ctrl, latency;
601 DPRINTK(2, "device %s being configured\n", fore200e->name);
603 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
604 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
608 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
610 master_ctrl = master_ctrl
611 #if defined(__BIG_ENDIAN)
612 /* request the PCA board to convert the endianness of slave RAM accesses */
613 | PCA200E_CTRL_CONVERT_ENDIAN
616 | PCA200E_CTRL_DIS_CACHE_RD
617 | PCA200E_CTRL_DIS_WRT_INVAL
618 | PCA200E_CTRL_ENA_CONT_REQ_MODE
619 | PCA200E_CTRL_2_CACHE_WRT_INVAL
621 | PCA200E_CTRL_LARGE_PCI_BURSTS;
623 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
625 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
626 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
627 this may impact the performance of other PCI devices on the same bus, though */
629 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
631 fore200e->state = FORE200E_STATE_CONFIGURE;
637 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
639 struct host_cmdq* cmdq = &fore200e->host_cmdq;
640 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
641 struct prom_opcode opcode;
645 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
647 opcode.opcode = OPCODE_GET_PROM;
650 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
652 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
654 *entry->status = STATUS_PENDING;
656 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
658 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
660 *entry->status = STATUS_FREE;
662 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
665 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
669 #if defined(__BIG_ENDIAN)
671 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
673 /* MAC address is stored as little-endian */
674 swap_here(&prom->mac_addr[0]);
675 swap_here(&prom->mac_addr[4]);
683 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
685 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
687 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
688 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
691 #endif /* CONFIG_ATM_FORE200E_PCA */
694 #ifdef CONFIG_ATM_FORE200E_SBA
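/* SBA-200E (SBUS) bus-dependent operations */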
697 fore200e_sba_read(volatile u32 __iomem *addr)
699 return sbus_readl(addr);
704 fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
706 sbus_writel(val, addr);
711 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
713 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
715 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
716 virt_addr, size, direction, dma_addr);
723 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
725 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
726 dma_addr, size, direction);
728 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
733 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
735 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
737 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
741 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
743 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
745 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
749 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
750 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
753 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
754 int size, int nbr, int alignment)
756 chunk->alloc_size = chunk->align_size = size * nbr;
758 /* returned chunks are page-aligned */
759 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
763 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
766 chunk->align_addr = chunk->alloc_addr;
772 /* free a DVMA consistent chunk of memory */
775 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
777 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
785 fore200e_sba_irq_enable(struct fore200e* fore200e)
787 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
788 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
793 fore200e_sba_irq_check(struct fore200e* fore200e)
795 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
800 fore200e_sba_irq_ack(struct fore200e* fore200e)
802 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
803 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
808 fore200e_sba_reset(struct fore200e* fore200e)
810 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
812 fore200e->bus->write(0, fore200e->regs.sba.hcr);
817 fore200e_sba_map(struct fore200e* fore200e)
819 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
822 /* gain access to the SBA specific registers */
823 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
824 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
825 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
826 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
828 if (fore200e->virt_base == NULL) {
829 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
833 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
835 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
837 /* get the supported DVMA burst sizes */
838 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
840 if (sbus_can_dma_64bit(sbus_dev))
841 sbus_set_sbus64(sbus_dev, bursts);
843 fore200e->state = FORE200E_STATE_MAP;
849 fore200e_sba_unmap(struct fore200e* fore200e)
851 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
852 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
853 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
854 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
859 fore200e_sba_configure(struct fore200e* fore200e)
861 fore200e->state = FORE200E_STATE_CONFIGURE;
866 static struct fore200e* __init
867 fore200e_sba_detect(const struct fore200e_bus* bus, int index)
869 struct fore200e* fore200e;
870 struct sbus_bus* sbus_bus;
871 struct sbus_dev* sbus_dev = NULL;
873 unsigned int count = 0;
875 for_each_sbus (sbus_bus) {
876 for_each_sbusdev (sbus_dev, sbus_bus) {
877 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
887 if (sbus_dev->num_registers != 4) {
888 printk(FORE200E "this %s device has %d instead of 4 registers\n",
889 bus->model_name, sbus_dev->num_registers);
893 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
894 if (fore200e == NULL)
898 fore200e->bus_dev = sbus_dev;
899 fore200e->irq = sbus_dev->irqs[ 0 ];
901 fore200e->phys_base = (unsigned long)sbus_dev;
903 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
910 fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
912 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
915 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
919 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
923 prom_getproperty(sbus_dev->prom_node, "serialnumber",
924 (char*)&prom->serial_number, sizeof(prom->serial_number));
926 prom_getproperty(sbus_dev->prom_node, "promversion",
927 (char*)&prom->hw_revision, sizeof(prom->hw_revision));
934 fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
936 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
938 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
940 #endif /* CONFIG_ATM_FORE200E_SBA */
944 fore200e_tx_irq(struct fore200e* fore200e)
946 struct host_txq* txq = &fore200e->host_txq;
947 struct host_txq_entry* entry;
949 struct fore200e_vc_map* vc_map;
951 if (fore200e->host_txq.txing == 0)
956 entry = &txq->host_entry[ txq->tail ];
958 if ((*entry->status & STATUS_COMPLETE) == 0) {
962 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
963 entry, txq->tail, entry->vc_map, entry->skb);
965 /* free copy of misaligned data */
969 /* remove DMA mapping */
970 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
973 vc_map = entry->vc_map;
975 /* vcc closed since the time the entry was submitted for tx? */
976 if ((vc_map->vcc == NULL) ||
977 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
979 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
980 fore200e->atm_dev->number);
982 dev_kfree_skb_any(entry->skb);
987 /* vcc closed then immediately re-opened? */
988 if (vc_map->incarn != entry->incarn) {
990 /* when a vcc is closed, some PDUs may still be pending in the tx queue.
991 if the same vcc is immediately re-opened, those pending PDUs must
992 not be popped after the completion of their emission, as they refer
993 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
994 would be decremented by the size of the (unrelated) skb, possibly
995 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
996 we thus bind the tx entry to the current incarnation of the vcc
997 when the entry is submitted for tx. When the tx later completes,
998 if the incarnation number of the tx entry does not match the one
999 of the vcc, then this implies that the vcc has been closed then re-opened.
1000 we thus just drop the skb here. */
1002 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
1003 fore200e->atm_dev->number);
1005 dev_kfree_skb_any(entry->skb);
1011 /* notify tx completion */
1013 vcc->pop(vcc, entry->skb);
1016 dev_kfree_skb_any(entry->skb);
1019 /* race fixed by the above incarnation mechanism, but... */
1020 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
1021 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
1024 /* check error condition */
1025 if (*entry->status & STATUS_ERROR)
1026 atomic_inc(&vcc->stats->tx_err);
1028 atomic_inc(&vcc->stats->tx);
1032 *entry->status = STATUS_FREE;
1034 fore200e->host_txq.txing--;
1036 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
1041 #ifdef FORE200E_BSQ_DEBUG
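/* consistency check of a buffer supply queue free list: every buffer must belong to the
   right scheme/magnitude, must not be currently supplied to the cp, and the list length
   must match freebuf_count */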
1042 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
1044 struct buffer* buffer;
1047 buffer = bsq->freebuf;
1050 if (buffer->supplied) {
1051 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
1052 where, scheme, magn, buffer->index);
1055 if (buffer->magn != magn) {
1056 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
1057 where, scheme, magn, buffer->index, buffer->magn);
1060 if (buffer->scheme != scheme) {
1061 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
1062 where, scheme, magn, buffer->index, buffer->scheme);
1065 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
1066 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
1067 where, scheme, magn, buffer->index);
1071 buffer = buffer->next;
1074 if (count != bsq->freebuf_count) {
1075 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
1076 where, scheme, magn, count, bsq->freebuf_count);
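/* replenish the cp with receive buffers: whenever at least RBD_BLK_SIZE free buffers are
   available for a given scheme/magnitude, hand a block of them to the adapter through
   the corresponding buffer supply queue */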
1084 fore200e_supply(struct fore200e* fore200e)
1086 int scheme, magn, i;
1088 struct host_bsq* bsq;
1089 struct host_bsq_entry* entry;
1090 struct buffer* buffer;
1092 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1093 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1095 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1097 #ifdef FORE200E_BSQ_DEBUG
1098 bsq_audit(1, bsq, scheme, magn);
1100 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1102 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1103 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1105 entry = &bsq->host_entry[ bsq->head ];
1107 for (i = 0; i < RBD_BLK_SIZE; i++) {
1109 /* take the first buffer in the free buffer list */
1110 buffer = bsq->freebuf;
1112 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1113 scheme, magn, bsq->freebuf_count);
1116 bsq->freebuf = buffer->next;
1118 #ifdef FORE200E_BSQ_DEBUG
1119 if (buffer->supplied)
1120 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1121 scheme, magn, buffer->index);
1122 buffer->supplied = 1;
1124 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1125 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
1128 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1130 /* decrease the number of free rx buffers accordingly */
1131 bsq->freebuf_count -= RBD_BLK_SIZE;
1133 *entry->status = STATUS_PENDING;
1134 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
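/* hand a received PDU to the ATM layer: reassemble the segments described by the rpd
   into a freshly allocated skb and push it up the vcc */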
1142 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1144 struct sk_buff* skb;
1145 struct buffer* buffer;
1146 struct fore200e_vcc* fore200e_vcc;
1148 #ifdef FORE200E_52BYTE_AAL0_SDU
1149 u32 cell_header = 0;
1154 fore200e_vcc = FORE200E_VCC(vcc);
1155 ASSERT(fore200e_vcc);
1157 #ifdef FORE200E_52BYTE_AAL0_SDU
1158 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1160 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1161 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1162 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1163 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1164 rpd->atm_header.clp;
1169 /* compute total PDU length */
1170 for (i = 0; i < rpd->nseg; i++)
1171 pdu_len += rpd->rsd[ i ].length;
1173 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1175 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1177 atomic_inc(&vcc->stats->rx_drop);
1181 do_gettimeofday(&skb->stamp);
1183 #ifdef FORE200E_52BYTE_AAL0_SDU
1185 *((u32*)skb_put(skb, 4)) = cell_header;
1189 /* reassemble segments */
1190 for (i = 0; i < rpd->nseg; i++) {
1192 /* rebuild rx buffer address from rsd handle */
1193 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1195 /* Make device DMA transfer visible to CPU. */
1196 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1198 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1200 /* Now let the device get at it again. */
1201 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1204 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1206 if (pdu_len < fore200e_vcc->rx_min_pdu)
1207 fore200e_vcc->rx_min_pdu = pdu_len;
1208 if (pdu_len > fore200e_vcc->rx_max_pdu)
1209 fore200e_vcc->rx_max_pdu = pdu_len;
1210 fore200e_vcc->rx_pdu++;
1213 if (atm_charge(vcc, skb->truesize) == 0) {
1215 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1216 vcc->itf, vcc->vpi, vcc->vci);
1218 dev_kfree_skb_any(skb);
1220 atomic_inc(&vcc->stats->rx_drop);
1224 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1226 vcc->push(vcc, skb);
1227 atomic_inc(&vcc->stats->rx);
1229 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
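/* return the receive buffers referenced by a consumed rpd to their free buffer lists */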
1236 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1238 struct host_bsq* bsq;
1239 struct buffer* buffer;
1242 for (i = 0; i < rpd->nseg; i++) {
1244 /* rebuild rx buffer address from rsd handle */
1245 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1247 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1249 #ifdef FORE200E_BSQ_DEBUG
1250 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1252 if (buffer->supplied == 0)
1253 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1254 buffer->scheme, buffer->magn, buffer->index);
1255 buffer->supplied = 0;
1258 /* re-insert the buffer into the free buffer list */
1259 buffer->next = bsq->freebuf;
1260 bsq->freebuf = buffer;
1262 /* then increment the number of free rx buffers */
1263 bsq->freebuf_count++;
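/* service the receive queue: deliver or drop each completed PDU, recycle its buffers,
   give the rpd back to the cp and resupply the buffer supply queues */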
1269 fore200e_rx_irq(struct fore200e* fore200e)
1271 struct host_rxq* rxq = &fore200e->host_rxq;
1272 struct host_rxq_entry* entry;
1273 struct atm_vcc* vcc;
1274 struct fore200e_vc_map* vc_map;
1278 entry = &rxq->host_entry[ rxq->head ];
1280 /* no more received PDUs */
1281 if ((*entry->status & STATUS_COMPLETE) == 0)
1284 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1286 if ((vc_map->vcc == NULL) ||
1287 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1289 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1290 fore200e->atm_dev->number,
1291 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1297 if ((*entry->status & STATUS_ERROR) == 0) {
1299 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1302 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1303 fore200e->atm_dev->number,
1304 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1305 atomic_inc(&vcc->stats->rx_err);
1309 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1311 fore200e_collect_rpd(fore200e, entry->rpd);
1313 /* rewrite the rpd address to ack the received PDU */
1314 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1315 *entry->status = STATUS_FREE;
1317 fore200e_supply(fore200e);
1322 #ifndef FORE200E_USE_TASKLET
1324 fore200e_irq(struct fore200e* fore200e)
1326 unsigned long flags;
1328 spin_lock_irqsave(&fore200e->q_lock, flags);
1329 fore200e_rx_irq(fore200e);
1330 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1332 spin_lock_irqsave(&fore200e->q_lock, flags);
1333 fore200e_tx_irq(fore200e);
1334 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1340 fore200e_interrupt(int irq, void* dev, struct pt_regs* regs)
1342 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1344 if (fore200e->bus->irq_check(fore200e) == 0) {
1346 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1349 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1351 #ifdef FORE200E_USE_TASKLET
1352 tasklet_schedule(&fore200e->tx_tasklet);
1353 tasklet_schedule(&fore200e->rx_tasklet);
1355 fore200e_irq(fore200e);
1358 fore200e->bus->irq_ack(fore200e);
1363 #ifdef FORE200E_USE_TASKLET
1365 fore200e_tx_tasklet(unsigned long data)
1367 struct fore200e* fore200e = (struct fore200e*) data;
1368 unsigned long flags;
1370 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1372 spin_lock_irqsave(&fore200e->q_lock, flags);
1373 fore200e_tx_irq(fore200e);
1374 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1379 fore200e_rx_tasklet(unsigned long data)
1381 struct fore200e* fore200e = (struct fore200e*) data;
1382 unsigned long flags;
1384 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1386 spin_lock_irqsave(&fore200e->q_lock, flags);
1387 fore200e_rx_irq((struct fore200e*) data);
1388 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1394 fore200e_select_scheme(struct atm_vcc* vcc)
1396 /* fairly balance the VCs over (identical) buffer schemes */
1397 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1399 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1400 vcc->itf, vcc->vpi, vcc->vci, scheme);
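/* issue an OPCODE_ACTIVATE_VCIN or OPCODE_DEACTIVATE_VCIN command to the cp, opening
   or closing the incoming side of a VC */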
1407 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1409 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1410 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1411 struct activate_opcode activ_opcode;
1412 struct deactivate_opcode deactiv_opcode;
1415 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1417 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1420 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1422 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1423 activ_opcode.aal = aal;
1424 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1425 activ_opcode.pad = 0;
1428 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1429 deactiv_opcode.pad = 0;
1432 vpvc.vci = vcc->vci;
1433 vpvc.vpi = vcc->vpi;
1435 *entry->status = STATUS_PENDING;
1439 #ifdef FORE200E_52BYTE_AAL0_SDU
1442 /* the MTU is not used by the cp, except in the case of AAL0 */
1443 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1444 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1445 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1448 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1449 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1452 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1454 *entry->status = STATUS_FREE;
1457 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1458 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1462 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1463 activate ? "open" : "clos");
1469 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
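/* pseudo-CBR shaping works on a window of FORE200E_MAX_BACK2BACK_CELLS cells: the tx PCR
   is approximated as data_cells / FORE200E_MAX_BACK2BACK_CELLS of the OC-3 line rate,
   e.g. a requested PCR of half the line rate yields data_cells = 127 and idle_cells = 128,
   i.e. roughly one idle cell per data cell */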
1472 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1474 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1476 /* compute the data cells to idle cells ratio from the tx PCR */
1477 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1478 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1481 /* disable rate control */
1482 rate->data_cells = rate->idle_cells = 0;
1488 fore200e_open(struct atm_vcc *vcc)
1490 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1491 struct fore200e_vcc* fore200e_vcc;
1492 struct fore200e_vc_map* vc_map;
1493 unsigned long flags;
1495 short vpi = vcc->vpi;
1497 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1498 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1500 spin_lock_irqsave(&fore200e->q_lock, flags);
1502 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1505 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1507 printk(FORE200E "VC %d.%d.%d already in use\n",
1508 fore200e->atm_dev->number, vpi, vci);
1515 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1517 fore200e_vcc = fore200e_kmalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1518 if (fore200e_vcc == NULL) {
1523 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1524 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1525 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1526 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1527 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1528 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1529 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1531 /* pseudo-CBR bandwidth requested? */
1532 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1534 down(&fore200e->rate_sf);
1535 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1536 up(&fore200e->rate_sf);
1538 fore200e_kfree(fore200e_vcc);
1543 /* reserve bandwidth */
1544 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1545 up(&fore200e->rate_sf);
1548 vcc->itf = vcc->dev->number;
1550 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1551 set_bit(ATM_VF_ADDR, &vcc->flags);
1553 vcc->dev_data = fore200e_vcc;
1555 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1559 clear_bit(ATM_VF_ADDR, &vcc->flags);
1560 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1562 vcc->dev_data = NULL;
1564 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1566 fore200e_kfree(fore200e_vcc);
1570 /* compute rate control parameters */
1571 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1573 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1574 set_bit(ATM_VF_HASQOS, &vcc->flags);
1576 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1577 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1578 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1579 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1582 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1583 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1584 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1586 /* new incarnation of the vcc */
1587 vc_map->incarn = ++fore200e->incarn_count;
1589 /* VC unusable before this flag is set */
1590 set_bit(ATM_VF_READY, &vcc->flags);
1597 fore200e_close(struct atm_vcc* vcc)
1599 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1600 struct fore200e_vcc* fore200e_vcc;
1601 struct fore200e_vc_map* vc_map;
1602 unsigned long flags;
1605 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1606 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1608 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1610 clear_bit(ATM_VF_READY, &vcc->flags);
1612 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1614 spin_lock_irqsave(&fore200e->q_lock, flags);
1616 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1618 /* the vc is no longer considered "in use" by fore200e_open() */
1621 vcc->itf = vcc->vci = vcc->vpi = 0;
1623 fore200e_vcc = FORE200E_VCC(vcc);
1624 vcc->dev_data = NULL;
1626 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1628 /* release reserved bandwidth, if any */
1629 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1631 down(&fore200e->rate_sf);
1632 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1633 up(&fore200e->rate_sf);
1635 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1638 clear_bit(ATM_VF_ADDR, &vcc->flags);
1639 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1641 ASSERT(fore200e_vcc);
1642 fore200e_kfree(fore200e_vcc);
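/* transmit an skb on the given vcc: data that is misaligned, or that does not span a
   whole number of cells for AAL0, is first copied into a DMA-able scratch buffer; a tpd
   is then built and handed to the cp through the transmit queue */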
1647 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1649 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1650 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1651 struct fore200e_vc_map* vc_map;
1652 struct host_txq* txq = &fore200e->host_txq;
1653 struct host_txq_entry* entry;
1655 struct tpd_haddr tpd_haddr;
1656 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1658 int tx_len = skb->len;
1659 u32* cell_header = NULL;
1660 unsigned char* skb_data;
1662 unsigned char* data;
1663 unsigned long flags;
1666 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1668 ASSERT(fore200e_vcc);
1670 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1671 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1672 dev_kfree_skb_any(skb);
1676 #ifdef FORE200E_52BYTE_AAL0_SDU
1677 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1678 cell_header = (u32*) skb->data;
1679 skb_data = skb->data + 4; /* skip 4-byte cell header */
1680 skb_len = tx_len = skb->len - 4;
1682 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1687 skb_data = skb->data;
1691 if (((unsigned long)skb_data) & 0x3) {
1693 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1698 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1700 /* this simply NUKES the PCA board */
1701 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1703 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1707 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1713 dev_kfree_skb_any(skb);
1718 memcpy(data, skb_data, skb_len);
1719 if (skb_len < tx_len)
1720 memset(data + skb_len, 0x00, tx_len - skb_len);
1726 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1727 ASSERT(vc_map->vcc == vcc);
1731 spin_lock_irqsave(&fore200e->q_lock, flags);
1733 entry = &txq->host_entry[ txq->head ];
1735 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1737 /* try to free completed tx queue entries */
1738 fore200e_tx_irq(fore200e);
1740 if (*entry->status != STATUS_FREE) {
1742 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1744 /* retry once again? */
1750 atomic_inc(&vcc->stats->tx_err);
1753 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1754 fore200e->name, fore200e->cp_queues->heartbeat);
1759 dev_kfree_skb_any(skb);
1769 entry->incarn = vc_map->incarn;
1770 entry->vc_map = vc_map;
1772 entry->data = tx_copy ? data : NULL;
1775 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1776 tpd->tsd[ 0 ].length = tx_len;
1778 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1781 /* The dma_map call above implies a dma_sync so the device can use it,
1782 * thus no explicit dma_sync call is necessary here.
1785 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1786 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1787 tpd->tsd[0].length, skb_len);
1789 if (skb_len < fore200e_vcc->tx_min_pdu)
1790 fore200e_vcc->tx_min_pdu = skb_len;
1791 if (skb_len > fore200e_vcc->tx_max_pdu)
1792 fore200e_vcc->tx_max_pdu = skb_len;
1793 fore200e_vcc->tx_pdu++;
1795 /* set tx rate control information */
1796 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1797 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1800 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1801 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1802 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1803 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1804 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1807 /* set the ATM header, common to all cells conveying the PDU */
1808 tpd->atm_header.clp = 0;
1809 tpd->atm_header.plt = 0;
1810 tpd->atm_header.vci = vcc->vci;
1811 tpd->atm_header.vpi = vcc->vpi;
1812 tpd->atm_header.gfc = 0;
1815 tpd->spec.length = tx_len;
1817 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1820 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32-byte blocks */
1822 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
1824 *entry->status = STATUS_PENDING;
1825 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1827 spin_unlock_irqrestore(&fore200e->q_lock, flags);
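/* issue an OPCODE_GET_STATS command and DMA the on-board statistics back into
   fore200e->stats */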
1834 fore200e_getstats(struct fore200e* fore200e)
1836 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1837 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1838 struct stats_opcode opcode;
1842 if (fore200e->stats == NULL) {
1843 fore200e->stats = fore200e_kmalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1844 if (fore200e->stats == NULL)
1848 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1849 sizeof(struct stats), DMA_FROM_DEVICE);
1851 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1853 opcode.opcode = OPCODE_GET_STATS;
1856 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1858 *entry->status = STATUS_PENDING;
1860 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1862 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1864 *entry->status = STATUS_FREE;
1866 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1869 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1878 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1880 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1882 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1883 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1890 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1892 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1894 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1895 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1901 #if 0 /* currently unused */
1903 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1905 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1906 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1907 struct oc3_opcode opcode;
1909 u32 oc3_regs_dma_addr;
1911 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1913 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1915 opcode.opcode = OPCODE_GET_OC3;
1920 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1922 *entry->status = STATUS_PENDING;
1924 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1926 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1928 *entry->status = STATUS_FREE;
1930 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1933 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1943 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1945 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1946 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1947 struct oc3_opcode opcode;
1950 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1952 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1954 opcode.opcode = OPCODE_SET_OC3;
1956 opcode.value = value;
1959 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1961 *entry->status = STATUS_PENDING;
1963 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1965 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1967 *entry->status = STATUS_FREE;
1970 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1979 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1981 u32 mct_value, mct_mask;
1984 if (!capable(CAP_NET_ADMIN))
1987 switch (loop_mode) {
1991 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1994 case ATM_LM_LOC_PHY:
1995 mct_value = mct_mask = SUNI_MCT_DLE;
1998 case ATM_LM_RMT_PHY:
1999 mct_value = mct_mask = SUNI_MCT_LLE;
2006 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
2008 fore200e->loop_mode = loop_mode;
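/* convert a 32-bit statistics counter reported by the cp (stored big-endian) to host
   byte order: swapped on little-endian hosts, returned unchanged on big-endian ones */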
2014 static inline unsigned int
2015 fore200e_swap(unsigned int in)
2017 #if defined(__LITTLE_ENDIAN)
2026 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
2028 struct sonet_stats tmp;
2030 if (fore200e_getstats(fore200e) < 0)
2033 tmp.section_bip = fore200e_swap(fore200e->stats->oc3.section_bip8_errors);
2034 tmp.line_bip = fore200e_swap(fore200e->stats->oc3.line_bip24_errors);
2035 tmp.path_bip = fore200e_swap(fore200e->stats->oc3.path_bip8_errors);
2036 tmp.line_febe = fore200e_swap(fore200e->stats->oc3.line_febe_errors);
2037 tmp.path_febe = fore200e_swap(fore200e->stats->oc3.path_febe_errors);
2038 tmp.corr_hcs = fore200e_swap(fore200e->stats->oc3.corr_hcs_errors);
2039 tmp.uncorr_hcs = fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors);
2040 tmp.tx_cells = fore200e_swap(fore200e->stats->aal0.cells_transmitted) +
2041 fore200e_swap(fore200e->stats->aal34.cells_transmitted) +
2042 fore200e_swap(fore200e->stats->aal5.cells_transmitted);
2043 tmp.rx_cells = fore200e_swap(fore200e->stats->aal0.cells_received) +
2044 fore200e_swap(fore200e->stats->aal34.cells_received) +
2045 fore200e_swap(fore200e->stats->aal5.cells_received);
2048 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
2055 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2057 struct fore200e* fore200e = FORE200E_DEV(dev);
2059 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2064 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
2067 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
2070 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2073 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2076 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2079 return -ENOSYS; /* not implemented */
2084 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2086 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2087 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2089 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2090 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
2094 DPRINTK(2, "change_qos %d.%d.%d, "
2095 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2096 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2097 "available_cell_rate = %u",
2098 vcc->itf, vcc->vpi, vcc->vci,
2099 fore200e_traffic_class[ qos->txtp.traffic_class ],
2100 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2101 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2102 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2103 flags, fore200e->available_cell_rate);
2105 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2107 down(&fore200e->rate_sf);
2108 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2109 up(&fore200e->rate_sf);
2113 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2114 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2116 up(&fore200e->rate_sf);
2118 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2120 /* update rate control parameters */
2121 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2123 set_bit(ATM_VF_HASQOS, &vcc->flags);
2133 fore200e_irq_request(struct fore200e* fore200e)
2135 if (request_irq(fore200e->irq, fore200e_interrupt, SA_SHIRQ, fore200e->name, fore200e->atm_dev) < 0) {
2137 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2138 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2142 printk(FORE200E "IRQ %s reserved for device %s\n",
2143 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2145 #ifdef FORE200E_USE_TASKLET
2146 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2147 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2150 fore200e->state = FORE200E_STATE_IRQ;
2156 fore200e_get_esi(struct fore200e* fore200e)
2158 struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2164 ok = fore200e->bus->prom_read(fore200e, prom);
2166 fore200e_kfree(prom);
2170 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2172 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2173 prom->serial_number & 0xFFFF,
2174 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2175 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2177 for (i = 0; i < ESI_LEN; i++) {
2178 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2181 fore200e_kfree(prom);
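/* allocate the arrays of receive buffers for every scheme/magnitude, give each buffer a
   DMA-mapped data chunk and chain them all into the per-queue free buffer lists */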
2188 fore200e_alloc_rx_buf(struct fore200e* fore200e)
2190 int scheme, magn, nbr, size, i;
2192 struct host_bsq* bsq;
2193 struct buffer* buffer;
2195 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2196 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2198 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2200 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2201 size = fore200e_rx_buf_size[ scheme ][ magn ];
2203 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2205 /* allocate the array of receive buffers */
2206 buffer = bsq->buffer = fore200e_kmalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2211 bsq->freebuf = NULL;
2213 for (i = 0; i < nbr; i++) {
2215 buffer[ i ].scheme = scheme;
2216 buffer[ i ].magn = magn;
2217 #ifdef FORE200E_BSQ_DEBUG
2218 buffer[ i ].index = i;
2219 buffer[ i ].supplied = 0;
2222 /* allocate the receive buffer body */
2223 if (fore200e_chunk_alloc(fore200e,
2224 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2225 DMA_FROM_DEVICE) < 0) {
2228 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2229 fore200e_kfree(buffer);
2234 /* insert the buffer into the free buffer list */
2235 buffer[ i ].next = bsq->freebuf;
2236 bsq->freebuf = &buffer[ i ];
2238 /* all the buffers are free, initially */
2239 bsq->freebuf_count = nbr;
2241 #ifdef FORE200E_BSQ_DEBUG
2242 bsq_audit(3, bsq, scheme, magn);
2247 fore200e->state = FORE200E_STATE_ALLOC_BUF;
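/* initialize the buffer supply queues: allocate and align the host-resident status words
   and rbd blocks, and bind each host entry to its cp-resident counterpart */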
2253 fore200e_init_bs_queue(struct fore200e* fore200e)
2255 int scheme, magn, i;
2257 struct host_bsq* bsq;
2258 struct cp_bsq_entry __iomem * cp_entry;
2260 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2261 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2263 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2265 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2267 /* allocate and align the array of status words */
2268 if (fore200e->bus->dma_chunk_alloc(fore200e,
2270 sizeof(enum status),
2272 fore200e->bus->status_alignment) < 0) {
2276 /* allocate and align the array of receive buffer descriptors */
2277 if (fore200e->bus->dma_chunk_alloc(fore200e,
2279 sizeof(struct rbd_block),
2281 fore200e->bus->descr_alignment) < 0) {
2283 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2287 /* get the base address of the cp resident buffer supply queue entries */
2288 cp_entry = fore200e->virt_base +
2289 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2291 /* fill the host resident and cp resident buffer supply queue entries */
2292 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2294 bsq->host_entry[ i ].status =
2295 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2296 bsq->host_entry[ i ].rbd_block =
2297 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2298 bsq->host_entry[ i ].rbd_block_dma =
2299 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2300 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2302 *bsq->host_entry[ i ].status = STATUS_FREE;
2304 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2305 &cp_entry[ i ].status_haddr);
2306 }
2307 }
2308 }
2310 fore200e->state = FORE200E_STATE_INIT_BSQ;
2312 return 0;
2313 }
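/* the rx, tx and cmd queues below follow the same pattern: each host entry
   keeps a status word in host memory and passes its DMA address (status_haddr)
   to the cp, so completions can be detected by polling host memory. */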
2316 fore200e_init_rx_queue(struct fore200e* fore200e)
2318 struct host_rxq* rxq = &fore200e->host_rxq;
2319 struct cp_rxq_entry __iomem * cp_entry;
2322 DPRINTK(2, "receive queue is being initialized\n");
2324 /* allocate and align the array of status words */
2325 if (fore200e->bus->dma_chunk_alloc(fore200e,
2326 &rxq->status,
2327 sizeof(enum status),
2328 QUEUE_SIZE_RX,
2329 fore200e->bus->status_alignment) < 0) {
2330 return -ENOMEM;
2331 }
2333 /* allocate and align the array of receive PDU descriptors */
2334 if (fore200e->bus->dma_chunk_alloc(fore200e,
2335 &rxq->rpd,
2336 sizeof(struct rpd),
2337 QUEUE_SIZE_RX,
2338 fore200e->bus->descr_alignment) < 0) {
2340 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2341 return -ENOMEM;
2342 }
2344 /* get the base address of the cp resident rx queue entries */
2345 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2347 /* fill the host resident and cp resident rx entries */
2348 for (i=0; i < QUEUE_SIZE_RX; i++) {
2350 rxq->host_entry[ i ].status =
2351 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2352 rxq->host_entry[ i ].rpd =
2353 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2354 rxq->host_entry[ i ].rpd_dma =
2355 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2356 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2358 *rxq->host_entry[ i ].status = STATUS_FREE;
2360 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2361 &cp_entry[ i ].status_haddr);
2363 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2364 &cp_entry[ i ].rpd_haddr);
2365 }
2367 /* set the head entry of the queue */
2368 rxq->head = 0;
2370 fore200e->state = FORE200E_STATE_INIT_RXQ;
2372 return 0;
2373 }
2376 fore200e_init_tx_queue(struct fore200e* fore200e)
2378 struct host_txq* txq = &fore200e->host_txq;
2379 struct cp_txq_entry __iomem * cp_entry;
2382 DPRINTK(2, "transmit queue is being initialized\n");
2384 /* allocate and align the array of status words */
2385 if (fore200e->bus->dma_chunk_alloc(fore200e,
2386 &txq->status,
2387 sizeof(enum status),
2388 QUEUE_SIZE_TX,
2389 fore200e->bus->status_alignment) < 0) {
2390 return -ENOMEM;
2391 }
2393 /* allocate and align the array of transmit PDU descriptors */
2394 if (fore200e->bus->dma_chunk_alloc(fore200e,
2395 &txq->tpd,
2396 sizeof(struct tpd),
2397 QUEUE_SIZE_TX,
2398 fore200e->bus->descr_alignment) < 0) {
2400 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2401 return -ENOMEM;
2402 }
2404 /* get the base address of the cp resident tx queue entries */
2405 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2407 /* fill the host resident and cp resident tx entries */
2408 for (i=0; i < QUEUE_SIZE_TX; i++) {
2410 txq->host_entry[ i ].status =
2411 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2412 txq->host_entry[ i ].tpd =
2413 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2414 txq->host_entry[ i ].tpd_dma =
2415 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2416 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2418 *txq->host_entry[ i ].status = STATUS_FREE;
2420 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2421 &cp_entry[ i ].status_haddr);
2423 /* although there is a one-to-one mapping of tx queue entries and tpds,
2424 we do not write here the DMA (physical) base address of each tpd into
2425 the related cp resident entry, because the cp relies on this write
2426 operation to detect that a new pdu has been submitted for tx */
2427 }
2429 /* set the head and tail entries of the queue */
2430 txq->head = 0;
2431 txq->tail = 0;
2433 fore200e->state = FORE200E_STATE_INIT_TXQ;
2435 return 0;
2436 }
2439 fore200e_init_cmd_queue(struct fore200e* fore200e)
2441 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2442 struct cp_cmdq_entry __iomem * cp_entry;
2445 DPRINTK(2, "command queue is being initialized\n");
2447 /* allocate and align the array of status words */
2448 if (fore200e->bus->dma_chunk_alloc(fore200e,
2449 &cmdq->status,
2450 sizeof(enum status),
2451 QUEUE_SIZE_CMD,
2452 fore200e->bus->status_alignment) < 0) {
2453 return -ENOMEM;
2454 }
2456 /* get the base address of the cp resident cmd queue entries */
2457 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2459 /* fill the host resident and cp resident cmd entries */
2460 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2462 cmdq->host_entry[ i ].status =
2463 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2464 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2466 *cmdq->host_entry[ i ].status = STATUS_FREE;
2468 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2469 &cp_entry[ i ].status_haddr);
2470 }
2472 /* set the head entry of the queue */
2473 cmdq->head = 0;
2475 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2477 return 0;
2478 }
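/* fore200e_param_bs_queue: advertise the geometry of one buffer supply queue
   (queue length, buffer size, pool size, supply block size) to the firmware
   through the initialization block. */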
2481 fore200e_param_bs_queue(struct fore200e* fore200e,
2482 enum buffer_scheme scheme, enum buffer_magn magn,
2483 int queue_length, int pool_size, int supply_blksize)
2485 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2487 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2488 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2489 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2490 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2495 fore200e_initialize(struct fore200e* fore200e)
2497 struct cp_queues __iomem * cpq;
2498 int ok, scheme, magn;
2500 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2502 init_MUTEX(&fore200e->rate_sf);
2503 spin_lock_init(&fore200e->q_lock);
2505 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2507 /* enable cp to host interrupts */
2508 fore200e->bus->write(1, &cpq->imask);
2510 if (fore200e->bus->irq_enable)
2511 fore200e->bus->irq_enable(fore200e);
2513 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2515 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2516 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2517 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2519 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2520 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2522 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2523 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2524 fore200e_param_bs_queue(fore200e, scheme, magn,
2525 QUEUE_SIZE_BS,
2526 fore200e_rx_buf_nbr[ scheme ][ magn ],
2527 RBD_BLK_SIZE);
2529 /* issue the initialize command */
2530 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2531 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2533 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2534 if (ok == 0) {
2535 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2536 return -ENODEV;
2537 }
2539 printk(FORE200E "device %s initialized\n", fore200e->name);
2541 fore200e->state = FORE200E_STATE_INITIALIZE;
2543 return 0;
2544 }
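/* the i960 boot monitor is driven through a "soft UART": one mailbox word in
   each direction, with the AVAIL flag set by the producer and the FREE flag
   written back once the character has been consumed. */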
2547 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2549 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2554 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2555 }
2559 fore200e_monitor_getc(struct fore200e* fore200e)
2561 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2562 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2563 int c;
2565 while (time_before(jiffies, timeout)) {
2567 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2569 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2571 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2573 printk("%c", c & 0xFF);
2576 return c & 0xFF;
2577 }
2578 }
2580 return -1;
2581 }
2584 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2585 {
2587 while (*str) {
2588 /* the i960 monitor doesn't accept any new character if it has something to say */
2589 while (fore200e_monitor_getc(fore200e) >= 0);
2591 fore200e_monitor_putc(fore200e, *str++);
2592 }
2594 while (fore200e_monitor_getc(fore200e) >= 0);
2595 }
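/* fore200e_start_fw: the firmware is started by typing a "go <start_offset>"
   command at the boot monitor prompt, then polling the boot status word until
   the cp reports BSTAT_CP_RUNNING. */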
2599 fore200e_start_fw(struct fore200e* fore200e)
2601 int ok;
2602 char cmd[ 48 ];
2603 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;
2605 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2607 #if defined(__sparc_v9__)
2608 /* reported to be required by SBA cards on some sparc64 hosts */
2609 fore200e_spin(100);
2610 #endif
2612 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2614 fore200e_monitor_puts(fore200e, cmd);
2616 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
2617 if (ok == 0) {
2618 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2619 return -ENODEV;
2620 }
2622 printk(FORE200E "device %s firmware started\n", fore200e->name);
2624 fore200e->state = FORE200E_STATE_START_FW;
2626 return 0;
2627 }
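/* fore200e_load_fw: the firmware image begins with a small header (magic
   number, load and start offsets) and is copied word by word, with
   little-endian conversion, into the board memory at load_offset. */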
2630 fore200e_load_fw(struct fore200e* fore200e)
2632 u32* fw_data = (u32*) fore200e->bus->fw_data;
2633 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
2635 struct fw_header* fw_header = (struct fw_header*) fw_data;
2637 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2639 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2640 fore200e->name, load_addr, fw_size);
2642 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2643 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2644 return -ENODEV;
2645 }
2647 for (; fw_size--; fw_data++, load_addr++)
2648 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2650 fore200e->state = FORE200E_STATE_LOAD_FW;
2652 return 0;
2653 }
2656 fore200e_register(struct fore200e* fore200e)
2658 struct atm_dev* atm_dev;
2660 DPRINTK(2, "device %s being registered\n", fore200e->name);
2662 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2663 NULL);
2664 if (atm_dev == NULL) {
2665 printk(FORE200E "unable to register device %s\n", fore200e->name);
2666 return -ENODEV;
2667 }
2669 atm_dev->dev_data = fore200e;
2670 fore200e->atm_dev = atm_dev;
2672 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2673 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2675 fore200e->available_cell_rate = ATM_OC3_PCR;
2677 fore200e->state = FORE200E_STATE_REGISTER;
2679 return 0;
2680 }
2683 fore200e_init(struct fore200e* fore200e)
2685 if (fore200e_register(fore200e) < 0)
2686 return -ENODEV;
2688 if (fore200e->bus->configure(fore200e) < 0)
2689 return -ENODEV;
2691 if (fore200e->bus->map(fore200e) < 0)
2692 return -ENODEV;
2694 if (fore200e_reset(fore200e, 1) < 0)
2695 return -ENODEV;
2697 if (fore200e_load_fw(fore200e) < 0)
2698 return -ENODEV;
2700 if (fore200e_start_fw(fore200e) < 0)
2701 return -ENODEV;
2703 if (fore200e_initialize(fore200e) < 0)
2704 return -ENODEV;
2706 if (fore200e_init_cmd_queue(fore200e) < 0)
2707 return -ENOMEM;
2709 if (fore200e_init_tx_queue(fore200e) < 0)
2710 return -ENOMEM;
2712 if (fore200e_init_rx_queue(fore200e) < 0)
2713 return -ENOMEM;
2715 if (fore200e_init_bs_queue(fore200e) < 0)
2716 return -ENOMEM;
2718 if (fore200e_alloc_rx_buf(fore200e) < 0)
2719 return -ENOMEM;
2721 if (fore200e_get_esi(fore200e) < 0)
2722 return -EIO;
2724 if (fore200e_irq_request(fore200e) < 0)
2725 return -EBUSY;
2727 fore200e_supply(fore200e);
2729 /* all done, board initialization is now complete */
2730 fore200e->state = FORE200E_STATE_COMPLETE;
2732 return 0;
2733 }
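/* PCI probe path: enable the device, allocate the per-board state, record
   BAR 0 and the IRQ line, then run the same fore200e_init() sequence that
   the SBus attachment uses. */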
2735 static int __devinit
2736 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2738 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2739 struct fore200e* fore200e;
2740 int err = 0;
2741 static int index = 0;
2743 if (pci_enable_device(pci_dev)) {
2744 err = -EINVAL;
2745 goto out;
2746 }
2748 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
2749 if (fore200e == NULL) {
2750 err = -ENOMEM;
2751 goto out_disable;
2752 }
2754 fore200e->bus = bus;
2755 fore200e->bus_dev = pci_dev;
2756 fore200e->irq = pci_dev->irq;
2757 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2761 pci_set_master(pci_dev);
2763 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2764 fore200e->bus->model_name,
2765 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2767 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2769 err = fore200e_init(fore200e);
2770 if (err < 0) {
2771 fore200e_shutdown(fore200e);
2772 goto out_free;
2773 }
2775 ++index;
2776 pci_set_drvdata(pci_dev, fore200e);
2778 out:
2779 return err;
2781 out_free:
2782 kfree(fore200e);
2783 out_disable:
2784 pci_disable_device(pci_dev);
2785 goto out;
2786 }
2789 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2791 struct fore200e *fore200e;
2793 fore200e = pci_get_drvdata(pci_dev);
2795 fore200e_shutdown(fore200e);
2796 kfree(fore200e);
2797 pci_disable_device(pci_dev);
2798 }
2801 #ifdef CONFIG_ATM_FORE200E_PCA
2802 static struct pci_device_id fore200e_pca_tbl[] = {
2803 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2804 0, 0, (unsigned long) &fore200e_bus[0] },
2805 { 0, }
2806 };
2808 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2810 static struct pci_driver fore200e_pca_driver = {
2811 .name = "fore_200e",
2812 .probe = fore200e_pca_detect,
2813 .remove = __devexit_p(fore200e_pca_remove_one),
2814 .id_table = fore200e_pca_tbl,
2815 };
2816 #endif
2820 fore200e_module_init(void)
2822 const struct fore200e_bus* bus;
2823 struct fore200e* fore200e;
2824 int index;
2826 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2828 /* for each configured bus interface */
2829 for (bus = fore200e_bus; bus->model_name; bus++) {
2831 /* detect all boards present on that bus */
2832 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {
2834 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2835 fore200e->bus->model_name,
2836 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2838 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2840 if (fore200e_init(fore200e) < 0) {
2842 fore200e_shutdown(fore200e);
2843 break;
2844 }
2846 list_add(&fore200e->entry, &fore200e_boards);
2847 }
2848 }
2850 #ifdef CONFIG_ATM_FORE200E_PCA
2851 if (!pci_register_driver(&fore200e_pca_driver))
2852 return 0;
2853 #endif
2855 if (!list_empty(&fore200e_boards))
2856 return 0;
2858 return -ENODEV;
2859 }
2863 fore200e_module_cleanup(void)
2865 struct fore200e *fore200e, *next;
2867 #ifdef CONFIG_ATM_FORE200E_PCA
2868 pci_unregister_driver(&fore200e_pca_driver);
2869 #endif
2871 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
2872 fore200e_shutdown(fore200e);
2873 kfree(fore200e);
2874 }
2875 DPRINTK(1, "module being removed\n");
2876 }
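/* /proc output is produced one chunk per call: 'left' starts at *pos and
   each "if (!left--)" block emits exactly one chunk, so successive reads
   walk through the sections until the function finally returns 0. */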
2880 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2882 struct fore200e* fore200e = FORE200E_DEV(dev);
2883 struct fore200e_vcc* fore200e_vcc;
2884 struct atm_vcc* vcc;
2885 int i, len, left = *pos;
2886 unsigned long flags;
2888 if (!left--) {
2890 if (fore200e_getstats(fore200e) < 0)
2891 return -EIO;
2893 len = sprintf(page,"\n"
2894 " device:\n"
2895 " internal name:\t\t%s\n", fore200e->name);
2897 /* print bus-specific information */
2898 if (fore200e->bus->proc_read)
2899 len += fore200e->bus->proc_read(fore200e, page + len);
2901 len += sprintf(page + len,
2902 " interrupt line:\t\t%s\n"
2903 " physical base address:\t0x%p\n"
2904 " virtual base address:\t0x%p\n"
2905 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2906 " board serial number:\t\t%d\n\n",
2907 fore200e_irq_itoa(fore200e->irq),
2908 (void*)fore200e->phys_base,
2909 fore200e->virt_base,
2910 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2911 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
2912 fore200e->esi[4] * 256 + fore200e->esi[5]);
2914 return len;
2915 }
2917 if (!left--)
2918 return sprintf(page,
2919 " free small bufs, scheme 1:\t%d\n"
2920 " free large bufs, scheme 1:\t%d\n"
2921 " free small bufs, scheme 2:\t%d\n"
2922 " free large bufs, scheme 2:\t%d\n",
2923 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2924 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2925 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2926 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2928 if (!left--) {
2929 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2931 len = sprintf(page,"\n\n"
2932 " cell processor:\n"
2933 " heartbeat state:\t\t");
2935 if (hb >> 16 != 0xDEAD)
2936 len += sprintf(page + len, "0x%08x\n", hb);
2937 else
2938 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2940 return len;
2941 }
2943 if (!left--) {
2944 static const char* media_name[] = {
2945 "unshielded twisted pair",
2946 "multimode optical fiber ST",
2947 "multimode optical fiber SC",
2948 "single-mode optical fiber ST",
2949 "single-mode optical fiber SC",
2950 "unknown"
2951 };
2953 static const char* oc3_mode[] = {
2954 "normal operation",
2955 "diagnostic loopback",
2956 "line loopback",
2957 "unknown"
2958 };
2960 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2961 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2962 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2963 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2964 u32 oc3_index;
2966 if ((media_index < 0) || (media_index > 4))
2967 media_index = 5;
2969 switch (fore200e->loop_mode) {
2970 case ATM_LM_NONE: oc3_index = 0;
2971 break;
2972 case ATM_LM_LOC_PHY: oc3_index = 1;
2973 break;
2974 case ATM_LM_RMT_PHY: oc3_index = 2;
2975 break;
2976 default: oc3_index = 3;
2978 }
2979 return sprintf(page,
2980 " firmware release:\t\t%d.%d.%d\n"
2981 " monitor release:\t\t%d.%d\n"
2982 " media type:\t\t\t%s\n"
2983 " OC-3 revision:\t\t0x%x\n"
2984 " OC-3 mode:\t\t\t%s",
2985 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2986 mon960_release >> 16, mon960_release << 16 >> 16,
2987 media_name[ media_index ],
2988 oc3_revision,
2989 oc3_mode[ oc3_index ]);
2990 }
2992 if (!left--) {
2993 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2995 return sprintf(page,
2996 "\n"
2997 " monitor:\n"
2998 " version number:\t\t%d\n"
2999 " boot status word:\t\t0x%08x\n",
3000 fore200e->bus->read(&cp_monitor->mon_version),
3001 fore200e->bus->read(&cp_monitor->bstat));
3002 }
3004 if (!left--)
3005 return sprintf(page,
3006 "\n"
3007 " device statistics:\n"
3008 " 4b5b:\n"
3009 " crc_header_errors:\t\t%10u\n"
3010 " framing_errors:\t\t%10u\n",
3011 fore200e_swap(fore200e->stats->phy.crc_header_errors),
3012 fore200e_swap(fore200e->stats->phy.framing_errors));
3014 if (!left--)
3015 return sprintf(page, "\n"
3016 " OC-3:\n"
3017 " section_bip8_errors:\t%10u\n"
3018 " path_bip8_errors:\t\t%10u\n"
3019 " line_bip24_errors:\t\t%10u\n"
3020 " line_febe_errors:\t\t%10u\n"
3021 " path_febe_errors:\t\t%10u\n"
3022 " corr_hcs_errors:\t\t%10u\n"
3023 " ucorr_hcs_errors:\t\t%10u\n",
3024 fore200e_swap(fore200e->stats->oc3.section_bip8_errors),
3025 fore200e_swap(fore200e->stats->oc3.path_bip8_errors),
3026 fore200e_swap(fore200e->stats->oc3.line_bip24_errors),
3027 fore200e_swap(fore200e->stats->oc3.line_febe_errors),
3028 fore200e_swap(fore200e->stats->oc3.path_febe_errors),
3029 fore200e_swap(fore200e->stats->oc3.corr_hcs_errors),
3030 fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors));
3032 if (!left--)
3033 return sprintf(page,"\n"
3034 " ATM:\t\t\t\t cells\n"
3035 " TX:\t\t\t%10u\n"
3036 " RX:\t\t\t%10u\n"
3037 " vpi out of range:\t\t%10u\n"
3038 " vpi no conn:\t\t%10u\n"
3039 " vci out of range:\t\t%10u\n"
3040 " vci no conn:\t\t%10u\n",
3041 fore200e_swap(fore200e->stats->atm.cells_transmitted),
3042 fore200e_swap(fore200e->stats->atm.cells_received),
3043 fore200e_swap(fore200e->stats->atm.vpi_bad_range),
3044 fore200e_swap(fore200e->stats->atm.vpi_no_conn),
3045 fore200e_swap(fore200e->stats->atm.vci_bad_range),
3046 fore200e_swap(fore200e->stats->atm.vci_no_conn));
3048 if (!left--)
3049 return sprintf(page,"\n"
3050 " AAL0:\t\t\t cells\n"
3051 " TX:\t\t\t%10u\n"
3052 " RX:\t\t\t%10u\n"
3053 " dropped:\t\t\t%10u\n",
3054 fore200e_swap(fore200e->stats->aal0.cells_transmitted),
3055 fore200e_swap(fore200e->stats->aal0.cells_received),
3056 fore200e_swap(fore200e->stats->aal0.cells_dropped));
3058 if (!left--)
3059 return sprintf(page,"\n"
3060 " AAL3/4:\n"
3061 " SAR sublayer:\t\t cells\n"
3062 " TX:\t\t\t%10u\n"
3063 " RX:\t\t\t%10u\n"
3064 " dropped:\t\t\t%10u\n"
3065 " CRC errors:\t\t%10u\n"
3066 " protocol errors:\t\t%10u\n\n"
3067 " CS sublayer:\t\t PDUs\n"
3068 " TX:\t\t\t%10u\n"
3069 " RX:\t\t\t%10u\n"
3070 " dropped:\t\t\t%10u\n"
3071 " protocol errors:\t\t%10u\n",
3072 fore200e_swap(fore200e->stats->aal34.cells_transmitted),
3073 fore200e_swap(fore200e->stats->aal34.cells_received),
3074 fore200e_swap(fore200e->stats->aal34.cells_dropped),
3075 fore200e_swap(fore200e->stats->aal34.cells_crc_errors),
3076 fore200e_swap(fore200e->stats->aal34.cells_protocol_errors),
3077 fore200e_swap(fore200e->stats->aal34.cspdus_transmitted),
3078 fore200e_swap(fore200e->stats->aal34.cspdus_received),
3079 fore200e_swap(fore200e->stats->aal34.cspdus_dropped),
3080 fore200e_swap(fore200e->stats->aal34.cspdus_protocol_errors));
3082 if (!left--)
3083 return sprintf(page,"\n"
3084 " AAL5:\n"
3085 " SAR sublayer:\t\t cells\n"
3086 " TX:\t\t\t%10u\n"
3087 " RX:\t\t\t%10u\n"
3088 " dropped:\t\t\t%10u\n"
3089 " congestions:\t\t%10u\n\n"
3090 " CS sublayer:\t\t PDUs\n"
3091 " TX:\t\t\t%10u\n"
3092 " RX:\t\t\t%10u\n"
3093 " dropped:\t\t\t%10u\n"
3094 " CRC errors:\t\t%10u\n"
3095 " protocol errors:\t\t%10u\n",
3096 fore200e_swap(fore200e->stats->aal5.cells_transmitted),
3097 fore200e_swap(fore200e->stats->aal5.cells_received),
3098 fore200e_swap(fore200e->stats->aal5.cells_dropped),
3099 fore200e_swap(fore200e->stats->aal5.congestion_experienced),
3100 fore200e_swap(fore200e->stats->aal5.cspdus_transmitted),
3101 fore200e_swap(fore200e->stats->aal5.cspdus_received),
3102 fore200e_swap(fore200e->stats->aal5.cspdus_dropped),
3103 fore200e_swap(fore200e->stats->aal5.cspdus_crc_errors),
3104 fore200e_swap(fore200e->stats->aal5.cspdus_protocol_errors));
3106 if (!left--)
3107 return sprintf(page,"\n"
3108 " AUX:\t\t allocation failures\n"
3109 " small b1:\t\t\t%10u\n"
3110 " large b1:\t\t\t%10u\n"
3111 " small b2:\t\t\t%10u\n"
3112 " large b2:\t\t\t%10u\n"
3113 " RX PDUs:\t\t\t%10u\n"
3114 " TX PDUs:\t\t\t%10lu\n",
3115 fore200e_swap(fore200e->stats->aux.small_b1_failed),
3116 fore200e_swap(fore200e->stats->aux.large_b1_failed),
3117 fore200e_swap(fore200e->stats->aux.small_b2_failed),
3118 fore200e_swap(fore200e->stats->aux.large_b2_failed),
3119 fore200e_swap(fore200e->stats->aux.rpd_alloc_failed),
3120 fore200e->tx_sat);
3122 if (!left--)
3123 return sprintf(page,"\n"
3124 " receive carrier:\t\t\t%s\n",
3125 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3127 if (!left--)
3128 return sprintf(page,"\n"
3129 " VCCs:\n address VPI VCI AAL "
3130 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3133 for (i = 0; i < NBR_CONNECT; i++) {
3135 vcc = fore200e->vc_map[i].vcc;
3137 if (vcc == NULL)
3138 continue;
3140 spin_lock_irqsave(&fore200e->q_lock, flags);
3142 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3144 fore200e_vcc = FORE200E_VCC(vcc);
3145 ASSERT(fore200e_vcc);
3147 len = sprintf(page,
3148 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3149 (u32)(unsigned long)vcc,
3150 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3151 fore200e_vcc->tx_pdu,
3152 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3153 fore200e_vcc->tx_max_pdu,
3154 fore200e_vcc->rx_pdu,
3155 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3156 fore200e_vcc->rx_max_pdu);
3158 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3160 return len;
3161 }
3162 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3163 }
3165 return 0;
3166 }
3168 module_init(fore200e_module_init);
3169 module_exit(fore200e_module_cleanup);
3172 static const struct atmdev_ops fore200e_ops =
3173 {
3174 .open = fore200e_open,
3175 .close = fore200e_close,
3176 .ioctl = fore200e_ioctl,
3177 .getsockopt = fore200e_getsockopt,
3178 .setsockopt = fore200e_setsockopt,
3179 .send = fore200e_send,
3180 .change_qos = fore200e_change_qos,
3181 .proc_read = fore200e_proc_read,
3182 .owner = THIS_MODULE
3183 };
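/* the firmware images are expected to be converted into linkable objects at
   build time, providing the _fore200e_*_fw_data and _fore200e_*_fw_size
   symbols declared below. */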
3186 #ifdef CONFIG_ATM_FORE200E_PCA
3187 extern const unsigned char _fore200e_pca_fw_data[];
3188 extern const unsigned int _fore200e_pca_fw_size;
3189 #endif
3190 #ifdef CONFIG_ATM_FORE200E_SBA
3191 extern const unsigned char _fore200e_sba_fw_data[];
3192 extern const unsigned int _fore200e_sba_fw_size;
3193 #endif
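/* per-bus operation tables, one entry per supported attachment (PCA-200E on
   PCI, SBA-200E on SBus); fore200e_module_init() walks this table, and buses
   without a detect hook are probed through the PCI core instead. */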
3195 static const struct fore200e_bus fore200e_bus[] = {
3196 #ifdef CONFIG_ATM_FORE200E_PCA
3197 { "PCA-200E", "pca200e", 32, 4, 32,
3198 _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
3201 fore200e_pca_dma_map,
3202 fore200e_pca_dma_unmap,
3203 fore200e_pca_dma_sync_for_cpu,
3204 fore200e_pca_dma_sync_for_device,
3205 fore200e_pca_dma_chunk_alloc,
3206 fore200e_pca_dma_chunk_free,
3208 fore200e_pca_configure,
3211 fore200e_pca_prom_read,
3214 fore200e_pca_irq_check,
3215 fore200e_pca_irq_ack,
3216 fore200e_pca_proc_read,
3217 },
3218 #endif
3219 #ifdef CONFIG_ATM_FORE200E_SBA
3220 { "SBA-200E", "sba200e", 32, 64, 32,
3221 _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
3224 fore200e_sba_dma_map,
3225 fore200e_sba_dma_unmap,
3226 fore200e_sba_dma_sync_for_cpu,
3227 fore200e_sba_dma_sync_for_device,
3228 fore200e_sba_dma_chunk_alloc,
3229 fore200e_sba_dma_chunk_free,
3230 fore200e_sba_detect,
3231 fore200e_sba_configure,
3234 fore200e_sba_prom_read,
3236 fore200e_sba_irq_enable,
3237 fore200e_sba_irq_check,
3238 fore200e_sba_irq_ack,
3239 fore200e_sba_proc_read,
3240 },
3241 #endif
3242 {}
3243 };
3245 #ifdef MODULE_LICENSE
3246 MODULE_LICENSE("GPL");
3247 #endif