2 $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
4 A FORE Systems 200E-series driver for ATM on Linux.
5 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
7 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
9 This driver simultaneously supports PCA-200E and SBA-200E adapters
10 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/capability.h>
32 #include <linux/sched.h>
33 #include <linux/interrupt.h>
34 #include <linux/bitops.h>
35 #include <linux/pci.h>
36 #include <linux/module.h>
37 #include <linux/atmdev.h>
38 #include <linux/sonet.h>
39 #include <linux/atm_suni.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/delay.h>
43 #include <asm/string.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49 #include <asm/atomic.h>
51 #ifdef CONFIG_ATM_FORE200E_SBA
52 #include <asm/idprom.h>
54 #include <asm/openprom.h>
55 #include <asm/oplib.h>
56 #include <asm/pgtable.h>
59 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
60 #define FORE200E_USE_TASKLET
63 #if 0 /* enable the debugging code of the buffer supply queues */
64 #define FORE200E_BSQ_DEBUG
67 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
68 #define FORE200E_52BYTE_AAL0_SDU
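/* with this option enabled, a raw AAL0 SDU exchanged with user space is 52 bytes
   long: a 4-byte cell header packed into a u32 with the ATM_HDR_* shifts and masks
   from <linux/atm.h>, immediately followed by the 48-byte cell payload. an
   illustrative sketch of the packing, mirroring fore200e_push_rpd() and
   fore200e_send() below:

       u32 hdr = (gfc << ATM_HDR_GFC_SHIFT) | (vpi << ATM_HDR_VPI_SHIFT) |
                 (vci << ATM_HDR_VCI_SHIFT) | (pti << ATM_HDR_PTI_SHIFT) | clp;
*/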
74 #define FORE200E_VERSION "0.3e"
76 #define FORE200E "fore200e: "
78 #if 0 /* override .config */
79 #define CONFIG_ATM_FORE200E_DEBUG 1
81 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
82 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
83 printk(FORE200E format, ##args); } while (0)
85 #define DPRINTK(level, format, args...) do {} while (0)
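/* usage sketch, arguments are placeholders: a call such as
   DPRINTK(3, "tx on %d.%d.%d\n", itf, vpi, vci) is printed with the "fore200e: "
   prefix only when CONFIG_ATM_FORE200E_DEBUG is defined and >= 3; with debugging
   disabled, every DPRINTK() reduces to an empty statement. */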
89 #define FORE200E_ALIGN(addr, alignment) \
90 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
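/* FORE200E_ALIGN() yields the number of padding bytes needed to round 'addr' up to
   the next 'alignment' boundary (alignment must be a power of two). worked example,
   illustrative only: FORE200E_ALIGN(0x1003, 8) == 0x1008 - 0x1003 == 5, while an
   already aligned address yields 0. fore200e_chunk_alloc() below over-allocates by
   'alignment' bytes and adds this offset to derive chunk->align_addr. */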
92 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
94 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
96 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
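/* these helpers treat the host queues as rings: FORE200E_INDEX() and
   FORE200E_DMA_INDEX() address the i-th element of an array given its virtual or
   DMA base address, and FORE200E_NEXT_ENTRY() advances an index modulo the queue
   size. illustrative example: with a command queue of QUEUE_SIZE_CMD entries and
   cmdq->head == QUEUE_SIZE_CMD - 1, FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD)
   wraps the head back to 0. */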
99 #define ASSERT(expr) if (!(expr)) { \
100 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
101 __FUNCTION__, __LINE__, #expr); \
102 panic(FORE200E "%s", __FUNCTION__); \
105 #define ASSERT(expr) do {} while (0)
109 static const struct atmdev_ops fore200e_ops;
110 static const struct fore200e_bus fore200e_bus[];
112 static LIST_HEAD(fore200e_boards);
115 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
116 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
117 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
120 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
121 { BUFFER_S1_NBR, BUFFER_L1_NBR },
122 { BUFFER_S2_NBR, BUFFER_L2_NBR }
125 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
126 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
127 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
131 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
132 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
136 #if 0 /* currently unused */
138 fore200e_fore2atm_aal(enum fore200e_aal aal)
141 case FORE200E_AAL0: return ATM_AAL0;
142 case FORE200E_AAL34: return ATM_AAL34;
143 case FORE200E_AAL5: return ATM_AAL5;
151 static enum fore200e_aal
152 fore200e_atm2fore_aal(int aal)
155 case ATM_AAL0: return FORE200E_AAL0;
156 case ATM_AAL34: return FORE200E_AAL34;
159 case ATM_AAL5: return FORE200E_AAL5;
167 fore200e_irq_itoa(int irq)
170 sprintf(str, "%d", irq);
176 fore200e_kmalloc(int size, gfp_t flags)
178 void *chunk = kzalloc(size, flags);
181 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
188 fore200e_kfree(void* chunk)
194 /* allocate and align a chunk of memory intended to hold the data being exchanged
195 between the driver and the adapter (using streaming DVMA) */
198 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
200 unsigned long offset = 0;
202 if (alignment <= sizeof(int))
205 chunk->alloc_size = size + alignment;
206 chunk->align_size = size;
207 chunk->direction = direction;
209 chunk->alloc_addr = fore200e_kmalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
210 if (chunk->alloc_addr == NULL)
214 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
216 chunk->align_addr = chunk->alloc_addr + offset;
218 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
224 /* free a chunk of memory */
227 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
229 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
231 fore200e_kfree(chunk->alloc_addr);
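#if 0 /* illustrative sketch, not built: typical pairing of the streaming-DMA chunk
         helpers above. the 1 KiB size and 32-byte alignment are arbitrary example
         values, not requirements of the driver. */
static int fore200e_chunk_example(struct fore200e* fore200e)
{
    struct chunk chunk;

    /* allocate a 1 KiB buffer, aligned on a 32-byte boundary and mapped for
       device-to-host streaming DMA */
    if (fore200e_chunk_alloc(fore200e, &chunk, 1024, 32, DMA_FROM_DEVICE) < 0)
        return -ENOMEM;

    /* chunk.align_addr is the CPU-visible address, chunk.dma_addr the bus address
       handed to the adapter; both refer to the same buffer */

    fore200e_chunk_free(fore200e, &chunk);
    return 0;
}
#endif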
236 fore200e_spin(int msecs)
238 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
239 while (time_before(jiffies, timeout));
244 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
246 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
251 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
254 } while (time_before(jiffies, timeout));
258 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
268 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
270 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
274 if ((ok = (fore200e->bus->read(addr) == val)))
277 } while (time_before(jiffies, timeout));
281 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
282 fore200e->bus->read(addr), val);
291 fore200e_free_rx_buf(struct fore200e* fore200e)
293 int scheme, magn, nbr;
294 struct buffer* buffer;
296 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
297 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
299 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
301 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
303 struct chunk* data = &buffer[ nbr ].data;
305 if (data->alloc_addr != NULL)
306 fore200e_chunk_free(fore200e, data);
315 fore200e_uninit_bs_queue(struct fore200e* fore200e)
319 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
320 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
322 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
323 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
325 if (status->alloc_addr)
326 fore200e->bus->dma_chunk_free(fore200e, status);
328 if (rbd_block->alloc_addr)
329 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
336 fore200e_reset(struct fore200e* fore200e, int diag)
340 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
342 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
344 fore200e->bus->reset(fore200e);
347 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
350 printk(FORE200E "device %s self-test failed\n", fore200e->name);
354 printk(FORE200E "device %s self-test passed\n", fore200e->name);
356 fore200e->state = FORE200E_STATE_RESET;
364 fore200e_shutdown(struct fore200e* fore200e)
366 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
367 fore200e->name, fore200e->phys_base,
368 fore200e_irq_itoa(fore200e->irq));
370 if (fore200e->state > FORE200E_STATE_RESET) {
371 /* first, reset the board to prevent further interrupts or data transfers */
372 fore200e_reset(fore200e, 0);
375 /* then, release all allocated resources */
376 switch(fore200e->state) {
378 case FORE200E_STATE_COMPLETE:
379 kfree(fore200e->stats);
381 case FORE200E_STATE_IRQ:
382 free_irq(fore200e->irq, fore200e->atm_dev);
384 case FORE200E_STATE_ALLOC_BUF:
385 fore200e_free_rx_buf(fore200e);
387 case FORE200E_STATE_INIT_BSQ:
388 fore200e_uninit_bs_queue(fore200e);
390 case FORE200E_STATE_INIT_RXQ:
391 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
392 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
394 case FORE200E_STATE_INIT_TXQ:
395 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
396 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
398 case FORE200E_STATE_INIT_CMDQ:
399 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
401 case FORE200E_STATE_INITIALIZE:
402 /* nothing to do for that state */
404 case FORE200E_STATE_START_FW:
405 /* nothing to do for that state */
407 case FORE200E_STATE_LOAD_FW:
408 /* nothing to do for that state */
410 case FORE200E_STATE_RESET:
411 /* nothing to do for that state */
413 case FORE200E_STATE_MAP:
414 fore200e->bus->unmap(fore200e);
416 case FORE200E_STATE_CONFIGURE:
417 /* nothing to do for that state */
419 case FORE200E_STATE_REGISTER:
420 /* XXX shouldn't we *start* by deregistering the device? */
421 atm_dev_deregister(fore200e->atm_dev);
423 case FORE200E_STATE_BLANK:
424 /* nothing to do for that state */
430 #ifdef CONFIG_ATM_FORE200E_PCA
432 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
434 /* on big-endian hosts, the board is configured to convert
435 the endianness of slave RAM accesses */
436 return le32_to_cpu(readl(addr));
440 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
442 /* on big-endian hosts, the board is configured to convert
443 the endianness of slave RAM accesses */
444 writel(cpu_to_le32(val), addr);
449 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
451 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
453 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
454 virt_addr, size, direction, dma_addr);
461 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
463 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
464 dma_addr, size, direction);
466 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
471 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
473 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
475 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
479 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
481 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
483 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
487 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
488 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
491 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
492 int size, int nbr, int alignment)
494 /* returned chunks are page-aligned */
495 chunk->alloc_size = size * nbr;
496 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
500 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
503 chunk->align_addr = chunk->alloc_addr;
509 /* free a DMA consistent chunk of memory */
512 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
514 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
522 fore200e_pca_irq_check(struct fore200e* fore200e)
524 /* this is a 1-bit register */
525 int irq_posted = readl(fore200e->regs.pca.psr);
527 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
528 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
529 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
538 fore200e_pca_irq_ack(struct fore200e* fore200e)
540 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
545 fore200e_pca_reset(struct fore200e* fore200e)
547 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
549 writel(0, fore200e->regs.pca.hcr);
554 fore200e_pca_map(struct fore200e* fore200e)
556 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
558 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
560 if (fore200e->virt_base == NULL) {
561 printk(FORE200E "can't map device %s\n", fore200e->name);
565 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
567 /* gain access to the PCA specific registers */
568 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
569 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
570 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
572 fore200e->state = FORE200E_STATE_MAP;
578 fore200e_pca_unmap(struct fore200e* fore200e)
580 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
582 if (fore200e->virt_base != NULL)
583 iounmap(fore200e->virt_base);
588 fore200e_pca_configure(struct fore200e* fore200e)
590 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
591 u8 master_ctrl, latency;
593 DPRINTK(2, "device %s being configured\n", fore200e->name);
595 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
596 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
600 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
602 master_ctrl = master_ctrl
603 #if defined(__BIG_ENDIAN)
604 /* request the PCA board to convert the endianness of slave RAM accesses */
605 | PCA200E_CTRL_CONVERT_ENDIAN
608 | PCA200E_CTRL_DIS_CACHE_RD
609 | PCA200E_CTRL_DIS_WRT_INVAL
610 | PCA200E_CTRL_ENA_CONT_REQ_MODE
611 | PCA200E_CTRL_2_CACHE_WRT_INVAL
613 | PCA200E_CTRL_LARGE_PCI_BURSTS;
615 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
617 /* raise latency from 32 (the default) to 192, as this seems to prevent NIC
618 lockups (under heavy rx loads) due to a continuous 'FIFO OUT full' condition.
619 this may impact the performance of other PCI devices on the same bus, though */
621 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
623 fore200e->state = FORE200E_STATE_CONFIGURE;
629 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
631 struct host_cmdq* cmdq = &fore200e->host_cmdq;
632 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
633 struct prom_opcode opcode;
637 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
639 opcode.opcode = OPCODE_GET_PROM;
642 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
644 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
646 *entry->status = STATUS_PENDING;
648 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
650 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
652 *entry->status = STATUS_FREE;
654 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
657 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
661 #if defined(__BIG_ENDIAN)
663 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
665 /* MAC address is stored as little-endian */
666 swap_here(&prom->mac_addr[0]);
667 swap_here(&prom->mac_addr[4]);
675 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
677 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
679 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
680 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
683 #endif /* CONFIG_ATM_FORE200E_PCA */
686 #ifdef CONFIG_ATM_FORE200E_SBA
689 fore200e_sba_read(volatile u32 __iomem *addr)
691 return sbus_readl(addr);
696 fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
698 sbus_writel(val, addr);
703 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
705 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
707 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
708 virt_addr, size, direction, dma_addr);
715 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
717 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
718 dma_addr, size, direction);
720 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
725 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
727 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
729 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
733 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
735 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
737 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
741 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
742 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
745 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
746 int size, int nbr, int alignment)
748 chunk->alloc_size = chunk->align_size = size * nbr;
750 /* returned chunks are page-aligned */
751 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
755 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
758 chunk->align_addr = chunk->alloc_addr;
764 /* free a DVMA consistent chunk of memory */
767 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
769 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
777 fore200e_sba_irq_enable(struct fore200e* fore200e)
779 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
780 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
785 fore200e_sba_irq_check(struct fore200e* fore200e)
787 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
792 fore200e_sba_irq_ack(struct fore200e* fore200e)
794 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
795 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
800 fore200e_sba_reset(struct fore200e* fore200e)
802 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
804 fore200e->bus->write(0, fore200e->regs.sba.hcr);
809 fore200e_sba_map(struct fore200e* fore200e)
811 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
814 /* gain access to the SBA specific registers */
815 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
816 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
817 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
818 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
820 if (fore200e->virt_base == NULL) {
821 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
825 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
827 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
829 /* get the supported DVMA burst sizes */
830 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
832 if (sbus_can_dma_64bit(sbus_dev))
833 sbus_set_sbus64(sbus_dev, bursts);
835 fore200e->state = FORE200E_STATE_MAP;
841 fore200e_sba_unmap(struct fore200e* fore200e)
843 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
844 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
845 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
846 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
851 fore200e_sba_configure(struct fore200e* fore200e)
853 fore200e->state = FORE200E_STATE_CONFIGURE;
858 static struct fore200e* __init
859 fore200e_sba_detect(const struct fore200e_bus* bus, int index)
861 struct fore200e* fore200e;
862 struct sbus_bus* sbus_bus;
863 struct sbus_dev* sbus_dev = NULL;
865 unsigned int count = 0;
867 for_each_sbus (sbus_bus) {
868 for_each_sbusdev (sbus_dev, sbus_bus) {
869 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
879 if (sbus_dev->num_registers != 4) {
880 printk(FORE200E "this %s device has %d instead of 4 registers\n",
881 bus->model_name, sbus_dev->num_registers);
885 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
886 if (fore200e == NULL)
890 fore200e->bus_dev = sbus_dev;
891 fore200e->irq = sbus_dev->irqs[ 0 ];
893 fore200e->phys_base = (unsigned long)sbus_dev;
895 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
902 fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
904 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
907 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
911 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
915 prom_getproperty(sbus_dev->prom_node, "serialnumber",
916 (char*)&prom->serial_number, sizeof(prom->serial_number));
918 prom_getproperty(sbus_dev->prom_node, "promversion",
919 (char*)&prom->hw_revision, sizeof(prom->hw_revision));
926 fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
928 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
930 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
932 #endif /* CONFIG_ATM_FORE200E_SBA */
936 fore200e_tx_irq(struct fore200e* fore200e)
938 struct host_txq* txq = &fore200e->host_txq;
939 struct host_txq_entry* entry;
941 struct fore200e_vc_map* vc_map;
943 if (fore200e->host_txq.txing == 0)
948 entry = &txq->host_entry[ txq->tail ];
950 if ((*entry->status & STATUS_COMPLETE) == 0) {
954 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
955 entry, txq->tail, entry->vc_map, entry->skb);
957 /* free copy of misaligned data */
960 /* remove DMA mapping */
961 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
964 vc_map = entry->vc_map;
966 /* vcc closed since the time the entry was submitted for tx? */
967 if ((vc_map->vcc == NULL) ||
968 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
970 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
971 fore200e->atm_dev->number);
973 dev_kfree_skb_any(entry->skb);
978 /* vcc closed then immediately re-opened? */
979 if (vc_map->incarn != entry->incarn) {
981 /* when a vcc is closed, some PDUs may still be pending in the tx queue.
982 if the same vcc is immediately re-opened, those pending PDUs must
983 not be popped after the completion of their transmission, as they refer
984 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
985 would be decremented by the size of the (unrelated) skb, possibly
986 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
987 we thus bind the tx entry to the current incarnation of the vcc
988 when the entry is submitted for tx. when the tx later completes,
989 if the incarnation number of the tx entry does not match the one
990 of the vcc, then the vcc has been closed and re-opened in the meantime;
991 we thus just drop the skb here. */
993 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
994 fore200e->atm_dev->number);
996 dev_kfree_skb_any(entry->skb);
1002 /* notify tx completion */
1004 vcc->pop(vcc, entry->skb);
1007 dev_kfree_skb_any(entry->skb);
1010 /* race fixed by the above incarnation mechanism, but... */
1011 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
1012 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
1015 /* check error condition */
1016 if (*entry->status & STATUS_ERROR)
1017 atomic_inc(&vcc->stats->tx_err);
1019 atomic_inc(&vcc->stats->tx);
1023 *entry->status = STATUS_FREE;
1025 fore200e->host_txq.txing--;
1027 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
1032 #ifdef FORE200E_BSQ_DEBUG
1033 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
1035 struct buffer* buffer;
1038 buffer = bsq->freebuf;
1041 if (buffer->supplied) {
1042 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
1043 where, scheme, magn, buffer->index);
1046 if (buffer->magn != magn) {
1047 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
1048 where, scheme, magn, buffer->index, buffer->magn);
1051 if (buffer->scheme != scheme) {
1052 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
1053 where, scheme, magn, buffer->index, buffer->scheme);
1056 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
1057 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
1058 where, scheme, magn, buffer->index);
1062 buffer = buffer->next;
1065 if (count != bsq->freebuf_count) {
1066 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
1067 where, scheme, magn, count, bsq->freebuf_count);
1075 fore200e_supply(struct fore200e* fore200e)
1077 int scheme, magn, i;
1079 struct host_bsq* bsq;
1080 struct host_bsq_entry* entry;
1081 struct buffer* buffer;
1083 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1084 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1086 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1088 #ifdef FORE200E_BSQ_DEBUG
1089 bsq_audit(1, bsq, scheme, magn);
1091 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1093 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1094 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1096 entry = &bsq->host_entry[ bsq->head ];
1098 for (i = 0; i < RBD_BLK_SIZE; i++) {
1100 /* take the first buffer in the free buffer list */
1101 buffer = bsq->freebuf;
1103 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1104 scheme, magn, bsq->freebuf_count);
1107 bsq->freebuf = buffer->next;
1109 #ifdef FORE200E_BSQ_DEBUG
1110 if (buffer->supplied)
1111 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1112 scheme, magn, buffer->index);
1113 buffer->supplied = 1;
1115 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1116 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
1119 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1121 /* decrease the number of free rx buffers accordingly */
1122 bsq->freebuf_count -= RBD_BLK_SIZE;
1124 *entry->status = STATUS_PENDING;
1125 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
1133 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1135 struct sk_buff* skb;
1136 struct buffer* buffer;
1137 struct fore200e_vcc* fore200e_vcc;
1139 #ifdef FORE200E_52BYTE_AAL0_SDU
1140 u32 cell_header = 0;
1145 fore200e_vcc = FORE200E_VCC(vcc);
1146 ASSERT(fore200e_vcc);
1148 #ifdef FORE200E_52BYTE_AAL0_SDU
1149 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1151 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1152 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1153 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1154 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1155 rpd->atm_header.clp;
1160 /* compute total PDU length */
1161 for (i = 0; i < rpd->nseg; i++)
1162 pdu_len += rpd->rsd[ i ].length;
1164 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1166 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1168 atomic_inc(&vcc->stats->rx_drop);
1172 __net_timestamp(skb);
1174 #ifdef FORE200E_52BYTE_AAL0_SDU
1176 *((u32*)skb_put(skb, 4)) = cell_header;
1180 /* reassemble segments */
1181 for (i = 0; i < rpd->nseg; i++) {
1183 /* rebuild rx buffer address from rsd handle */
1184 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1186 /* Make device DMA transfer visible to CPU. */
1187 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1189 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1191 /* Now let the device get at it again. */
1192 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1195 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1197 if (pdu_len < fore200e_vcc->rx_min_pdu)
1198 fore200e_vcc->rx_min_pdu = pdu_len;
1199 if (pdu_len > fore200e_vcc->rx_max_pdu)
1200 fore200e_vcc->rx_max_pdu = pdu_len;
1201 fore200e_vcc->rx_pdu++;
1204 if (atm_charge(vcc, skb->truesize) == 0) {
1206 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1207 vcc->itf, vcc->vpi, vcc->vci);
1209 dev_kfree_skb_any(skb);
1211 atomic_inc(&vcc->stats->rx_drop);
1215 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1217 vcc->push(vcc, skb);
1218 atomic_inc(&vcc->stats->rx);
1220 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1227 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1229 struct host_bsq* bsq;
1230 struct buffer* buffer;
1233 for (i = 0; i < rpd->nseg; i++) {
1235 /* rebuild rx buffer address from rsd handle */
1236 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1238 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1240 #ifdef FORE200E_BSQ_DEBUG
1241 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1243 if (buffer->supplied == 0)
1244 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1245 buffer->scheme, buffer->magn, buffer->index);
1246 buffer->supplied = 0;
1249 /* re-insert the buffer into the free buffer list */
1250 buffer->next = bsq->freebuf;
1251 bsq->freebuf = buffer;
1253 /* then increment the number of free rx buffers */
1254 bsq->freebuf_count++;
1260 fore200e_rx_irq(struct fore200e* fore200e)
1262 struct host_rxq* rxq = &fore200e->host_rxq;
1263 struct host_rxq_entry* entry;
1264 struct atm_vcc* vcc;
1265 struct fore200e_vc_map* vc_map;
1269 entry = &rxq->host_entry[ rxq->head ];
1271 /* no more received PDUs */
1272 if ((*entry->status & STATUS_COMPLETE) == 0)
1275 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1277 if ((vc_map->vcc == NULL) ||
1278 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1280 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1281 fore200e->atm_dev->number,
1282 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1288 if ((*entry->status & STATUS_ERROR) == 0) {
1290 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1293 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1294 fore200e->atm_dev->number,
1295 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1296 atomic_inc(&vcc->stats->rx_err);
1300 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1302 fore200e_collect_rpd(fore200e, entry->rpd);
1304 /* rewrite the rpd address to ack the received PDU */
1305 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1306 *entry->status = STATUS_FREE;
1308 fore200e_supply(fore200e);
1313 #ifndef FORE200E_USE_TASKLET
1315 fore200e_irq(struct fore200e* fore200e)
1317 unsigned long flags;
1319 spin_lock_irqsave(&fore200e->q_lock, flags);
1320 fore200e_rx_irq(fore200e);
1321 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1323 spin_lock_irqsave(&fore200e->q_lock, flags);
1324 fore200e_tx_irq(fore200e);
1325 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1331 fore200e_interrupt(int irq, void* dev, struct pt_regs* regs)
1333 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1335 if (fore200e->bus->irq_check(fore200e) == 0) {
1337 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1340 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1342 #ifdef FORE200E_USE_TASKLET
1343 tasklet_schedule(&fore200e->tx_tasklet);
1344 tasklet_schedule(&fore200e->rx_tasklet);
1346 fore200e_irq(fore200e);
1349 fore200e->bus->irq_ack(fore200e);
1354 #ifdef FORE200E_USE_TASKLET
1356 fore200e_tx_tasklet(unsigned long data)
1358 struct fore200e* fore200e = (struct fore200e*) data;
1359 unsigned long flags;
1361 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1363 spin_lock_irqsave(&fore200e->q_lock, flags);
1364 fore200e_tx_irq(fore200e);
1365 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1370 fore200e_rx_tasklet(unsigned long data)
1372 struct fore200e* fore200e = (struct fore200e*) data;
1373 unsigned long flags;
1375 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1377 spin_lock_irqsave(&fore200e->q_lock, flags);
1378 fore200e_rx_irq((struct fore200e*) data);
1379 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1385 fore200e_select_scheme(struct atm_vcc* vcc)
1387 /* fairly balance the VCs over (identical) buffer schemes */
1388 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1390 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1391 vcc->itf, vcc->vpi, vcc->vci, scheme);
1398 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1400 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1401 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1402 struct activate_opcode activ_opcode;
1403 struct deactivate_opcode deactiv_opcode;
1406 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1408 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1411 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1413 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1414 activ_opcode.aal = aal;
1415 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1416 activ_opcode.pad = 0;
1419 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1420 deactiv_opcode.pad = 0;
1423 vpvc.vci = vcc->vci;
1424 vpvc.vpi = vcc->vpi;
1426 *entry->status = STATUS_PENDING;
1430 #ifdef FORE200E_52BYTE_AAL0_SDU
1433 /* the MTU is not used by the cp, except in the case of AAL0 */
1434 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1435 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1436 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1439 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1440 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1443 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1445 *entry->status = STATUS_FREE;
1448 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1449 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1453 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1454 activate ? "open" : "clos");
1460 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
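/* worked example for fore200e_rate_ctrl() below, illustrative only: ATM_OC3_PCR
   (defined in <linux/atmdev.h>) is about 353207 cells/s. a CBR VC requesting half
   of that (max_pcr == ATM_OC3_PCR / 2) gets data_cells == 127 and idle_cells == 128
   out of the 255-cell window above, i.e. the adapter schedules roughly one idle
   cell slot after each data cell, halving the effective cell rate. */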
1463 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1465 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1467 /* compute the data cells to idle cells ratio from the tx PCR */
1468 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1469 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1472 /* disable rate control */
1473 rate->data_cells = rate->idle_cells = 0;
1479 fore200e_open(struct atm_vcc *vcc)
1481 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1482 struct fore200e_vcc* fore200e_vcc;
1483 struct fore200e_vc_map* vc_map;
1484 unsigned long flags;
1486 short vpi = vcc->vpi;
1488 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1489 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1491 spin_lock_irqsave(&fore200e->q_lock, flags);
1493 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1496 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1498 printk(FORE200E "VC %d.%d.%d already in use\n",
1499 fore200e->atm_dev->number, vpi, vci);
1506 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1508 fore200e_vcc = fore200e_kmalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1509 if (fore200e_vcc == NULL) {
1514 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1515 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1516 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1517 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1518 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1519 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1520 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1522 /* pseudo-CBR bandwidth requested? */
1523 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1525 down(&fore200e->rate_sf);
1526 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1527 up(&fore200e->rate_sf);
1529 fore200e_kfree(fore200e_vcc);
1534 /* reserve bandwidth */
1535 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1536 up(&fore200e->rate_sf);
1539 vcc->itf = vcc->dev->number;
1541 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1542 set_bit(ATM_VF_ADDR, &vcc->flags);
1544 vcc->dev_data = fore200e_vcc;
1546 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1550 clear_bit(ATM_VF_ADDR, &vcc->flags);
1551 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1553 vcc->dev_data = NULL;
1555 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1557 fore200e_kfree(fore200e_vcc);
1561 /* compute rate control parameters */
1562 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1564 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1565 set_bit(ATM_VF_HASQOS, &vcc->flags);
1567 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1568 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1569 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1570 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1573 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1574 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1575 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1577 /* new incarnation of the vcc */
1578 vc_map->incarn = ++fore200e->incarn_count;
1580 /* VC unusable before this flag is set */
1581 set_bit(ATM_VF_READY, &vcc->flags);
1588 fore200e_close(struct atm_vcc* vcc)
1590 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1591 struct fore200e_vcc* fore200e_vcc;
1592 struct fore200e_vc_map* vc_map;
1593 unsigned long flags;
1596 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1597 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1599 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1601 clear_bit(ATM_VF_READY, &vcc->flags);
1603 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1605 spin_lock_irqsave(&fore200e->q_lock, flags);
1607 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1609 /* the vc is no longer considered "in use" by fore200e_open() */
1612 vcc->itf = vcc->vci = vcc->vpi = 0;
1614 fore200e_vcc = FORE200E_VCC(vcc);
1615 vcc->dev_data = NULL;
1617 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1619 /* release reserved bandwidth, if any */
1620 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1622 down(&fore200e->rate_sf);
1623 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1624 up(&fore200e->rate_sf);
1626 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1629 clear_bit(ATM_VF_ADDR, &vcc->flags);
1630 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1632 ASSERT(fore200e_vcc);
1633 fore200e_kfree(fore200e_vcc);
1638 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1640 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1641 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1642 struct fore200e_vc_map* vc_map;
1643 struct host_txq* txq = &fore200e->host_txq;
1644 struct host_txq_entry* entry;
1646 struct tpd_haddr tpd_haddr;
1647 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1649 int tx_len = skb->len;
1650 u32* cell_header = NULL;
1651 unsigned char* skb_data;
1653 unsigned char* data;
1654 unsigned long flags;
1657 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1659 ASSERT(fore200e_vcc);
1661 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1662 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1663 dev_kfree_skb_any(skb);
1667 #ifdef FORE200E_52BYTE_AAL0_SDU
1668 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1669 cell_header = (u32*) skb->data;
1670 skb_data = skb->data + 4; /* skip 4-byte cell header */
1671 skb_len = tx_len = skb->len - 4;
1673 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1678 skb_data = skb->data;
1682 if (((unsigned long)skb_data) & 0x3) {
1684 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1689 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1691 /* this simply NUKES the PCA board */
1692 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1694 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1698 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1704 dev_kfree_skb_any(skb);
1709 memcpy(data, skb_data, skb_len);
1710 if (skb_len < tx_len)
1711 memset(data + skb_len, 0x00, tx_len - skb_len);
1717 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1718 ASSERT(vc_map->vcc == vcc);
1722 spin_lock_irqsave(&fore200e->q_lock, flags);
1724 entry = &txq->host_entry[ txq->head ];
1726 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1728 /* try to free completed tx queue entries */
1729 fore200e_tx_irq(fore200e);
1731 if (*entry->status != STATUS_FREE) {
1733 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1735 /* retry once again? */
1741 atomic_inc(&vcc->stats->tx_err);
1744 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1745 fore200e->name, fore200e->cp_queues->heartbeat);
1750 dev_kfree_skb_any(skb);
1760 entry->incarn = vc_map->incarn;
1761 entry->vc_map = vc_map;
1763 entry->data = tx_copy ? data : NULL;
1766 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1767 tpd->tsd[ 0 ].length = tx_len;
1769 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1772 /* The dma_map call above implies a dma_sync so the device can use it,
1773 * thus no explicit dma_sync call is necessary here.
1776 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1777 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1778 tpd->tsd[0].length, skb_len);
1780 if (skb_len < fore200e_vcc->tx_min_pdu)
1781 fore200e_vcc->tx_min_pdu = skb_len;
1782 if (skb_len > fore200e_vcc->tx_max_pdu)
1783 fore200e_vcc->tx_max_pdu = skb_len;
1784 fore200e_vcc->tx_pdu++;
1786 /* set tx rate control information */
1787 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1788 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1791 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1792 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1793 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1794 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1795 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1798 /* set the ATM header, common to all cells conveying the PDU */
1799 tpd->atm_header.clp = 0;
1800 tpd->atm_header.plt = 0;
1801 tpd->atm_header.vci = vcc->vci;
1802 tpd->atm_header.vpi = vcc->vpi;
1803 tpd->atm_header.gfc = 0;
1806 tpd->spec.length = tx_len;
1808 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1811 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32-byte blocks */
1813 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
1815 *entry->status = STATUS_PENDING;
1816 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1818 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1825 fore200e_getstats(struct fore200e* fore200e)
1827 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1828 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1829 struct stats_opcode opcode;
1833 if (fore200e->stats == NULL) {
1834 fore200e->stats = fore200e_kmalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1835 if (fore200e->stats == NULL)
1839 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1840 sizeof(struct stats), DMA_FROM_DEVICE);
1842 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1844 opcode.opcode = OPCODE_GET_STATS;
1847 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1849 *entry->status = STATUS_PENDING;
1851 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1853 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1855 *entry->status = STATUS_FREE;
1857 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1860 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1869 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1871 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1873 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1874 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1881 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1883 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1885 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1886 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1892 #if 0 /* currently unused */
1894 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1896 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1897 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1898 struct oc3_opcode opcode;
1900 u32 oc3_regs_dma_addr;
1902 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1904 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1906 opcode.opcode = OPCODE_GET_OC3;
1911 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1913 *entry->status = STATUS_PENDING;
1915 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1917 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1919 *entry->status = STATUS_FREE;
1921 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1924 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1934 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1936 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1937 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1938 struct oc3_opcode opcode;
1941 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1943 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1945 opcode.opcode = OPCODE_SET_OC3;
1947 opcode.value = value;
1950 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1952 *entry->status = STATUS_PENDING;
1954 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1956 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1958 *entry->status = STATUS_FREE;
1961 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1970 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1972 u32 mct_value, mct_mask;
1975 if (!capable(CAP_NET_ADMIN))
1978 switch (loop_mode) {
1982 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1985 case ATM_LM_LOC_PHY:
1986 mct_value = mct_mask = SUNI_MCT_DLE;
1989 case ATM_LM_RMT_PHY:
1990 mct_value = mct_mask = SUNI_MCT_LLE;
1997 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1999 fore200e->loop_mode = loop_mode;
2005 static inline unsigned int
2006 fore200e_swap(unsigned int in)
2008 #if defined(__LITTLE_ENDIAN)
2017 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
2019 struct sonet_stats tmp;
2021 if (fore200e_getstats(fore200e) < 0)
2024 tmp.section_bip = fore200e_swap(fore200e->stats->oc3.section_bip8_errors);
2025 tmp.line_bip = fore200e_swap(fore200e->stats->oc3.line_bip24_errors);
2026 tmp.path_bip = fore200e_swap(fore200e->stats->oc3.path_bip8_errors);
2027 tmp.line_febe = fore200e_swap(fore200e->stats->oc3.line_febe_errors);
2028 tmp.path_febe = fore200e_swap(fore200e->stats->oc3.path_febe_errors);
2029 tmp.corr_hcs = fore200e_swap(fore200e->stats->oc3.corr_hcs_errors);
2030 tmp.uncorr_hcs = fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors);
2031 tmp.tx_cells = fore200e_swap(fore200e->stats->aal0.cells_transmitted) +
2032 fore200e_swap(fore200e->stats->aal34.cells_transmitted) +
2033 fore200e_swap(fore200e->stats->aal5.cells_transmitted);
2034 tmp.rx_cells = fore200e_swap(fore200e->stats->aal0.cells_received) +
2035 fore200e_swap(fore200e->stats->aal34.cells_received) +
2036 fore200e_swap(fore200e->stats->aal5.cells_received);
2039 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
2046 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2048 struct fore200e* fore200e = FORE200E_DEV(dev);
2050 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2055 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
2058 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
2061 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2064 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2067 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2070 return -ENOSYS; /* not implemented */
2075 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2077 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2078 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2080 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2081 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
2085 DPRINTK(2, "change_qos %d.%d.%d, "
2086 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2087 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2088 "available_cell_rate = %u",
2089 vcc->itf, vcc->vpi, vcc->vci,
2090 fore200e_traffic_class[ qos->txtp.traffic_class ],
2091 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2092 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2093 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2094 flags, fore200e->available_cell_rate);
2096 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2098 down(&fore200e->rate_sf);
2099 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2100 up(&fore200e->rate_sf);
2104 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2105 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2107 up(&fore200e->rate_sf);
2109 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2111 /* update rate control parameters */
2112 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2114 set_bit(ATM_VF_HASQOS, &vcc->flags);
2123 static int __devinit
2124 fore200e_irq_request(struct fore200e* fore200e)
2126 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2128 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2129 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2133 printk(FORE200E "IRQ %s reserved for device %s\n",
2134 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2136 #ifdef FORE200E_USE_TASKLET
2137 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2138 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2141 fore200e->state = FORE200E_STATE_IRQ;
2146 static int __devinit
2147 fore200e_get_esi(struct fore200e* fore200e)
2149 struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2155 ok = fore200e->bus->prom_read(fore200e, prom);
2157 fore200e_kfree(prom);
2161 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2163 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2164 prom->serial_number & 0xFFFF,
2165 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2166 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2168 for (i = 0; i < ESI_LEN; i++) {
2169 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2172 fore200e_kfree(prom);
2178 static int __devinit
2179 fore200e_alloc_rx_buf(struct fore200e* fore200e)
2181 int scheme, magn, nbr, size, i;
2183 struct host_bsq* bsq;
2184 struct buffer* buffer;
2186 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2187 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2189 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2191 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2192 size = fore200e_rx_buf_size[ scheme ][ magn ];
2194 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2196 /* allocate the array of receive buffers */
2197 buffer = bsq->buffer = fore200e_kmalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2202 bsq->freebuf = NULL;
2204 for (i = 0; i < nbr; i++) {
2206 buffer[ i ].scheme = scheme;
2207 buffer[ i ].magn = magn;
2208 #ifdef FORE200E_BSQ_DEBUG
2209 buffer[ i ].index = i;
2210 buffer[ i ].supplied = 0;
2213 /* allocate the receive buffer body */
2214 if (fore200e_chunk_alloc(fore200e,
2215 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2216 DMA_FROM_DEVICE) < 0) {
2219 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2220 fore200e_kfree(buffer);
2225 /* insert the buffer into the free buffer list */
2226 buffer[ i ].next = bsq->freebuf;
2227 bsq->freebuf = &buffer[ i ];
2229 /* all the buffers are free, initially */
2230 bsq->freebuf_count = nbr;
2232 #ifdef FORE200E_BSQ_DEBUG
2233 bsq_audit(3, bsq, scheme, magn);
2238 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2243 static int __devinit
2244 fore200e_init_bs_queue(struct fore200e* fore200e)
2246 int scheme, magn, i;
2248 struct host_bsq* bsq;
2249 struct cp_bsq_entry __iomem * cp_entry;
2251 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2252 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2254 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2256 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2258 /* allocate and align the array of status words */
2259 if (fore200e->bus->dma_chunk_alloc(fore200e,
2261 sizeof(enum status),
2263 fore200e->bus->status_alignment) < 0) {
2267 /* allocate and align the array of receive buffer descriptors */
2268 if (fore200e->bus->dma_chunk_alloc(fore200e,
2270 sizeof(struct rbd_block),
2272 fore200e->bus->descr_alignment) < 0) {
2274 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2278 /* get the base address of the cp resident buffer supply queue entries */
2279 cp_entry = fore200e->virt_base +
2280 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2282 /* fill the host resident and cp resident buffer supply queue entries */
2283 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2285 bsq->host_entry[ i ].status =
2286 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2287 bsq->host_entry[ i ].rbd_block =
2288 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2289 bsq->host_entry[ i ].rbd_block_dma =
2290 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2291 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2293 *bsq->host_entry[ i ].status = STATUS_FREE;
2295 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2296 &cp_entry[ i ].status_haddr);
2301 fore200e->state = FORE200E_STATE_INIT_BSQ;
2306 static int __devinit
2307 fore200e_init_rx_queue(struct fore200e* fore200e)
2309 struct host_rxq* rxq = &fore200e->host_rxq;
2310 struct cp_rxq_entry __iomem * cp_entry;
2313 DPRINTK(2, "receive queue is being initialized\n");
2315 /* allocate and align the array of status words */
2316 if (fore200e->bus->dma_chunk_alloc(fore200e,
2318 sizeof(enum status),
2320 fore200e->bus->status_alignment) < 0) {
2324 /* allocate and align the array of receive PDU descriptors */
2325 if (fore200e->bus->dma_chunk_alloc(fore200e,
2329 fore200e->bus->descr_alignment) < 0) {
2331 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2335 /* get the base address of the cp resident rx queue entries */
2336 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2338 /* fill the host resident and cp resident rx entries */
2339 for (i=0; i < QUEUE_SIZE_RX; i++) {
2341 rxq->host_entry[ i ].status =
2342 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2343 rxq->host_entry[ i ].rpd =
2344 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2345 rxq->host_entry[ i ].rpd_dma =
2346 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2347 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2349 *rxq->host_entry[ i ].status = STATUS_FREE;
2351 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2352 &cp_entry[ i ].status_haddr);
2354 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2355 &cp_entry[ i ].rpd_haddr);
2358 /* set the head entry of the queue */
2361 fore200e->state = FORE200E_STATE_INIT_RXQ;
2366 static int __devinit
2367 fore200e_init_tx_queue(struct fore200e* fore200e)
2369 struct host_txq* txq = &fore200e->host_txq;
2370 struct cp_txq_entry __iomem * cp_entry;
2373 DPRINTK(2, "transmit queue is being initialized\n");
2375 /* allocate and align the array of status words */
2376 if (fore200e->bus->dma_chunk_alloc(fore200e,
2378 sizeof(enum status),
2380 fore200e->bus->status_alignment) < 0) {
2384 /* allocate and align the array of transmit PDU descriptors */
2385 if (fore200e->bus->dma_chunk_alloc(fore200e,
2389 fore200e->bus->descr_alignment) < 0) {
2391 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2395 /* get the base address of the cp resident tx queue entries */
2396 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2398 /* fill the host resident and cp resident tx entries */
2399 for (i=0; i < QUEUE_SIZE_TX; i++) {
2401 txq->host_entry[ i ].status =
2402 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2403 txq->host_entry[ i ].tpd =
2404 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2405 txq->host_entry[ i ].tpd_dma =
2406 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2407 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2409 *txq->host_entry[ i ].status = STATUS_FREE;
2411 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2412 &cp_entry[ i ].status_haddr);
2414 /* although there is a one-to-one mapping of tx queue entries and tpds,
2415 we do not write here the DMA (physical) base address of each tpd into
2416 the related cp resident entry, because the cp relies on this write
2417 operation to detect that a new pdu has been submitted for tx (see the sketch after this function) */
2420 /* set the head and tail entries of the queue */
2424 fore200e->state = FORE200E_STATE_INIT_TXQ;
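/*
 * Illustration (not part of the driver): per the comment above, the tpd bus
 * address is deliberately not written at init time. This sketch shows what
 * the transmit path does when it eventually submits entry i; type and field
 * names are indicative (see fore200e.h), and the real send path also fills
 * the tpd and handles queue wrap, which is omitted here.
 */
#if 0 /* illustration only, not compiled */
struct host_txq_entry* entry = &txq->host_entry[ i ];

*entry->status = STATUS_PENDING;

/* this single write both hands the tpd's DMA address to the cp and acts
   as the "new PDU submitted" doorbell */
fore200e->bus->write(FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i),
                     &entry->cp_entry->tpd_haddr);
#endif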
2429 static int __devinit
2430 fore200e_init_cmd_queue(struct fore200e* fore200e)
2432 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2433 struct cp_cmdq_entry __iomem * cp_entry;
2436 DPRINTK(2, "command queue is being initialized\n");
2438 /* allocate and align the array of status words */
2439 if (fore200e->bus->dma_chunk_alloc(fore200e,
2441 sizeof(enum status),
2443 fore200e->bus->status_alignment) < 0) {
2447 /* get the base address of the cp resident cmd queue entries */
2448 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2450 /* fill the host resident and cp resident cmd entries */
2451 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2453 cmdq->host_entry[ i ].status =
2454 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2455 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2457 *cmdq->host_entry[ i ].status = STATUS_FREE;
2459 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2460 &cp_entry[ i ].status_haddr);
2463 /* set the head entry of the queue */
2466 fore200e->state = FORE200E_STATE_INIT_CMDQ;
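/*
 * Illustration (not part of the driver): how the status words wired up above
 * are used at run time. The host picks the entry at cmdq->head, flips its
 * private status word to STATUS_PENDING, writes the opcode into the cp
 * resident entry, then polls that same status word until the cp DMA-writes
 * STATUS_COMPLETE (or STATUS_ERROR) back through the status_haddr handed to
 * it above. A simplified sketch modeled on the command handlers earlier in
 * this file; the opcode/field layout is indicative only, and a real handler
 * (e.g. OPCODE_GET_STATS) also passes a DMA address for the result.
 */
#if 0 /* illustration only, not compiled */
static int
fore200e_cmd_sketch(struct fore200e* fore200e)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    int                     ok;

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    *entry->status = STATUS_PENDING;
    fore200e->bus->write(OPCODE_GET_STATS, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    return ok ? 0 : -EIO;
}
#endif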
2472 fore200e_param_bs_queue(struct fore200e* fore200e,
2473 enum buffer_scheme scheme, enum buffer_magn magn,
2474 int queue_length, int pool_size, int supply_blksize)
2476 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2478 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2479 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2480 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2481 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2485 static int __devinit
2486 fore200e_initialize(struct fore200e* fore200e)
2488 struct cp_queues __iomem * cpq;
2489 int ok, scheme, magn;
2491 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2493 init_MUTEX(&fore200e->rate_mtx);
2494 spin_lock_init(&fore200e->q_lock);
2496 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2498 /* enable cp to host interrupts */
2499 fore200e->bus->write(1, &cpq->imask);
2501 if (fore200e->bus->irq_enable)
2502 fore200e->bus->irq_enable(fore200e);
2504 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2506 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2507 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2508 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2510 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2511 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2513 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2514 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2515 fore200e_param_bs_queue(fore200e, scheme, magn,
2517 fore200e_rx_buf_nbr[ scheme ][ magn ],
2520 /* issue the initialize command */
2521 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2522 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2524 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2526 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2530 printk(FORE200E "device %s initialized\n", fore200e->name);
2532 fore200e->state = FORE200E_STATE_INITIALIZE;
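/*
 * Illustration (not part of the driver): the fore200e_io_poll() used above is
 * defined earlier in this file; conceptually it spins on a cp resident word
 * through the bus read accessor until the expected value shows up or the time
 * budget runs out. A minimal sketch under that assumption (the real helper
 * may differ in detail, e.g. in how it sleeps between reads):
 */
#if 0 /* illustration only, not compiled */
static int
fore200e_io_poll_sketch(struct fore200e* fore200e, volatile u32 __iomem * addr,
                        u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);

    while (time_before(jiffies, timeout)) {

        if (fore200e->bus->read(addr) == val)
            return 1;   /* expected value reached */

        msleep(1);
    }
    return 0;           /* timed out */
}
#endif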
2537 static void __devinit
2538 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2540 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2545 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2549 static int __devinit
2550 fore200e_monitor_getc(struct fore200e* fore200e)
2552 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2553 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2556 while (time_before(jiffies, timeout)) {
2558 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2560 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2562 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2564 printk("%c", c & 0xFF);
2574 static void __devinit
2575 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2579 /* the i960 monitor doesn't accept any new character if it has something to say */
2580 while (fore200e_monitor_getc(fore200e) >= 0);
2582 fore200e_monitor_putc(fore200e, *str++);
2585 while (fore200e_monitor_getc(fore200e) >= 0);
2589 static int __devinit
2590 fore200e_start_fw(struct fore200e* fore200e)
2594 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;
2596 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2598 #if defined(__sparc_v9__)
2599 /* reported to be required by SBA cards on some sparc64 hosts */
2603 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2605 fore200e_monitor_puts(fore200e, cmd);
2607 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
2609 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2613 printk(FORE200E "device %s firmware started\n", fore200e->name);
2615 fore200e->state = FORE200E_STATE_START_FW;
2620 static int __devinit
2621 fore200e_load_fw(struct fore200e* fore200e)
2623 u32* fw_data = (u32*) fore200e->bus->fw_data;
2624 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
2626 struct fw_header* fw_header = (struct fw_header*) fw_data;
2628 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2630 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2631 fore200e->name, load_addr, fw_size);
2633 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2634 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2638 for (; fw_size--; fw_data++, load_addr++)
2639 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2641 fore200e->state = FORE200E_STATE_LOAD_FW;
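/*
 * Illustration (not part of the driver): the loader above treats the firmware
 * image as a small header followed by a stream of little-endian 32-bit words.
 * Indicatively, the fields it relies on look like the sketch below; the
 * authoritative definition of struct fw_header lives in fore200e.h and may
 * carry additional fields.
 */
#if 0 /* illustration only, not compiled */
struct fw_header_sketch {
    u32 magic;          /* checked against FW_HEADER_MAGIC above              */
    u32 version;        /* firmware revision                                   */
    u32 load_offset;    /* copy destination, relative to fore200e->virt_base   */
    u32 start_offset;   /* entry point handed to the i960 "go" command         */
};                      /* fields stored little-endian, hence le32_to_cpu()    */
#endif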
2646 static int __devinit
2647 fore200e_register(struct fore200e* fore200e)
2649 struct atm_dev* atm_dev;
2651 DPRINTK(2, "device %s being registered\n", fore200e->name);
2653 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2655 if (atm_dev == NULL) {
2656 printk(FORE200E "unable to register device %s\n", fore200e->name);
2660 atm_dev->dev_data = fore200e;
2661 fore200e->atm_dev = atm_dev;
2663 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2664 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2666 fore200e->available_cell_rate = ATM_OC3_PCR;
2668 fore200e->state = FORE200E_STATE_REGISTER;
2673 static int __devinit
2674 fore200e_init(struct fore200e* fore200e)
2676 if (fore200e_register(fore200e) < 0)
2679 if (fore200e->bus->configure(fore200e) < 0)
2682 if (fore200e->bus->map(fore200e) < 0)
2685 if (fore200e_reset(fore200e, 1) < 0)
2688 if (fore200e_load_fw(fore200e) < 0)
2691 if (fore200e_start_fw(fore200e) < 0)
2694 if (fore200e_initialize(fore200e) < 0)
2697 if (fore200e_init_cmd_queue(fore200e) < 0)
2700 if (fore200e_init_tx_queue(fore200e) < 0)
2703 if (fore200e_init_rx_queue(fore200e) < 0)
2706 if (fore200e_init_bs_queue(fore200e) < 0)
2709 if (fore200e_alloc_rx_buf(fore200e) < 0)
2712 if (fore200e_get_esi(fore200e) < 0)
2715 if (fore200e_irq_request(fore200e) < 0)
2718 fore200e_supply(fore200e);
2720 /* all done, board initialization is now complete */
2721 fore200e->state = FORE200E_STATE_COMPLETE;
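/*
 * Note: each step above records its progress in fore200e->state; the
 * fore200e_shutdown() called on the error paths (and at module unload)
 * presumably unwinds only the steps that state says were completed, which is
 * why the FORE200E_STATE_* values are ordered to mirror this sequence.
 */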
2726 static int __devinit
2727 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2729 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2730 struct fore200e* fore200e;
2732 static int index = 0;
2734 if (pci_enable_device(pci_dev)) {
2739 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
2740 if (fore200e == NULL) {
2745 fore200e->bus = bus;
2746 fore200e->bus_dev = pci_dev;
2747 fore200e->irq = pci_dev->irq;
2748 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2752 pci_set_master(pci_dev);
2754 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2755 fore200e->bus->model_name,
2756 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2758 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2760 err = fore200e_init(fore200e);
2762 fore200e_shutdown(fore200e);
2767 pci_set_drvdata(pci_dev, fore200e);
2775 pci_disable_device(pci_dev);
2780 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2782 struct fore200e *fore200e;
2784 fore200e = pci_get_drvdata(pci_dev);
2786 fore200e_shutdown(fore200e);
2788 pci_disable_device(pci_dev);
2792 #ifdef CONFIG_ATM_FORE200E_PCA
2793 static struct pci_device_id fore200e_pca_tbl[] = {
2794 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2795 0, 0, (unsigned long) &fore200e_bus[0] },
2799 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2801 static struct pci_driver fore200e_pca_driver = {
2802 .name = "fore_200e",
2803 .probe = fore200e_pca_detect,
2804 .remove = __devexit_p(fore200e_pca_remove_one),
2805 .id_table = fore200e_pca_tbl,
2811 fore200e_module_init(void)
2813 const struct fore200e_bus* bus;
2814 struct fore200e* fore200e;
2817 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2819 /* for each configured bus interface */
2820 for (bus = fore200e_bus; bus->model_name; bus++) {
2822 /* detect all boards present on that bus */
2823 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {
2825 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2826 fore200e->bus->model_name,
2827 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2829 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2831 if (fore200e_init(fore200e) < 0) {
2833 fore200e_shutdown(fore200e);
2837 list_add(&fore200e->entry, &fore200e_boards);
2841 #ifdef CONFIG_ATM_FORE200E_PCA
2842 if (!pci_register_driver(&fore200e_pca_driver))
2846 if (!list_empty(&fore200e_boards))
2854 fore200e_module_cleanup(void)
2856 struct fore200e *fore200e, *next;
2858 #ifdef CONFIG_ATM_FORE200E_PCA
2859 pci_unregister_driver(&fore200e_pca_driver);
2862 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
2863 fore200e_shutdown(fore200e);
2866 DPRINTK(1, "module being removed\n");
2871 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2873 struct fore200e* fore200e = FORE200E_DEV(dev);
2874 struct fore200e_vcc* fore200e_vcc;
2875 struct atm_vcc* vcc;
2876 int i, len, left = *pos;
2877 unsigned long flags;
2881 if (fore200e_getstats(fore200e) < 0)
2884 len = sprintf(page,"\n"
2886 " internal name:\t\t%s\n", fore200e->name);
2888 /* print bus-specific information */
2889 if (fore200e->bus->proc_read)
2890 len += fore200e->bus->proc_read(fore200e, page + len);
2892 len += sprintf(page + len,
2893 " interrupt line:\t\t%s\n"
2894 " physical base address:\t0x%p\n"
2895 " virtual base address:\t0x%p\n"
2896 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2897 " board serial number:\t\t%d\n\n",
2898 fore200e_irq_itoa(fore200e->irq),
2899 (void*)fore200e->phys_base,
2900 fore200e->virt_base,
2901 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2902 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
2903 fore200e->esi[4] * 256 + fore200e->esi[5]);
2909 return sprintf(page,
2910 " free small bufs, scheme 1:\t%d\n"
2911 " free large bufs, scheme 1:\t%d\n"
2912 " free small bufs, scheme 2:\t%d\n"
2913 " free large bufs, scheme 2:\t%d\n",
2914 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2915 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2916 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2917 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2920 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2922 len = sprintf(page,"\n\n"
2923 " cell processor:\n"
2924 " heartbeat state:\t\t");
2926 if (hb >> 16 != 0xDEAD)
2927 len += sprintf(page + len, "0x%08x\n", hb);
2928 else
2929 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2935 static const char* media_name[] = {
2936 "unshielded twisted pair",
2937 "multimode optical fiber ST",
2938 "multimode optical fiber SC",
2939 "single-mode optical fiber ST",
2940 "single-mode optical fiber SC",
2944 static const char* oc3_mode[] = {
2946 "diagnostic loopback",
2951 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2952 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2953 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2954 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2957 if (media_index > 4)   /* media_index is a u32, so only the upper bound needs checking */
2960 switch (fore200e->loop_mode) {
2961 case ATM_LM_NONE: oc3_index = 0;
2963 case ATM_LM_LOC_PHY: oc3_index = 1;
2965 case ATM_LM_RMT_PHY: oc3_index = 2;
2967 default: oc3_index = 3;
2970 return sprintf(page,
2971 " firmware release:\t\t%d.%d.%d\n"
2972 " monitor release:\t\t%d.%d\n"
2973 " media type:\t\t\t%s\n"
2974 " OC-3 revision:\t\t0x%x\n"
2975 " OC-3 mode:\t\t\t%s",
2976 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2977 mon960_release >> 16, mon960_release << 16 >> 16,
2978 media_name[ media_index ],
2980 oc3_mode[ oc3_index ]);
2984 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2986 return sprintf(page,
2989 " version number:\t\t%d\n"
2990 " boot status word:\t\t0x%08x\n",
2991 fore200e->bus->read(&cp_monitor->mon_version),
2992 fore200e->bus->read(&cp_monitor->bstat));
2996 return sprintf(page,
2998 " device statistics:\n"
3000 " crc_header_errors:\t\t%10u\n"
3001 " framing_errors:\t\t%10u\n",
3002 fore200e_swap(fore200e->stats->phy.crc_header_errors),
3003 fore200e_swap(fore200e->stats->phy.framing_errors));
3006 return sprintf(page, "\n"
3008 " section_bip8_errors:\t%10u\n"
3009 " path_bip8_errors:\t\t%10u\n"
3010 " line_bip24_errors:\t\t%10u\n"
3011 " line_febe_errors:\t\t%10u\n"
3012 " path_febe_errors:\t\t%10u\n"
3013 " corr_hcs_errors:\t\t%10u\n"
3014 " ucorr_hcs_errors:\t\t%10u\n",
3015 fore200e_swap(fore200e->stats->oc3.section_bip8_errors),
3016 fore200e_swap(fore200e->stats->oc3.path_bip8_errors),
3017 fore200e_swap(fore200e->stats->oc3.line_bip24_errors),
3018 fore200e_swap(fore200e->stats->oc3.line_febe_errors),
3019 fore200e_swap(fore200e->stats->oc3.path_febe_errors),
3020 fore200e_swap(fore200e->stats->oc3.corr_hcs_errors),
3021 fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors));
3024 return sprintf(page,"\n"
3025 " ATM:\t\t\t\t cells\n"
3028 " vpi out of range:\t\t%10u\n"
3029 " vpi no conn:\t\t%10u\n"
3030 " vci out of range:\t\t%10u\n"
3031 " vci no conn:\t\t%10u\n",
3032 fore200e_swap(fore200e->stats->atm.cells_transmitted),
3033 fore200e_swap(fore200e->stats->atm.cells_received),
3034 fore200e_swap(fore200e->stats->atm.vpi_bad_range),
3035 fore200e_swap(fore200e->stats->atm.vpi_no_conn),
3036 fore200e_swap(fore200e->stats->atm.vci_bad_range),
3037 fore200e_swap(fore200e->stats->atm.vci_no_conn));
3040 return sprintf(page,"\n"
3041 " AAL0:\t\t\t cells\n"
3044 " dropped:\t\t\t%10u\n",
3045 fore200e_swap(fore200e->stats->aal0.cells_transmitted),
3046 fore200e_swap(fore200e->stats->aal0.cells_received),
3047 fore200e_swap(fore200e->stats->aal0.cells_dropped));
3050 return sprintf(page,"\n"
3052 " SAR sublayer:\t\t cells\n"
3055 " dropped:\t\t\t%10u\n"
3056 " CRC errors:\t\t%10u\n"
3057 " protocol errors:\t\t%10u\n\n"
3058 " CS sublayer:\t\t PDUs\n"
3061 " dropped:\t\t\t%10u\n"
3062 " protocol errors:\t\t%10u\n",
3063 fore200e_swap(fore200e->stats->aal34.cells_transmitted),
3064 fore200e_swap(fore200e->stats->aal34.cells_received),
3065 fore200e_swap(fore200e->stats->aal34.cells_dropped),
3066 fore200e_swap(fore200e->stats->aal34.cells_crc_errors),
3067 fore200e_swap(fore200e->stats->aal34.cells_protocol_errors),
3068 fore200e_swap(fore200e->stats->aal34.cspdus_transmitted),
3069 fore200e_swap(fore200e->stats->aal34.cspdus_received),
3070 fore200e_swap(fore200e->stats->aal34.cspdus_dropped),
3071 fore200e_swap(fore200e->stats->aal34.cspdus_protocol_errors));
3074 return sprintf(page,"\n"
3076 " SAR sublayer:\t\t cells\n"
3079 " dropped:\t\t\t%10u\n"
3080 " congestions:\t\t%10u\n\n"
3081 " CS sublayer:\t\t PDUs\n"
3084 " dropped:\t\t\t%10u\n"
3085 " CRC errors:\t\t%10u\n"
3086 " protocol errors:\t\t%10u\n",
3087 fore200e_swap(fore200e->stats->aal5.cells_transmitted),
3088 fore200e_swap(fore200e->stats->aal5.cells_received),
3089 fore200e_swap(fore200e->stats->aal5.cells_dropped),
3090 fore200e_swap(fore200e->stats->aal5.congestion_experienced),
3091 fore200e_swap(fore200e->stats->aal5.cspdus_transmitted),
3092 fore200e_swap(fore200e->stats->aal5.cspdus_received),
3093 fore200e_swap(fore200e->stats->aal5.cspdus_dropped),
3094 fore200e_swap(fore200e->stats->aal5.cspdus_crc_errors),
3095 fore200e_swap(fore200e->stats->aal5.cspdus_protocol_errors));
3098 return sprintf(page,"\n"
3099 " AUX:\t\t allocation failures\n"
3100 " small b1:\t\t\t%10u\n"
3101 " large b1:\t\t\t%10u\n"
3102 " small b2:\t\t\t%10u\n"
3103 " large b2:\t\t\t%10u\n"
3104 " RX PDUs:\t\t\t%10u\n"
3105 " TX PDUs:\t\t\t%10lu\n",
3106 fore200e_swap(fore200e->stats->aux.small_b1_failed),
3107 fore200e_swap(fore200e->stats->aux.large_b1_failed),
3108 fore200e_swap(fore200e->stats->aux.small_b2_failed),
3109 fore200e_swap(fore200e->stats->aux.large_b2_failed),
3110 fore200e_swap(fore200e->stats->aux.rpd_alloc_failed),
3114 return sprintf(page,"\n"
3115 " receive carrier:\t\t\t%s\n",
3116 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3119 return sprintf(page,"\n"
3120 " VCCs:\n address VPI VCI AAL "
3121 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3124 for (i = 0; i < NBR_CONNECT; i++) {
3126 vcc = fore200e->vc_map[i].vcc;
3131 spin_lock_irqsave(&fore200e->q_lock, flags);
3133 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3135 fore200e_vcc = FORE200E_VCC(vcc);
3136 ASSERT(fore200e_vcc);
3139 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3140 (u32)(unsigned long)vcc,
3141 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3142 fore200e_vcc->tx_pdu,
3143 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3144 fore200e_vcc->tx_max_pdu,
3145 fore200e_vcc->rx_pdu,
3146 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3147 fore200e_vcc->rx_max_pdu);
3149 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3153 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3159 module_init(fore200e_module_init);
3160 module_exit(fore200e_module_cleanup);
3163 static const struct atmdev_ops fore200e_ops =
3165 .open = fore200e_open,
3166 .close = fore200e_close,
3167 .ioctl = fore200e_ioctl,
3168 .getsockopt = fore200e_getsockopt,
3169 .setsockopt = fore200e_setsockopt,
3170 .send = fore200e_send,
3171 .change_qos = fore200e_change_qos,
3172 .proc_read = fore200e_proc_read,
3173 .owner = THIS_MODULE
3177 #ifdef CONFIG_ATM_FORE200E_PCA
3178 extern const unsigned char _fore200e_pca_fw_data[];
3179 extern const unsigned int _fore200e_pca_fw_size;
3181 #ifdef CONFIG_ATM_FORE200E_SBA
3182 extern const unsigned char _fore200e_sba_fw_data[];
3183 extern const unsigned int _fore200e_sba_fw_size;
3186 static const struct fore200e_bus fore200e_bus[] = {
3187 #ifdef CONFIG_ATM_FORE200E_PCA
3188 { "PCA-200E", "pca200e", 32, 4, 32,
3189 _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
3192 fore200e_pca_dma_map,
3193 fore200e_pca_dma_unmap,
3194 fore200e_pca_dma_sync_for_cpu,
3195 fore200e_pca_dma_sync_for_device,
3196 fore200e_pca_dma_chunk_alloc,
3197 fore200e_pca_dma_chunk_free,
3199 fore200e_pca_configure,
3202 fore200e_pca_prom_read,
3205 fore200e_pca_irq_check,
3206 fore200e_pca_irq_ack,
3207 fore200e_pca_proc_read,
3210 #ifdef CONFIG_ATM_FORE200E_SBA
3211 { "SBA-200E", "sba200e", 32, 64, 32,
3212 _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
3215 fore200e_sba_dma_map,
3216 fore200e_sba_dma_unmap,
3217 fore200e_sba_dma_sync_for_cpu,
3218 fore200e_sba_dma_sync_for_device,
3219 fore200e_sba_dma_chunk_alloc,
3220 fore200e_sba_dma_chunk_free,
3221 fore200e_sba_detect,
3222 fore200e_sba_configure,
3225 fore200e_sba_prom_read,
3227 fore200e_sba_irq_enable,
3228 fore200e_sba_irq_check,
3229 fore200e_sba_irq_ack,
3230 fore200e_sba_proc_read,
3236 #ifdef MODULE_LICENSE
3237 MODULE_LICENSE("GPL");