/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#define USE_TASKLET
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#undef HE_DEBUG
#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */
/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);
static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};
#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
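/*
 * worked example (ours, not from the manual): with 12 vcibits,
 * vpi 1 / vci 32 maps to cid = ((1 << 12) | 32) & 0x1fff = 0x1020
 */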
/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

/*
 * NOTE While the transmit connection is active, bits 23 through 0
 *	of this register must not be written by the host.  Byte
 *	enables should be used during normal operation when writing
 *	the most significant byte.
 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}
static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "he: %s\n", version);

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
							GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	memset(he_dev, 0, sizeof(struct he_dev));

	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	if (he_dev)
		kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}
static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)
	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
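/*
 * worked example (illustrative): rate = 10 cps is shifted up to 5120,
 * then halved three times to 640 = 0x280, giving exp = 3 and a 9-bit
 * mantissa of 0x080; the result NONZERO | (3 << 9) | 0x080 decodes back
 * to (1 << 3) * (512 + 128) / 512 = 10 cps -- the inverse computation
 * used in he_init_cs_block_rcm() below
 */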
static void __init
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
static void __init
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
static void __init
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		++lbufd_index;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
static int __init
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
static void __init
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}
static int __init
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
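	/*
	 * illustration (assuming an OC-3 link rate of 353207 cps):
	 * delta = 353207 / 32 = 11037, so row 0 runs from 353207 down to
	 * 353207 - 15 * 11037 = 187652 cps; each later row holds half the
	 * row above it, except the last row, which is divided by 4
	 */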
	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * into the rate grid above
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
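		/*
		 * sanity check (ours): rate_atmf = 0x068 gives man = 0x080
		 * and exp = 3, so rate_cps = (1 << 3) * (128 + 512) >> 9
		 * = 10 cps -- the inverse of rate_to_atmf() above
		 */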
		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;

		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x2)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}
static int __init
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */
#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;
	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */
#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}
static int __init
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
							IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EIO;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
static int __init
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/*
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 */
#define LAT_TIMER 209
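/* with the worst-case figures above: LAT_TIMER = 1 + 16 + 192 = 209 */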
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
				he_dev->media & 0x40 ? "SM" : "MM",
				dev->esi[0], dev->esi[1], dev->esi[2],
				dev->esi[3], dev->esi[4], dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *	      HE155                       HE622
	 *
	 *	 0 ____________1023  bytes   0 _______________________2047 bytes
	 *	  |            |              |                   |   |
	 *	  |  utility   |              |        rx0        |   |
	 *	 5|____________|           255|___________________| u |
	 *	 6|            |           256|                   | t |
	 *	  |            |              |                   | i |
	 *	  |    rx0     |    row       |        tx         | l |
	 *	  |            |              |                   | i |
	 *	  |            |           767|___________________| t |
	 *     517|____________|           768|                   | y |
	 * row 518|            |              |        rx1        |   |
	 *	  |            |          1023|___________________|___|
	 *	  |            |
	 *	  |     tx     |
	 *	  |            |
	 *	  |            |
	 *    1535|____________|
	 *    1536|            |
	 *	  |    rx1     |
	 *    2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}

	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;
	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *	            tx memory
	 *	   0x0 ___________________
	 *	      |                   |
	 *	      |                   |
	 *	      |       TSRa        |
	 *	      |                   |
	 *	      |                   |
	 *	0x8000|___________________|
	 *	      |                   |
	 *	      |       TSRb        |
	 *	0xc000|___________________|
	 *	      |                   |
	 *	      |       TSRc        |
	 *	0xe000|___________________|
	 *	      |       TSRd        |
	 *	0xf000|___________________|
	 *	      |       tmABR       |
	 *     0x10000|___________________|
	 *	      |                   |
	 *	      |       tmTPD       |
	 *	      |___________________|
	 *	      |                   |
	 *	               ....
	 *     0x1ffff|___________________|
	 *
	 */

	he_writel(he_dev, CONFIG_TSRA, TSRA_BA);
	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);

	/*
	 *	receive connection memory map
	 *
	 *	   0x0 ___________________
	 *	      |                   |
	 *	      |                   |
	 *	      |       RSRa        |
	 *	      |                   |
	 *	      |                   |
	 *	0x8000|___________________|
	 *	      |                   |
	 *	      |       rx0/1       |
	 *	      |       LBM         |   link lists of local
	 *	      |       tx          |   buffer memory
	 *	      |       LBM         |
	 *	0xd000|___________________|
	 *	      |                   |
	 *	      |       rmABR       |
	 *	0xe000|___________________|
	 *	      |                   |
	 *	      |       RSRb        |
	 *	      |___________________|
	 *	      |                   |
	 *	               ....
	 *	0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);
	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}
	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else /* !USE_TPD_POOL */
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif /* USE_TPD_POOL */
	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;

	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}
static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif
#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}
static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}
#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/*
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))
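/*
 * layout sketch (standard aal5 trailer -- the last 8 bytes of the pdu):
 *
 *	offset:  len-8  len-7  len-6 len-5  len-4 ... len-1
 *	field:    UU     CPI   [ length  ]  [    crc-32    ]
 *
 * AAL5_LEN() extracts the 16-bit length field; TCP_CKSUM() reads the
 * last two crc bytes, where the adapter may substitute the checksum
 */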
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		cid = RBRQ_CID(he_dev->rbrq_head);

		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;

			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}

		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
					? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
					? "LEN_ERR" : "",
						vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		do_gettimeofday(&skb->stamp);

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
#endif
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb->tail = skb->data + skb->len;
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb->tail = skb->data + skb->len;
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_HW;
					skb->csum = TCP_CKSUM(skb->data,
								he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

		ATM_SKB(skb)->vcc = vcc;

		vcc->push(vcc, skb);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));
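		/*
		 * note: ++he_dev->rbrq_head advances the pointer by
		 * sizeof(struct he_rbrq), and RBRQ_MASK() keeps only the
		 * ring offset bits, so the or with rbrq_base wraps the
		 * head back to the start of the queue automatically
		 */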
	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
				((unsigned long)he_dev->tbrq_base |
					he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
#ifdef USE_TPD_POOL
	struct he_tpd *__tpd;
#endif

	/* 2.1.6 transmit buffer return queue */

	while (he_dev->tbrq_head != tbrq_tail) {
		++updated;

		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head),
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
#ifdef USE_TPD_POOL
		tpd = NULL;
		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}

		if (tpd == NULL) {
			hprintk("unable to locate tpd for dma buffer %x\n",
						TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}
#else
		tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
#endif

		if (TBRQ_EOS(he_dev->tbrq_head)) {
			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
			if (tpd->vcc)
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);

			goto next_tbrq_entry;
		}

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;
		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
#ifdef USE_TPD_POOL
		if (tpd)
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
		tpd->inuse = 0;
#endif
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(++he_dev->tbrq_head));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
						G0_TBRQ_H + (group * 16));
	}
}
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbpl_head;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbpl_tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}

#ifdef USE_RBPS
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbps_head;
	int moved = 0;

	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
						RBPS_MASK(he_dev->rbps_tail+1));

		/* table 3.42 -- rbps_tail should never be set to rbps_head */
		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbps_tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
}
#endif /* USE_RBPS */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
#ifdef USE_TASKLET
	spin_lock_irqsave(&he_dev->global_lock, flags);
#endif

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				if (he_service_rbrq(he_dev, group)) {
					he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
					he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				}
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
				he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
				he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata; flush posted writes */
	}
#ifdef USE_TASKLET
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
#endif
}
static irqreturn_t
he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
#ifdef USE_TASKLET
		tasklet_schedule(&he_dev->tasklet);
#else
		he_tasklet((unsigned long) he_dev);
#endif
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);
}
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
#ifdef USE_TPD_POOL
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
			tpd->inuse = 0;
#endif
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
#ifdef USE_TPD_POOL
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
	he_dev->tpdrq_tail->cid = cid;
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);	/* flush posted writes */
}

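/*
 * Added commentary: the TPDRQ is declared full when tail+1 == head, which
 * deliberately sacrifices one slot -- otherwise tail == head would be
 * ambiguous between "empty" and "full".  With a ring of N entries, at most
 * N-1 TPDs can be outstanding before the drop path above is taken.
 */
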
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	he_vcc->iov_tail = he_vcc->iov_head;
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10)) {
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);	/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
						break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n", reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);
				break;

			default:
				err = -EINVAL;
				goto open_failed;
		}
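
		/*
		 * Worked example (added commentary): on a 155 Mbit/s card the
		 * cell scheduler clock above is 50000000 Hz, so a CBR vcc
		 * with pcr_goal = 353207 cells/s (full OC-3 payload rate)
		 * gets period = 50000000 / 353207 = 141 clocks per cell;
		 * half that period, encoded by rate_to_atmf(), is what was
		 * written to CS_STPER0 + reg.
		 */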
		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);	/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
						&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);
			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

#ifdef USE_RBPS
		rsr1 = RSR1_GROUP(0);
		rsr4 = RSR4_GROUP(0);
#else /* !USE_RBPS */
		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
#endif /* USE_RBPS */
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		if (he_vcc)
			kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}

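/*
 * Added commentary: he_mkcid() packs a vpi/vci pair into the connection id
 * that indexes the adapter's TSR/RSR connection state -- conceptually
 * cid = (vpi << vcibits) | vci within the 4096-connection space.  The
 * authoritative definition is in he.h; this is only a sketch.
 */
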
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);
	}

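	/*
	 * Added commentary: the rx close is asynchronous -- the
	 * RSR0_CLOSE_CONN write plus the RXCON_CLOSE mailbox command ask
	 * the adapter to drain the connection, and completion is signalled
	 * back through the receive buffer return queue, which wakes
	 * rx_waitq.  The 30 second schedule_timeout() is only a safety net.
	 */
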
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */
		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;
			++retry;
		}

		if (tx_inuse)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

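		/*
		 * Worked numbers (added commentary): sleep doubles from 1 ms
		 * and tops out above 250 ms, with at most MAX_RETRY (30)
		 * iterations -- so this loop gives up on in-flight tx
		 * buffers after roughly six seconds (1+2+...+128 ms for the
		 * first eight tries, then ~256 ms per remaining try).
		 */
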
		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);	/* flush posted writes */

		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}

static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif
#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		char *pti_clp = (void *) (skb->data + 3);
		int pti, clp;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}
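
	/*
	 * Added commentary: an AAL0 SDU here is a 52-byte cell -- a 4-byte
	 * header (no HEC) plus the 48-byte payload.  Byte 3 of the header
	 * carries the PTI and CLP bits extracted above, and the skb_pull()
	 * drops the 4 header bytes (ATM_AAL0_SDU - ATM_CELL_PAYLOAD), since
	 * the adapter rebuilds the cell header from the connection id and
	 * the TPD flags.
	 */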
#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb->len - skb->data_len, PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb->len - skb->data_len;
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so don't ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;
	}

	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;
	ATM_SKB(skb)->vcc = vcc;
	wmb();

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}

static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	struct he_ioctl_reg reg;
	int err = 0;

	switch (cmd) {
		case HE_GET_REG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (copy_from_user(&reg, arg, sizeof(struct he_ioctl_reg)))
				return -EFAULT;

			spin_lock_irqsave(&he_dev->global_lock, flags);
			switch (reg.type) {
				case HE_REGTYPE_PCI:
					reg.val = he_readl(he_dev, reg.addr);
					break;
				case HE_REGTYPE_RCM:
					reg.val = he_readl_rcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_TCM:
					reg.val = he_readl_tcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_MBOX:
					reg.val = he_readl_mbox(he_dev, reg.addr);
					break;
				default:
					err = -EINVAL;
					break;
			}
			spin_unlock_irqrestore(&he_dev->global_lock, flags);
			if (err == 0)
				if (copy_to_user(arg, &reg, sizeof(struct he_ioctl_reg)))
					return -EFAULT;
			break;
		default:
#ifdef CONFIG_ATM_HE_USE_SUNI
			if (atm_dev->phy && atm_dev->phy->ioctl)
				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
			err = -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
			break;
	}

	return err;
}

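/*
 * Usage sketch (illustrative only, not part of the driver): a privileged
 * userspace tool would fill a struct he_ioctl_reg from <linux/atm_he.h>
 * and issue HE_GET_REG against the interface, along the lines of
 *
 *	struct he_ioctl_reg reg = { .addr = 0x0, .type = HE_REGTYPE_PCI };
 *	... wrap &reg in the usual atmif_sioc and ioctl() the ATM socket ...
 *
 * reg.val then holds the register contents.  The exact wrapping follows
 * the generic ATM device ioctl conventions, not anything specific here.
 */
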
static int
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);

	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

	spin_lock_irqsave(&he_dev->global_lock, flags);
	he_writel(he_dev, val, FRAMER + (addr*4));
	(void) he_readl(he_dev, FRAMER + (addr*4));	/* flush posted writes */
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	return 0;
}

static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	unsigned reg;

	spin_lock_irqsave(&he_dev->global_lock, flags);
	reg = he_readl(he_dev, FRAMER + (addr*4));
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);

	return reg;
}

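/*
 * Added commentary: the SUNI PHY registers sit behind the FRAMER window at
 * 32-bit spacing, hence the addr*4 scaling in he_phy_get/he_phy_put --
 * PHY register N lives at FRAMER + (N * 4), with only the low byte of
 * each word meaningful.
 */
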
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail, inuse;
#endif
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;

	left = *pos;
	if (!left--)
		return sprintf(page, "%s\n", version);

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);

#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);

	return 0;
}

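/*
 * Example output (added commentary; counters will vary, and the first two
 * lines are the driver version string and the board's product id/media):
 *
 *	<version string>
 *	<product id> MM
 *
 *	Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells
 *	               0                 0              0                  0
 *
 * Each call renders one line; the ATM proc layer advances *pos until this
 * function returns 0.
 */
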
/* eeprom routines -- see 4.7 */

static u8
read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < sizeof(readtab)/sizeof(readtab[0]); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}

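/*
 * Added commentary: read_prom_byte() bit-bangs a serial (93C46-style)
 * EEPROM through HOST_CNTL: raise the write-enable bit, clock out the READ
 * opcode from readtab[], clock the 8-bit address MSB-first (two writes per
 * bit to generate both clock edges), then clock in 8 data bits, sampling
 * ID_DOUT each cycle.  EEPROM_DELAY paces every edge.
 */
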
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit PCI bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		DEV_LABEL,
	.probe =	he_init_one,
	.remove =	__devexit_p(he_remove_one),
	.id_table =	he_pci_tbl,
};

static int __init he_init(void)
{
	return pci_register_driver(&he_driver);
}

static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);
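
/*
 * Usage sketch (added commentary): the module parameters above are set at
 * load time, e.g.
 *
 *	modprobe he nvcibits=12 irq_coalesce=1 sdh=0
 *
 * nvpibits and nvcibits together must fit the adapter's 4096-connection
 * (12-bit cid) space, so the defaults of 0 and 12 spend every bit on vcis.
 */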