/******************************************************************************
      iphase.c: Device driver for Interphase ATM PCI adapter cards
              Author: Peter Wang <pwang@iphase.com>
      Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
              Interphase Corporation <www.iphase.com>
*******************************************************************************

  This software may be used and distributed according to the terms
  of the GNU General Public License (GPL), incorporated herein by reference.
  Drivers based on this skeleton fall under the GPL and must retain
  the authorship (implicit copyright) notice.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  Modified from an incomplete driver for the Interphase 5575 1KVC 1M card,
  originally written by Monalisa Agrawal at UNH. This driver now supports
  a variety of variants of the Interphase ATM PCI (i)Chip adapter card
  family (see www.iphase.com/products/ClassSheet.cfm?ClassID=ATM),
  differing in PHY type, control memory size and packet memory size.
  Change log and history:

     Bugfix Mona's UBR driver.
     Modify the basic memory allocation and DMA logic.
     Port the driver to the latest kernel from 2.0.46.
     Complete the ABR logic of the driver, and add the ABR workaround
        for the hardware anomalies.

     Add the flow control logic to the driver to allow rate-limited VCs.
     Add 4K VC support to the board with 512K control memory.
     Add support for all the variants of the Interphase ATM PCI
        (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
        (25M UTP25) and x531 (DS3 and E3).

  Support and updates available at: ftp://ftp.iphase.com/pub/atm

*******************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
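/* Note (illustrative): swap() byte-swaps a 16-bit value, e.g.
 * swap(0x3412) == 0x1234. It appears to be used to convert big-endian
 * on-the-wire fields - see the AAL5 CPCS trailer length read in
 * rx_dle_intr() below - to host order on this 32-bit-only driver. */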
struct suni_priv {
   struct k_sonet_stats sonet_stats; /* link diagnostics */
   unsigned char loop_mode;          /* loopback mode */
   struct atm_dev *dev;              /* device back-pointer */
   struct suni_priv *next;           /* next SUNI */
};
#define PRIV(dev) ((struct suni_priv *) (dev)->phy_data)
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
static void desc_dbg(IADEV *iadev);

static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
static void ia_led_timer(unsigned long arg);
static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR | IF_IADBG_INIT_ADAPTER
                             | IF_IADBG_ABR | IF_IADBG_EVENT */ 0;

module_param(IA_TX_BUF, int, 0);
module_param(IA_TX_BUF_SZ, int, 0);
module_param(IA_RX_BUF, int, 0);
module_param(IA_RX_BUF_SZ, int, 0);
module_param(IADebugFlag, uint, 0644);

MODULE_LICENSE("GPL");
#if BITS_PER_LONG != 32
#  error FIXME: this driver only works on 32-bit platforms
#endif
/**************************** IA_LIB **********************************/

static void ia_init_rtn_q (IARTN_Q *que)

static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q *data)
   if (que->next == NULL)
      que->next = que->tail = data;
      data->next = que->next;

static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
   IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
   if (!entry)
      return -1;
   if (que->next == NULL)
      que->next = que->tail = entry;
      que->tail->next = entry;
      que->tail = que->tail->next;

static IARTN_Q *ia_deque_rtn_q (IARTN_Q *que) {
   if (que->next == NULL)
      if (que->next == que->tail)
         que->next = que->tail = NULL;
      que->next = que->next->next;
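/* Note on the TCQ (transmit complete queue) handled below: it is a ring
 * of 16-bit descriptor numbers in segmentation RAM. The hardware
 * advances TCQ_WR_PTR as packets complete; the driver trails it with
 * host_tcq_wr, stepping 2 bytes per entry and wrapping from ffL.tcq_ed
 * back to ffL.tcq_st. Descriptor numbers are 1-based, so desc - 1
 * indexes desc_tbl[]. */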
static void ia_hack_tcq(IADEV *dev) {

   struct ia_vcc *iavcc_r = NULL;

   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
   while (dev->host_tcq_wr != tcq_wr) {
      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
      else if (!dev->desc_tbl[desc1 - 1].timestamp) {
         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 - 1, jiffies);)
         *(u_short *)(dev->seg_ram + dev->host_tcq_wr) = 0;
      else if (dev->desc_tbl[desc1 - 1].timestamp) {
         if (!(iavcc_r = dev->desc_tbl[desc1 - 1].iavcc)) {
            printk("IA: Fatal err in get_desc\n");
         iavcc_r->vc_desc_cnt--;
         dev->desc_tbl[desc1 - 1].timestamp = 0;
         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n",
                         (u32)dev->desc_tbl[desc1 - 1].txskb, desc1);)
         if (iavcc_r->pcr < dev->rate_limit) {
            IA_SKB_STATE (dev->desc_tbl[desc1 - 1].txskb) |= IA_TX_DONE;
            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 - 1]) < 0)
               printk("ia_hack_tcq: No memory available\n");
         dev->desc_tbl[desc1 - 1].iavcc = NULL;
         dev->desc_tbl[desc1 - 1].txskb = NULL;
      dev->host_tcq_wr += 2;
      if (dev->host_tcq_wr > dev->ffL.tcq_ed)
         dev->host_tcq_wr = dev->ffL.tcq_st;
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {

   struct ia_vcc *iavcc_r = NULL;

   static unsigned long timer = 0;

   if (((jiffies - timer) > 50) || (dev->ffL.tcq_rd == dev->host_tcq_wr)) {

      while (i < dev->num_tx_desc) {
         if (!dev->desc_tbl[i].timestamp) {
            ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
            delta = jiffies - dev->desc_tbl[i].timestamp;
            if (delta >= ltimeout) {
               IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n",
                             i, dev->desc_tbl[i].timestamp, delta, jiffies);)
               if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
                  dev->ffL.tcq_rd = dev->ffL.tcq_ed;
                  dev->ffL.tcq_rd -= 2;
               *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i + 1;
               if (!(skb = dev->desc_tbl[i].txskb) ||
                   !(iavcc_r = dev->desc_tbl[i].iavcc))
                  printk("Fatal err, desc table vcc or skb is NULL\n");
               iavcc_r->vc_desc_cnt--;
               dev->desc_tbl[i].timestamp = 0;
               dev->desc_tbl[i].iavcc = NULL;
               dev->desc_tbl[i].txskb = NULL;

   if (dev->ffL.tcq_rd == dev->host_tcq_wr)

   /* Get the next available descriptor number from TCQ */
   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

   while (!desc_num || (dev->desc_tbl[desc_num - 1]).timestamp) {
      dev->ffL.tcq_rd += 2;
      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
         dev->ffL.tcq_rd = dev->ffL.tcq_st;
      if (dev->ffL.tcq_rd == dev->host_tcq_wr)
      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

   /* get system time */
   dev->desc_tbl[desc_num - 1].timestamp = jiffies;
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {

   vcstatus_t *vcstatus;

   u_short tempCellSlot, tempFract;
   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;

   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);

      if (vcstatus->cnt == 0x05) {

         if (eabr_vc->last_desc) {
            if ((abr_vc->status & 0x07) == ABR_STATE /* 0x2 */) {
               /* wait for 10 microseconds */
               if ((eabr_vc->last_desc) && ((abr_vc->status & 0x07) == ABR_STATE))

                  tempCellSlot = abr_vc->last_cell_slot;
                  tempFract    = abr_vc->fraction;
                  if ((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                     && (tempFract == dev->testTable[vcc->vci]->fract))
                  dev->testTable[vcc->vci]->lastTime = tempCellSlot;
                  dev->testTable[vcc->vci]->fract = tempFract;
         } /* last descriptor */
      } /* vcstatus->cnt */

      IF_ABR(printk("LOCK UP found\n");)
      writew(0xFFFD, dev->seg_reg+MODE_REG_0);
      /* wait for 10 microseconds */
      abr_vc->status &= 0xFFF8;
      abr_vc->status |= 0x0001;  /* state is idle */
      shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
      for (i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++);
      shd_tbl[i] = vcc->vci;
      IF_ERR(printk("ABR Seg. may not continue on VC %x\n", vcc->vci);)
      writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
      writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
      writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
/*
** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
**
**  +----+----+------------------+-------------------------------+
**  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
**  +----+----+------------------+-------------------------------+
**
**    R = reserved (written as 0)
**    NZ = 0 if 0 cells/sec; 1 otherwise
**
**    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
*/
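/* Worked example (illustrative, assuming NZ is bit 14 as the diagram
 * suggests): 1536 cells/sec = 0b110_0000_0000 = 1.5 x 2^10, so the
 * exponent is 10 and the 9-bit mantissa is .100000000b = 0x100, giving
 * NZ | (10 << 9) | 0x100 = 0x5500. */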
static u16
cellrate_to_float(u32 cr)
{

#define M_BITS 9    /* Number of bits in mantissa */
#define E_BITS 5    /* Number of bits in exponent */

   u32 tmp = cr & 0x00ffffff;

      flot = NZ | (i << M_BITS) | (cr & M_MASK);

      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);

      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
*/
static u32
float_to_cellrate(u16 rate)
{
   u32 exp, mantissa, cps;

   if ((rate & NZ) == 0)
      return 0;
   exp = (rate >> M_BITS) & E_MASK;
   mantissa = rate & M_MASK;

   cps = (1 << M_BITS) | mantissa;

   else if (exp > M_BITS)
      cps <<= (exp - M_BITS);
   else
      cps >>= (M_BITS - exp);
   return cps;
}
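/* Note: the two conversions round-trip for representable rates.
 * Continuing the example above, 0x5500 decodes as exp = 10 and
 * mantissa = 0x100, so cps = (0x200 | 0x100) << (10 - 9) = 1536
 * cells/sec. Rates whose mantissa needs more than 9 bits lose the
 * truncated low bits. */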
static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
   srv_p->class_type = ATM_ABR;
   srv_p->pcr = dev->LineRate;

   srv_p->icr = 0x055cb7;
   srv_p->tbe = 0xffffff;
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
               struct atm_vcc *vcc, u8 flag)
{
   f_vc_abr_entry *f_abr_vc;
   r_vc_abr_entry *r_abr_vc;

   u16 adtf, air, *ptr16;
   f_abr_vc = (f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
   f_abr_vc += vcc->vci;
   switch (flag) {
   case 1: /* FFRED initialization */
#if 0  /* sanity check */
      if (srv_p->pcr > dev->LineRate)
         srv_p->pcr = dev->LineRate;
      if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
         return MCR_UNAVAILABLE;
      if (srv_p->mcr > srv_p->pcr)

      srv_p->icr = srv_p->pcr;
      if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
      if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
      if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
      if (srv_p->nrm > MAX_NRM)
      if (srv_p->trm > MAX_TRM)
      if (srv_p->adtf > MAX_ADTF)
      else if (srv_p->adtf == 0)
      if (srv_p->cdf > MAX_CDF)
      if (srv_p->rif > MAX_RIF)
      if (srv_p->rdf > MAX_RDF)
#endif
      memset((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
      f_abr_vc->f_vc_type = ABR;
      nrm = 2 << srv_p->nrm;    /* (2 ** (srv_p->nrm + 1)) */
                                /* i.e. 2**n = 2 << (n-1) */
      f_abr_vc->f_nrm = nrm << 8 | nrm;
      trm = 100000 / (2 << (16 - srv_p->trm));
      if (trm == 0) trm = 1;
      f_abr_vc->f_nrmexp = (((srv_p->nrm + 1) & 0x0f) << 12) | (MRM << 8) | trm;
      crm = srv_p->tbe / nrm;
      if (crm == 0) crm = 1;
      f_abr_vc->f_crm = crm & 0xff;
      f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
      icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
                 ((srv_p->tbe/srv_p->frtt)*1000000) :
                 (1000000/(srv_p->frtt/srv_p->tbe)));
      f_abr_vc->f_icr = cellrate_to_float(icr);
      adtf = (10000 * srv_p->adtf) / 8192;
      if (adtf == 0) adtf = 1;
      f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
      f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
      f_abr_vc->f_acr = f_abr_vc->f_icr;
      f_abr_vc->f_status = 0x0042;

   case 0: /* RFRED initialization */
      ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
      *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
      r_abr_vc = (r_vc_abr_entry *)(dev->reass_ram + ABR_VC_TABLE*dev->memSize);
      r_abr_vc += vcc->vci;
      r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
      air = srv_p->pcr << (15 - srv_p->rif);
      if (air == 0) air = 1;
      r_abr_vc->r_air = cellrate_to_float(air);
      dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
      dev->sum_mcr += srv_p->mcr;
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow = 0, rateHigh, rate;

   struct ia_vcc *ia_vcc;

   int idealSlot = 0, testSlot, toBeAssigned, inc;

   u16 *SchedTbl, *TstSchedTbl;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)

   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                 entries, rate, dev->Granularity);)

   IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow  = entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
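   /* Note on the rounding test below: 3*(rate - rateLow) > (rateHigh - rate)
    * is equivalent to rate > rateLow + (rateHigh - rateLow)/4, i.e. the
    * entry count is rounded up whenever the requested rate is more than a
    * quarter of the way from rateLow to rateHigh, biasing toward granting
    * slightly more CBR bandwidth rather than less. */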
   if (3*(rate - rateLow) > (rateHigh - rate))
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                    entries, dev->CbrRemEntries);)

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
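   // A worked example of the smoothing arithmetic below (illustrative
   // numbers): with CbrTotEntries = 3072 and entries = 5, spacing = 614
   // and sp_mod = 2, so the ideal stride is 614.4 slots. fracSlot and
   // sp_mod2 accumulate the remainder, so successive strides come out
   // 614, 614, 614, 615, ... averaging 614.4, which spreads this VC's
   // cell slots evenly around the schedule table.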
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries;   // get modulo
   toBeAssigned = entries;

   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n", vcIndex, spacing, sp_mod);)

   // If this is the first time, start the table loading for this connection
   // as close to entryPoint as possible.
   if (toBeAssigned == entries)

      idealSlot = dev->CbrEntryPt;
      dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
      if (dev->CbrEntryPt >= dev->CbrTotEntries)
         dev->CbrEntryPt -= dev->CbrTotEntries;   // Wrap if necessary

      idealSlot += (u32)(spacing + fracSlot);     // Point to the next location
                                                  // in the table that would be smoothest
      fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
      sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part

   if (idealSlot >= (int)dev->CbrTotEntries)
      idealSlot -= dev->CbrTotEntries;
   // Continuously check around this ideal value until a null
   // location is encountered.
   SchedTbl = (u16 *)(dev->seg_ram + CBR_SCHED_TABLE*dev->memSize);

   testSlot = idealSlot;
   TstSchedTbl = (u16 *)(SchedTbl + testSlot);   // set index and read in value
   IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
                 testSlot, (u32)TstSchedTbl, toBeAssigned);)
   memcpy((caddr_t)&cbrVC, (caddr_t)TstSchedTbl, sizeof(cbrVC));
   while (cbrVC)   // If another VC is at this location, we have to keep looking

         testSlot = idealSlot - inc;
         if (testSlot < 0) {   // Wrap if necessary
            testSlot += dev->CbrTotEntries;
            IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
                          (u32)SchedTbl, testSlot);)

         TstSchedTbl = (u16 *)(SchedTbl + testSlot);   // set table index
         memcpy((caddr_t)&cbrVC, (caddr_t)TstSchedTbl, sizeof(cbrVC));

         testSlot = idealSlot + inc;
         if (testSlot >= (int)dev->CbrTotEntries) {   // Wrap if necessary
            testSlot -= dev->CbrTotEntries;
            IF_CBR(printk("TotCbrEntries=%d", dev->CbrTotEntries);)
            IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                          testSlot, toBeAssigned);)

         // set table index and read in value
         TstSchedTbl = (u16 *)(SchedTbl + testSlot);
         IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
                       (u32)TstSchedTbl, cbrVC, inc);)
         memcpy((caddr_t)&cbrVC, (caddr_t)TstSchedTbl, sizeof(cbrVC));

   // Move this VCI number into this location of the CBR Sched table.
   memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
   dev->CbrRemEntries--;

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
      writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
      IF_CBR(printk("CBR is enabled\n");)
static void ia_cbrVc_close (struct atm_vcc *vcc) {

   u16 *SchedTbl, NullVci = 0;

   iadev = INPH_IA_DEV(vcc->dev);
   iadev->NumEnabledCBR--;
   SchedTbl = (u16 *)(iadev->seg_ram + CBR_SCHED_TABLE*iadev->memSize);
   if (iadev->NumEnabledCBR == 0) {
      writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
      IF_CBR(printk("CBR support disabled\n");)

   for (i = 0; i < iadev->CbrTotEntries; i++)

      if (*SchedTbl == vcc->vci) {
         iadev->CbrRemEntries++;

   IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n", NumFound);)
static int ia_avail_descs(IADEV *iadev) {

   if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
      tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
   else
      tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
             iadev->ffL.tcq_st) / 2;
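/* Note: each TCQ entry is 2 bytes, hence the division by 2. With
 * hypothetical ring bounds tcq_st = 0x000 and tcq_ed = 0x0fe (128
 * entries), tcq_rd = 0x0f0 and host_tcq_wr = 0x010 means the write
 * pointer has wrapped, so the count is
 * (0x0fe - 0x0f0 + 2 + 0x010 - 0x000) / 2 = 16 descriptors. */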
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);

static int ia_que_tx (IADEV *iadev) {

   struct ia_vcc *iavcc;
   num_desc = ia_avail_descs(iadev);

   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
      if (!(vcc = ATM_SKB(skb)->vcc)) {
         dev_kfree_skb_any(skb);
         printk("ia_que_tx: Null vcc\n");

      if (!test_bit(ATM_VF_READY, &vcc->flags)) {
         dev_kfree_skb_any(skb);
         printk("Free the SKB on closed vci %d\n", vcc->vci);

      iavcc = INPH_IA_VCC(vcc);
      if (ia_pkt_tx(vcc, skb)) {
         skb_queue_head(&iadev->tx_backlog, skb);
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;

   while ((rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
      skb = rtne->data.txskb;

         printk("ia_tx_poll: skb is null\n");

      vcc = ATM_SKB(skb)->vcc;

         printk("ia_tx_poll: vcc is null\n");
         dev_kfree_skb_any(skb);

      iavcc = INPH_IA_VCC(vcc);

         printk("ia_tx_poll: iavcc is null\n");
         dev_kfree_skb_any(skb);

      skb1 = skb_dequeue(&iavcc->txing_skb);
      while (skb1 && (skb1 != skb)) {
         if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
            printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);

         IF_ERR(printk("Released an skb that does not match\n");)
         if ((vcc->pop) && (skb1->len != 0))

         IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",

         dev_kfree_skb_any(skb1);
         skb1 = skb_dequeue(&iavcc->txing_skb);

      IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n", vcc->vci);)
      ia_enque_head_rtn_q(&iadev->tx_return_q, rtne);

      if ((vcc->pop) && (skb->len != 0))

      IF_EVENT(printk("Tx Done - skb 0x%lx return\n", (long)skb);)

      dev_kfree_skb_any(skb);
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
   /*
    * Issue a command to enable writes to the NOVRAM
    */
   NVRAM_CMD (EXTEND + EWEN);

   /*
    * issue the write command
    */
   NVRAM_CMD(IAWRITE + addr);

   /*
    * Send the data, starting with D15, then D14, and so on for 16 bits
    */
   for (i = 15; i >= 0; i--) {
      NVRAM_CLKOUT (val & 0x8000);

   t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

   t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

   /*
    * disable writes again
    */
   NVRAM_CMD(EXTEND + EWDS)

static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
   /*
    * Read the first bit that was clocked with the falling edge of
    * the last command data clock
    */
   NVRAM_CMD(IAREAD + addr);

   /*
    * Now read the rest of the bits, the next bit read is D14, then D13,
    * and so on
    */
   for (i = 15; i >= 0; i--) {
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz   = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz   = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      if (IA_TX_BUF == DFL_TX_BUFFERS)
         iadev->num_tx_desc = IA_TX_BUF / 2;
      else
         iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
         iadev->num_rx_desc = IA_RX_BUF / 2;
      else
         iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else {
      if (IA_TX_BUF == DFL_TX_BUFFERS)
         iadev->num_tx_desc = IA_TX_BUF / 8;
      else
         iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
         iadev->num_rx_desc = IA_RX_BUF / 8;
      else
         iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
                  iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
                  iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
      iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                  memType, iadev->phy_type);)
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
      iadev->LineRate = (u32)(ATM_OC3_PCR);
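   /* Note on the LineRate arithmetic above: bits/sec are divided by 8
    * to get bytes/sec, scaled by 26/27 (which appears to model
    * physical-layer framing overhead), then divided by the 53-byte
    * cell size to get cells/sec. For the 25.6 Mbit PHY:
    * (25600000/8)*26/(27*53) = 83200000/1431 = 58141 cells/sec. */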
   IF_INIT(printk("iadev->LineRate = %d\n", iadev->LineRate);)
static void IaFrontEndIntr(IADEV *iadev) {
   volatile IA_SUNI *suni;
   volatile ia_mb25_t *mb25;
   volatile suni_pm7345_t *suni_pm7345;

   if (iadev->phy_type & FE_25MBIT_PHY) {
      mb25 = (ia_mb25_t *)iadev->phy;
      iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
   } else if (iadev->phy_type & FE_DS3_PHY) {
      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
      /* clear FRMR interrupts */
      frmr_intr = suni_pm7345->suni_ds3_frm_intr_stat;
      iadev->carrier_detect =
         Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
   } else if (iadev->phy_type & FE_E3_PHY) {
      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
      frmr_intr = suni_pm7345->suni_e3_frm_maint_intr_ind;
      iadev->carrier_detect =
         Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
   } else {
      suni = (IA_SUNI *)iadev->phy;
      intr_status = suni->suni_rsop_status & 0xff;
      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
   }
   if (iadev->carrier_detect)
      printk("IA: SUNI carrier detected\n");
   else
      printk("IA: SUNI carrier lost signal\n");
static void ia_mb25_init (IADEV *iadev)
{
   volatile ia_mb25_t *mb25 = (ia_mb25_t *)iadev->phy;
#if 0
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
   mb25->mb25_diag_control = 0;
   /*
    * Initialize carrier detect state
    */
   iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
static void ia_suni_pm7345_init (IADEV *iadev)
{
   volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
   if (iadev->phy_type & FE_DS3_PHY)
   {
      iadev->carrier_detect =
         Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
      suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
      suni_pm7345->suni_ds3_frm_cfg = 1;
      suni_pm7345->suni_ds3_tran_cfg = 1;
      suni_pm7345->suni_config = 0;
      suni_pm7345->suni_splr_cfg = 0;
      suni_pm7345->suni_splt_cfg = 0;
   }
   else
   {
      iadev->carrier_detect =
         Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
      suni_pm7345->suni_e3_frm_fram_options = 0x4;
      suni_pm7345->suni_e3_frm_maint_options = 0x20;
      suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
      suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
      suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
      suni_pm7345->suni_e3_tran_fram_options = 0x1;
      suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
      suni_pm7345->suni_splr_cfg = 0x41;
      suni_pm7345->suni_splt_cfg = 0x41;
   }
   /*
    * Enable RSOP loss of signal interrupt.
    */
   suni_pm7345->suni_intr_enbl = 0x28;

   /*
    * Clear error counters
    */
   suni_pm7345->suni_id_reset = 0;

   /*
    * Clear "PMCTST" in master test register.
    */
   suni_pm7345->suni_master_test = 0;

   suni_pm7345->suni_rxcp_ctrl = 0x2c;
   suni_pm7345->suni_rxcp_fctrl = 0x81;

   suni_pm7345->suni_rxcp_idle_pat_h1 =
   suni_pm7345->suni_rxcp_idle_pat_h2 =
   suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
   suni_pm7345->suni_rxcp_idle_pat_h4 = 1;

   suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;

   suni_pm7345->suni_rxcp_cell_pat_h1 =
   suni_pm7345->suni_rxcp_cell_pat_h2 =
   suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
   suni_pm7345->suni_rxcp_cell_pat_h4 = 1;

   suni_pm7345->suni_rxcp_cell_mask_h1 =
   suni_pm7345->suni_rxcp_cell_mask_h2 =
   suni_pm7345->suni_rxcp_cell_mask_h3 =
   suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;

   suni_pm7345->suni_txcp_ctrl = 0xa4;
   suni_pm7345->suni_txcp_intr_en_sts = 0x10;
   suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;

   suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |

#ifdef __SNMP__
   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
/***************************** IA_LIB END *****************************/

#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
static void xdump( u_char* cp, int length, char* prefix )
{
    u_char* pBuf = prntBuf;

    while (count < length) {
        pBuf += sprintf( pBuf, "%s", prefix );
        for (col = 0; count + col < length && col < 16; col++) {
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
        }
        while (col++ < 16) { /* pad end of buffer with blanks */
                sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, " " );
        }
        pBuf += sprintf( pBuf, " " );
        for (col = 0; count + col < length && col < 16; col++) {
            if (isprint((int)cp[count + col]))
                pBuf += sprintf( pBuf, "%c", cp[count + col] );
            else
                pBuf += sprintf( pBuf, "." );
        }
        sprintf( pBuf, "\n" );

} /* close xdump(... */
#endif /* CONFIG_ATM_IA_DEBUG */
static struct atm_dev *ia_boards = NULL;

#define ACTUAL_RAM_BASE \
          RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
          IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
          IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
/*-- some utilities and memory allocation stuff will come here -------------*/

static void desc_dbg(IADEV *iadev) {

   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;

   // regval = readl((u32)ia_cmds->maddr);
   tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
          tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
          readw(iadev->seg_ram+tcq_wr_ptr-2));
   printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,

   tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
   tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
   printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);

   while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d Addr = %p\n", i++, readw(tmp), tmp);

   for (i = 0; i < iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
/*----------------------------- Receiving side stuff --------------------------*/
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving side will cause too many exception interrupts */

   u_short excpq_rd_ptr;

   iadev = INPH_IA_DEV(dev);
   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
   while ((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
   {  printk("state = %x \n", state);
      excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
      printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
      if (excpq_rd_ptr == *(u16 *)(iadev->reass_reg + EXCP_Q_WR_PTR))
         IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
      // TODO: update exception stat
      vci = readw(iadev->reass_ram+excpq_rd_ptr);
      error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;

      if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR) & 0xffff))
         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR) & 0xffff;
      writew(excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
static void free_desc(struct atm_dev *dev, int desc)
{
   iadev = INPH_IA_DEV(dev);
   writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
   iadev->rfL.fdq_wr += 2;
   if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
      iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
   writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
static int rx_pkt(struct atm_dev *dev)
{
   struct atm_vcc *vcc;
   unsigned short status;
   struct rx_buf_desc __iomem *buf_desc_ptr;

   struct sk_buff *skb;
   u_int buf_addr, dma_addr;

   iadev = INPH_IA_DEV(dev);
   if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff))
   {
      printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);

   /* mask the first 3 bits to get the actual desc number */
   desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
   IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                iadev->reass_ram, iadev->rfL.pcq_rd, desc);
         printk(" pcq_wr_ptr = 0x%x\n",
                readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff);)
   /* update the read pointer - maybe we should do this at the end */
   if (iadev->rfL.pcq_rd == iadev->rfL.pcq_ed)
      iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
   else
      iadev->rfL.pcq_rd += 2;
   writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

   /* get the buffer descriptor entry.
      update stuff. - doesn't seem to be any update necessary
   */
   buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
   /* make the ptr point to the corresponding buffer desc entry */
   buf_desc_ptr += desc;
   if (!desc || (desc > iadev->num_rx_desc) ||
       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
      free_desc(dev, desc);
      IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)

   vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];

      free_desc(dev, desc);
      printk("IA: null vcc, drop PDU\n");

   /* might want to check the status bits for errors */
   status = (u_short) (buf_desc_ptr->desc_mode);
   if (status & (RX_CER | RX_PTE | RX_OFL))
   {
      atomic_inc(&vcc->stats->rx_err);
      IF_ERR(printk("IA: bad packet, dropping it");)
      if (status & RX_CER) {
         IF_ERR(printk(" cause: packet CRC error\n");)
      }
      else if (status & RX_PTE) {
         IF_ERR(printk(" cause: packet time out\n");)
      }
      else {
         IF_ERR(printk(" cause: buffer overflow\n");)
      }

   buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
   dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
   len = dma_addr - buf_addr;
   if (len > iadev->rx_buf_sz) {
      printk("SDU of over %d bytes received, dropped!!!\n", iadev->rx_buf_sz);
      atomic_inc(&vcc->stats->rx_err);

   if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {

      printk("Drop control packets\n");

   ATM_SKB(skb)->vcc = vcc;
   ATM_DESC(skb) = desc;
   skb_queue_tail(&iadev->rx_dma_q, skb);

   /* Build the DLE structure */
   wr_ptr = iadev->rx_dle_q.write;
   wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
                                         len, PCI_DMA_FROMDEVICE);
   wr_ptr->local_pkt_addr = buf_addr;
   wr_ptr->bytes = len;   /* We don't know this, do we? */
   wr_ptr->mode = DMA_INT_ENABLE;

   /* should take care of wrap-around here too */
   if (++wr_ptr == iadev->rx_dle_q.end)
      wr_ptr = iadev->rx_dle_q.start;
   iadev->rx_dle_q.write = wr_ptr;

   /* Increment transaction counter */
   writel(1, iadev->dma+IPHASE5575_RX_COUNTER);

   free_desc(dev, desc);
static void rx_intr(struct atm_dev *dev)
{
   iadev = INPH_IA_DEV(dev);
   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
   if (status & RX_PKT_RCVD)
   {
      /* Basically we received an interrupt for receiving a packet.
         A descriptor would have been written to the packet complete
         queue. Get all the descriptors and set up DMA to move the
         packets till the packet complete queue is empty.
      */
      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
      IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
      while (!(state & PCQ_EMPTY))
      {
         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
      }
   }
   if (status & RX_FREEQ_EMPT)
   {
      iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
      iadev->rx_tmp_jif = jiffies;

      else if (((jiffies - iadev->rx_tmp_jif) > 50) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
         for (i = 1; i <= iadev->num_rx_desc; i++)

         printk("Test logic RUN!!!!\n");
         writew(~(RX_FREEQ_EMPT|RX_EXCP_RCVD), iadev->reass_reg+REASS_MASK_REG);
      }
      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
   }
   if (status & RX_EXCP_RCVD)
   {
      /* probably need to handle the exception queue also */
      IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
   }

   if (status & RX_RAW_RCVD)
   {
      /* need to handle the raw incoming cells. This depends on
         whether we have programmed to receive the raw cells or not.
      */
      IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
   }
static void rx_dle_intr(struct atm_dev *dev)
{
   struct atm_vcc *vcc;
   struct sk_buff *skb;

   struct dle *dle, *cur_dle;

   iadev = INPH_IA_DEV(dev);

   /* free all the DLEs done; that is, just update our own dle read pointer
      - do we really need to do this? Think not. */
   /* DMA is done, just get all the receive buffers from the rx dma queue
      and push them up to the higher layer protocol. Also free the desc
      associated with the buffer. */
   dle = iadev->rx_dle_q.read;
   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
   cur_dle = (struct dle *)(iadev->rx_dle_q.start + (dle_lp >> 4));
   while (dle != cur_dle)
   {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);

      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
         printk("rx_dle_intr: skb len 0\n");
         dev_kfree_skb_any(skb);

         struct cpcs_trailer *trailer;

         struct ia_vcc *ia_vcc;

         pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
                          len, PCI_DMA_FROMDEVICE);
         /* no VCC related housekeeping done as yet. lets see */
         vcc = ATM_SKB(skb)->vcc;

            printk("IA: null vcc\n");
            dev_kfree_skb_any(skb);

         ia_vcc = INPH_IA_VCC(vcc);

            atomic_inc(&vcc->stats->rx_err);
            dev_kfree_skb_any(skb);
            atm_return(vcc, atm_guess_pdu2truesize(len));

         /* get the real packet length (pwang_test) */
         trailer = (struct cpcs_trailer *)((u_char *)skb->data +
                                           skb->len - sizeof(*trailer));
         length = swap(trailer->length);
         if ((length > iadev->rx_buf_sz) || (length >
                                             (skb->len - sizeof(struct cpcs_trailer))))
         {
            atomic_inc(&vcc->stats->rx_err);
            IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",

            dev_kfree_skb_any(skb);
            atm_return(vcc, atm_guess_pdu2truesize(len));
         }
         skb_trim(skb, length);

         /* Display the packet */
         IF_RXPKT(printk("\nDMAed Rcvd data: len = %d \n", skb->len);
                  xdump(skb->data, skb->len, "RX: ");

         IF_RX(printk("rx_dle_intr: skb push");)

         atomic_inc(&vcc->stats->rx);
         iadev->rx_pkt_cnt++;

      if (++dle == iadev->rx_dle_q.end)
         dle = iadev->rx_dle_q.start;

   iadev->rx_dle_q.read = dle;

   /* if the interrupts are masked because there were no free
      descriptors available, */
   if (!iadev->rxing) {
      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
      if (!(state & FREEQ_EMPTY)) {
         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                iadev->reass_reg+REASS_MASK_REG);
static int open_rx(struct atm_vcc *vcc)
{
   u_short __iomem *vc_table;
   u_short __iomem *reass_ptr;
   IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

   if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
   iadev = INPH_IA_DEV(vcc->dev);
   if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
      if (iadev->phy_type & FE_25MBIT_PHY) {
         printk("IA: ABR not supported\n");

   /* Make only this VCI in the vc table valid and let all
      others be invalid entries */
   vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
   vc_table += vcc->vci;
   /* mask the last 6 bits and OR it with 3 for 1K VCs */

   *vc_table = vcc->vci << 6;
   /* Also keep a list of open rx vcs so that we can attach them with
      incoming PDUs later. */
   if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
       (vcc->qos.txtp.traffic_class == ATM_ABR))
   {
      srv_cls_param_t srv_p;
      init_abr_vc(iadev, &srv_p);
      ia_open_abr_vc(iadev, &srv_p, vcc, 0);
   }
   else { /* for UBR later may need to add CBR logic */
      reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
      reass_ptr += vcc->vci;
      *reass_ptr = NO_AAL5_PKT;
   }

   if (iadev->rx_open[vcc->vci])
      printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
             vcc->dev->number, vcc->vci);
   iadev->rx_open[vcc->vci] = vcc;
static int rx_init(struct atm_dev *dev)
{
   struct rx_buf_desc __iomem *buf_desc_ptr;
   unsigned long rx_pkt_start = 0;

   struct abr_vc_table *abr_vc_table;

   int i, j, vcsize_sel;
   u_short freeq_st_adr;
   u_short *freeq_start;

   iadev = INPH_IA_DEV(dev);
   // spin_lock_init(&iadev->rx_lock);

   /* Allocate 4k bytes - more aligned than needed (4k boundary) */
   dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
                                   &iadev->rx_dle_dma);

      printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");

   iadev->rx_dle_q.start = (struct dle *)dle_addr;
   iadev->rx_dle_q.read  = iadev->rx_dle_q.start;
   iadev->rx_dle_q.write = iadev->rx_dle_q.start;
   iadev->rx_dle_q.end   = (struct dle *)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
   /* the end of the dle q points to the entry after the last
      DLE that can be used. */

   /* write the upper 20 bits of the start address to rx list address register */
   writel(iadev->rx_dle_dma & 0xfffff000,
          iadev->dma + IPHASE5575_RX_LIST_ADDR);
   IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n",
                  (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR),
                  *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
           printk("Rx Dle list addr: 0x%08x value: 0x%0x\n",
                  (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR),
                  *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)
   writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
   writew(0, iadev->reass_reg+MODE_REG);
   writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

   /* Receive side control memory map
      -------------------------------
      Buffer descr    0x0000 (736 - 23K)
      VP Table        0x5c00 (256 - 512)
      Except q        0x5e00 (128 - 512)
      Free buffer q   0x6000 (1K - 2K)
      Packet comp q   0x6800 (1K - 2K)
      Reass Table     0x7000 (1K - 2K)
      VC Table        0x7800 (1K - 2K)
      ABR VC Table    0x8000 (1K - 32K)
   */

   /* Base address for Buffer Descriptor Table */
   writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
   /* Set the buffer size register */
   writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

   /* Initialize each entry in the Buffer Descriptor Table */
   iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
   buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
   memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));

   rx_pkt_start = iadev->rx_pkt_ram;
   for (i = 1; i <= iadev->num_rx_desc; i++)
   {
      memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
      buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
      buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;

      rx_pkt_start += iadev->rx_buf_sz;
   }
   IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)
   i = FREE_BUF_DESC_Q * iadev->memSize;
   writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
   writew(i, iadev->reass_reg+FREEQ_ST_ADR);
   writew(i+iadev->num_rx_desc*sizeof(u_short),
          iadev->reass_reg+FREEQ_ED_ADR);
   writew(i, iadev->reass_reg+FREEQ_RD_PTR);
   writew(i+iadev->num_rx_desc*sizeof(u_short),
          iadev->reass_reg+FREEQ_WR_PTR);
   /* Fill the FREEQ with all the free descriptors. */
   freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
   freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
   for (i = 1; i <= iadev->num_rx_desc; i++)
   {
      *freeq_start = (u_short)i;

   IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)
   /* Packet Complete Queue */
   i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
   writew(i, iadev->reass_reg+PCQ_ST_ADR);
   writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
   writew(i, iadev->reass_reg+PCQ_RD_PTR);
   writew(i, iadev->reass_reg+PCQ_WR_PTR);

   /* Exception Queue */
   i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
   writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
   writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
          iadev->reass_reg+EXCP_Q_ED_ADR);
   writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
   writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

   /* Load local copy of FREEQ and PCQ ptrs */
   iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
   iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff;
   iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
   iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
   iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
   iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
   iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
   iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

   IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
                  iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
                  iadev->rfL.pcq_wr);)
   /* just for check - no VP TBL */
   /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
   /* initialize VP Table for invalid VPIs
      - I guess we can write all 1s or 0x000f in the entire memory
        space or something similar.
   */

   /* This seems to work and looks right to me too !!! */
   i = REASS_TABLE * iadev->memSize;
   writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
   /* initialize Reassembly table entries to "no AAL5 packet in progress" */
   reass_table = (u16 *)(iadev->reass_ram+i);
   j = REASS_TABLE_SZ * iadev->memSize;
   for (i = 0; i < j; i++)
      *reass_table++ = NO_AAL5_PKT;

   while (i != iadev->num_vc) {

   i = RX_VC_TABLE * iadev->memSize;
   writew(((i >> 3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
   vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
   j = RX_VC_TABLE_SZ * iadev->memSize;
   for (i = 0; i < j; i++)
   {
      /* shift the reassembly pointer by 3 + lower 3 bits of
         vc_lkup_base register (=3 for 1K VCs) and the last byte
         is those low 3 bits.
         Shall program this later.
      */
      *vc_table = (i << 6) | 15;   /* for invalid VCI */

   i = ABR_VC_TABLE * iadev->memSize;
   writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

   i = ABR_VC_TABLE * iadev->memSize;
   abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
   j = REASS_TABLE_SZ * iadev->memSize;
   memset((char *)abr_vc_table, 0, j * sizeof(*abr_vc_table));
   for (i = 0; i < j; i++) {
      abr_vc_table->rdf = 0x0003;
      abr_vc_table->air = 0x5eb1;

   /* Initialize other registers */

   /* VP Filter Register set for VC Reassembly only */
   writew(0xff00, iadev->reass_reg+VP_FILTER);
   writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
   writew(0x1, iadev->reass_reg+PROTOCOL_ID);

   /* Packet Timeout Count related Registers :
      Set packet timeout to occur in about 3 seconds
      Set Packet Aging Interval count register to overflow in about 4 us
   */
   writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT);

   i = ((u32)ptr16 >> 6) & 0xff;

   i |= (((u32)ptr16 << 2) & 0xff00);
   writew(i, iadev->reass_reg+TMOUT_RANGE);
   /* initialize the desc_tbl */
   for (i = 0; i < iadev->num_tx_desc; i++)
      iadev->desc_tbl[i].timestamp = 0;

   /* to clear the interrupt status register - read it */
   readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

   /* Mask Register - clear it */
   writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

   skb_queue_head_init(&iadev->rx_dma_q);
   iadev->rx_free_desc_qhead = NULL;

   iadev->rx_open = kzalloc(iadev->num_vc * sizeof(struct atm_vcc *), GFP_KERNEL);
   if (!iadev->rx_open) {
      printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",

   iadev->rx_pkt_cnt = 0;
   writew(R_ONLINE, iadev->reass_reg+MODE_REG);

   pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,

/*
   The memory map suggested in appendix A and the coding for it.
   Keeping it around just in case we change our mind later.

      Buffer descr   0x0000 (128 - 4K)
      UBR sched      0x1000 (1K - 4K)
      UBR Wait q     0x2000 (1K - 4K)
      Commn queues   0x3000 Packet Ready, Transmit comp(0x3100)

      extended VC    0x4000 (1K - 8K)
      ABR sched      0x6000 and ABR wait queue (1K - 2K) each
      CBR sched      0x7000 (as needed)
      VC table       0x8000 (1K - 32K)
*/
static void tx_intr(struct atm_dev *dev)
{
   unsigned short status;
   unsigned long flags;

   iadev = INPH_IA_DEV(dev);

   status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
   if (status & TRANSMIT_DONE) {

      IF_EVENT(printk("Transmit Done Intr logic run\n");)
      spin_lock_irqsave(&iadev->tx_lock, flags);

      spin_unlock_irqrestore(&iadev->tx_lock, flags);
      writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
      if (iadev->close_pending)
         wake_up(&iadev->close_wait);
   }
   if (status & TCQ_NOT_EMPTY)
   {
      IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
static void tx_dle_intr(struct atm_dev *dev)
{
   struct dle *dle, *cur_dle;
   struct sk_buff *skb;
   struct atm_vcc *vcc;
   struct ia_vcc *iavcc;

   unsigned long flags;

   iadev = INPH_IA_DEV(dev);
   spin_lock_irqsave(&iadev->tx_lock, flags);
   dle = iadev->tx_dle_q.read;
   dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
            (sizeof(struct dle)*DLE_ENTRIES - 1);
   cur_dle = (struct dle *)(iadev->tx_dle_q.start + (dle_lp >> 4));
   while (dle != cur_dle)
   {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->tx_dma_q);

      /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
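      /* Note: ia_pkt_tx() queues two DLEs per packet (one for the skb
       * payload, one for the CPCS trailer), and only the first DLE of
       * each pair carries the skb's DMA mapping; the modulo test below
       * is meant to unmap on just that first DLE of the pair. */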
      if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
         pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,

      vcc = ATM_SKB(skb)->vcc;

         printk("tx_dle_intr: vcc is null\n");
         spin_unlock_irqrestore(&iadev->tx_lock, flags);
         dev_kfree_skb_any(skb);

      iavcc = INPH_IA_VCC(vcc);

         printk("tx_dle_intr: iavcc is null\n");
         spin_unlock_irqrestore(&iadev->tx_lock, flags);
         dev_kfree_skb_any(skb);

      if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
         if ((vcc->pop) && (skb->len != 0))

         dev_kfree_skb_any(skb);
      }
      else { /* Hold the rate-limited skb for flow control */
         IA_SKB_STATE(skb) |= IA_DLED;
         skb_queue_tail(&iavcc->txing_skb, skb);
      }
      IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
      if (++dle == iadev->tx_dle_q.end)
         dle = iadev->tx_dle_q.start;

   iadev->tx_dle_q.read = dle;
   spin_unlock_irqrestore(&iadev->tx_lock, flags);
static int open_tx(struct atm_vcc *vcc)
{
   struct ia_vcc *ia_vcc;

   IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
   if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
   iadev = INPH_IA_DEV(vcc->dev);

   if (iadev->phy_type & FE_25MBIT_PHY) {
      if (vcc->qos.txtp.traffic_class == ATM_ABR) {
         printk("IA: ABR not supported\n");

      if (vcc->qos.txtp.traffic_class == ATM_CBR) {
         printk("IA: CBR not supported\n");

   ia_vcc = INPH_IA_VCC(vcc);
   memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
   if (vcc->qos.txtp.max_sdu >
       (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))) {
      printk("IA: SDU size %d is over the configured SDU size %d\n",
             vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
      vcc->dev_data = NULL;

   ia_vcc->vc_desc_cnt = 0;

   if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
      vcc->qos.txtp.pcr = iadev->LineRate;
   else if ((vcc->qos.txtp.max_pcr == 0) && (vcc->qos.txtp.pcr <= 0))
      vcc->qos.txtp.pcr = iadev->LineRate;
   else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr > 0))
      vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
   if (vcc->qos.txtp.pcr > iadev->LineRate)
      vcc->qos.txtp.pcr = iadev->LineRate;
   ia_vcc->pcr = vcc->qos.txtp.pcr;

   if (ia_vcc->pcr > (iadev->LineRate / 6)) ia_vcc->ltimeout = HZ / 10;
   else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
   else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
   else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
   if (ia_vcc->pcr < iadev->rate_limit)
      skb_queue_head_init(&ia_vcc->txing_skb);
   if (ia_vcc->pcr < iadev->rate_limit) {
      struct sock *sk = sk_atm(vcc);

      if (vcc->qos.txtp.max_sdu != 0) {
         if (ia_vcc->pcr > 60000)
            sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
         else if (ia_vcc->pcr > 2000)
            sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
         else
            sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
      }
      else
         sk->sk_sndbuf = 24576;
   vc  = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
   evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;

   memset((caddr_t)vc,  0, sizeof(*vc));
   memset((caddr_t)evc, 0, sizeof(*evc));

   /* store the most significant 4 bits of vci as the last 4 bits
      of the first part of the ATM header.
      store the last 12 bits of vci as the first 12 bits of the second
      part of the ATM header.
   */
   evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
   evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
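   /* An illustrative value: for vci = 0x1234,
    * atm_hdr1 = (0x1234 >> 12) & 0xf = 0x1 and
    * atm_hdr2 = 0x234 << 4 = 0x2340, matching the split the comment
    * above describes. */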
   /* check the following for different traffic classes */
   if (vcc->qos.txtp.traffic_class == ATM_UBR)
   {
      vc->status = CRC_APPEND;
      vc->acr = cellrate_to_float(iadev->LineRate);
      if (vcc->qos.txtp.pcr > 0)
         vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
      IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
                    vcc->qos.txtp.max_pcr, vc->acr);)
   }
   else if (vcc->qos.txtp.traffic_class == ATM_ABR)
   {  srv_cls_param_t srv_p;
      IF_ABR(printk("Tx ABR VCC\n");)
      init_abr_vc(iadev, &srv_p);
      if (vcc->qos.txtp.pcr > 0)
         srv_p.pcr = vcc->qos.txtp.pcr;
      if (vcc->qos.txtp.min_pcr > 0) {
         int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
         if (tmpsum > iadev->LineRate)

         srv_p.mcr = vcc->qos.txtp.min_pcr;
         iadev->sum_mcr += vcc->qos.txtp.min_pcr;

      if (vcc->qos.txtp.icr)
         srv_p.icr = vcc->qos.txtp.icr;
      if (vcc->qos.txtp.tbe)
         srv_p.tbe = vcc->qos.txtp.tbe;
      if (vcc->qos.txtp.frtt)
         srv_p.frtt = vcc->qos.txtp.frtt;
      if (vcc->qos.txtp.rif)
         srv_p.rif = vcc->qos.txtp.rif;
      if (vcc->qos.txtp.rdf)
         srv_p.rdf = vcc->qos.txtp.rdf;
      if (vcc->qos.txtp.nrm_pres)
         srv_p.nrm = vcc->qos.txtp.nrm;
      if (vcc->qos.txtp.trm_pres)
         srv_p.trm = vcc->qos.txtp.trm;
      if (vcc->qos.txtp.adtf_pres)
         srv_p.adtf = vcc->qos.txtp.adtf;
      if (vcc->qos.txtp.cdf_pres)
         srv_p.cdf = vcc->qos.txtp.cdf;
      if (srv_p.icr > srv_p.pcr)
         srv_p.icr = srv_p.pcr;
      IF_ABR(printk("ABR: vcc->qos.txtp.max_pcr = %d mcr = %d\n",
                    srv_p.pcr, srv_p.mcr);)
      ia_open_abr_vc(iadev, &srv_p, vcc, 1);
   } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
      if (iadev->phy_type & FE_25MBIT_PHY) {
         printk("IA: CBR not supported\n");

      if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
         IF_CBR(printk("PCR is not available\n");)

      vc->status = CRC_APPEND;
      if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {

   } else
      printk("iadev: Non UBR, ABR and CBR traffic not supported\n");

   iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
   IF_EVENT(printk("ia open_tx returning \n");)
static int tx_init(struct atm_dev *dev)
{
   struct tx_buf_desc *buf_desc_ptr;
   unsigned int tx_pkt_start;

   iadev = INPH_IA_DEV(dev);
   spin_lock_init(&iadev->tx_lock);

   IF_INIT(printk("Tx MASK REG: 0x%0x\n",
                  readw(iadev->seg_reg+SEG_MASK_REG));)

   /* Allocate 4k (boundary aligned) bytes */
   dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
                                   &iadev->tx_dle_dma);

      printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");

   iadev->tx_dle_q.start = (struct dle *)dle_addr;
   iadev->tx_dle_q.read  = iadev->tx_dle_q.start;
   iadev->tx_dle_q.write = iadev->tx_dle_q.start;
   iadev->tx_dle_q.end   = (struct dle *)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

   /* write the upper 20 bits of the start address to tx list address register */
   writel(iadev->tx_dle_dma & 0xfffff000,
          iadev->dma + IPHASE5575_TX_LIST_ADDR);
   writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
   writew(0, iadev->seg_reg+MODE_REG_0);
   writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
   iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
   iadev->EXT_VC_TABLE_ADDR  = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
   iadev->ABR_SCHED_TABLE_ADDR = iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

   /*
      Transmit side control memory map
      --------------------------------
      Buffer descr   0x0000 (128 - 4K)
      Commn queues   0x1000 Transmit comp, Packet ready(0x1400)

      CBR Table      0x1800 (as needed) - 6K
      UBR Table      0x3000 (1K - 4K) - 12K
      UBR Wait queue 0x4000 (1K - 4K) - 16K
      ABR sched      0x5000 and ABR wait queue (1K - 2K) each
                     ABR Tbl - 20K, ABR Wq - 22K
      extended VC    0x6000 (1K - 8K) - 24K
      VC Table       0x8000 (1K - 32K) - 32K

      Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
      and Wait q, which can be allotted later.
   */
1947 /* Buffer Descriptor Table Base address */
1948 writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
1950 /* initialize each entry in the buffer descriptor table */
1951 buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
1952 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1954 tx_pkt_start = TX_PACKET_RAM;
1955 for(i=1; i<=iadev->num_tx_desc; i++)
1957 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1958 buf_desc_ptr->desc_mode = AAL5;
1959 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
1960 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
1962 tx_pkt_start += iadev->tx_buf_sz;
1964 iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1965 if (!iadev->tx_buf) {
1966 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1969 for (i= 0; i< iadev->num_tx_desc; i++)
1971 struct cpcs_trailer *cpcs;
1973 cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1975 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
1976 goto err_free_tx_bufs;
1978 iadev->tx_buf[i].cpcs = cpcs;
1979 iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1980 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1982 iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1983 sizeof(struct desc_tbl_t), GFP_KERNEL);
1984 if (!iadev->desc_tbl) {
1985 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1986 goto err_free_all_tx_bufs;
	/* Communication Queues base address */
	i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short), iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
					iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for (i = 1; i <= iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue */
	i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
					iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

	/* Load local copy of PRQ and TCQ ptrs */
	iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* For safety, zero the PRQ so that it starts out with no ready
	   descriptors at all. */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for (i = 1; i <= iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0;
		prq_start++;
	}
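	/*
	 * Added note: the TCQ doubles as the free-descriptor list.  Seeding
	 * it with 1..num_tx_desc means the transmit path can pop a free
	 * descriptor by reading at tcq_rd and advancing it; a sketch of the
	 * idea (mirroring the pointer handling in ia_pkt_tx() below):
	 *
	 *	desc = *(u16 *)(iadev->seg_ram + iadev->ffL.tcq_rd);
	 *	iadev->ffL.tcq_rd += 2;
	 *	if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
	 *		iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
	 */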
2036 IF_INIT(printk("Start CBR Init\n");)
2037 #if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
2038 writew(0,iadev->seg_reg+CBR_PTR_BASE);
2039 #else /* Charlie's logic is wrong ? */
2040 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2041 IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2042 writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2045 IF_INIT(printk("value in register = 0x%x\n",
2046 readw(iadev->seg_reg+CBR_PTR_BASE));)
2047 tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2048 writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2049 IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2050 readw(iadev->seg_reg+CBR_TAB_BEG));)
2051 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2052 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2053 writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2054 IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2055 (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2056 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2057 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2058 readw(iadev->seg_reg+CBR_TAB_END+1));)
2060 /* Initialize the CBR Schedualing Table */
2061 memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
2062 0, iadev->num_vc*6);
2063 iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2064 iadev->CbrEntryPt = 0;
2065 iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2066 iadev->NumEnabledCBR = 0;
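	/*
	 * Added note: with three schedule slots per VC, each CBR table
	 * entry is worth iadev->Granularity cells/sec of line rate.  A
	 * rough sizing sketch for a VC requesting pcr cells/sec:
	 *
	 *	entries = pcr / iadev->Granularity;	(rounded up)
	 *
	 * The CBR open path elsewhere in the driver spreads those entries
	 * across the table.
	 */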
	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- UBR Table size is 4K
		- UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memset.
	*/
	vcsize_sel = 0;
	i = 8*1024;
	while (i != iadev->num_vc) {
		i /= 2;
		vcsize_sel++;
	}
	i = MAIN_VC_TABLE * iadev->memSize;
	writew(vcsize_sel | ((i >> 8) & 0xfff8), iadev->seg_reg+VCT_BASE);
	i = EXT_VC_TABLE * iadev->memSize;
	writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
	i = UBR_SCHED_TABLE * iadev->memSize;
	writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
	i = UBR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
			0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff) */
	/* initialize all bytes of ABR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- ABR Table size is 2K
		- ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memset.
	*/
	i = ABR_SCHED_TABLE * iadev->memSize;
	writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
	i = ABR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

	i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	iadev->testTable = kmalloc(iadev->num_vc * sizeof(*iadev->testTable),
				   GFP_KERNEL);
	if (!iadev->testTable) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem for testTable\n");
		goto err_free_desc_tbl;
	}
	for (i = 0; i < iadev->num_vc; i++)
	{
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
		iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
						GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
		iadev->testTable[i]->lastTime = 0;
		iadev->testTable[i]->fract = 0;
		iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}
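	/*
	 * Added note: testTable[] is the host-side, per-VC scheduling state
	 * (last service time, fractional cell credit, current class).  Every
	 * VC starts out as VC_UBR here, and ia_close() below resets it back
	 * to VC_UBR when the connection goes away.
	 */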
	/* Other Initialization */

	/* Max Rate Register */
	if (iadev->phy_type & FE_25MBIT_PHY) {
		writew(RATE25, iadev->seg_reg+MAXRATE);
		writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	else {
		writew(cellrate_to_float(iadev->LineRate), iadev->seg_reg+MAXRATE);
		writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	/* Set Idle Header Registers to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
	writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

	iadev->close_pending = 0;
	init_waitqueue_head(&iadev->close_wait);
	init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
	skb_queue_head_init(&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg - don't mask TCQ_NOT_EMPTY interrupt generation */
	writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	iadev->tx_pkt_cnt = 0;
	iadev->rate_limit = iadev->LineRate / 3;

	return 0;
err_free_test_tables:
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		pci_unmap_single(iadev->pci, desc->dma_addr,
			sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			    iadev->tx_dle_dma);
	return -ENOMEM;
}
static irqreturn_t ia_int(int irq, void *dev_id)
{
	struct atm_dev *dev;
	IADEV *iadev;
	unsigned int status;
	int handled = 0;

	dev = dev_id;
	iadev = INPH_IA_DEV(dev);
	while ((status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
	{
		handled = 1;
		IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
		if (status & STAT_REASSINT) {
			IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
			rx_intr(dev);
		}
		if (status & STAT_DLERINT) {
			/* Clear this bit by writing a 1 to it. */
			*(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
			rx_dle_intr(dev);
		}
		if (status & STAT_SEGINT) {
			IF_EVENT(printk("IA: tx_intr\n");)
			tx_intr(dev);
		}
		if (status & STAT_DLETINT) {
			*(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
			tx_dle_intr(dev);
		}
		if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT)) {
			if (status & STAT_FEINT)
				IaFrontEndIntr(iadev);
		}
	}
	return IRQ_RETVAL(handled);
}
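/*
 * Added note: ia_int() loops on the bus status register so that events
 * arriving while earlier ones are being serviced are still picked up
 * before returning; `handled' tells the shared-IRQ core whether this
 * board contributed any of the pending interrupts.
 */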
/*----------------------------- entries --------------------------------*/
static int get_esi(struct atm_dev *dev)
{
	IADEV *iadev;
	int i;
	u32 mac1;
	u16 mac2;

	iadev = INPH_IA_DEV(dev);
	mac1 = cpu_to_be32(le32_to_cpu(readl(iadev->reg+IPHASE5575_MAC1)));
	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
	for (i = 0; i < MAC1_LEN; i++)
		dev->esi[i] = mac1 >> (8*(MAC1_LEN-1-i));

	for (i = 0; i < MAC2_LEN; i++)
		dev->esi[i+MAC1_LEN] = mac2 >> (8*(MAC2_LEN-1-i));
	return 0;
}
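/*
 * Worked example (added; assumes MAC1_LEN == 4 and MAC2_LEN == 2, as the
 * u32/u16 reads above suggest): mac1 == 0x00204811 and mac2 == 0x1234
 * yield esi[] = { 0x00, 0x20, 0x48, 0x11, 0x12, 0x34 }, i.e. the 6-byte
 * ESI with the most significant byte first.
 */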
static int reset_sar(struct atm_dev *dev)
{
	IADEV *iadev;
	int i, error = 1;
	unsigned int pci[64];

	iadev = INPH_IA_DEV(dev);
	for (i = 0; i < 64; i++)
		if ((error = pci_read_config_dword(iadev->pci, i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
			return error;
	writel(0, iadev->reg+IPHASE5575_EXT_RESET);
	for (i = 0; i < 64; i++)
		if ((error = pci_write_config_dword(iadev->pci, i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
			return error;
	udelay(5);
	return 0;
}
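/*
 * Added note: the write to IPHASE5575_EXT_RESET wipes the adapter's PCI
 * configuration space, which is why all 64 config dwords are saved before
 * the reset and written back afterwards.
 */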
static int __devinit ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start(iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
				dev->number, error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
			dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	if (iadev->pci_map_size == 0x100000) {
		iadev->num_vc = 4096;
		dev->ci_range.vci_bits = NR_VCI_4K_LD;
		iadev->memSize = 4;
	}
	else if (iadev->pci_map_size == 0x40000) {
		iadev->num_vc = 1024;
		iadev->memSize = 1;
	}
	else {
		printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base, iadev->pci_map_size);	/* ioremap is not resolved ??? */
	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
				dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
			dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size / 2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAM and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p\n %p %p %p %p\n",
			iadev->reg, iadev->seg_reg, iadev->reass_reg,
			iadev->phy, iadev->ram, iadev->seg_ram,
			iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
		iounmap(iadev->base);
		return error;
	}
	printk("IA: ");
	for (i = 0; i < ESI_LEN; i++)
		printk("%s%02X", i ? "-" : "", dev->esi[i]);
	printk("\n");

	/* reset SAR */
	if (reset_sar(dev)) {
		iounmap(iadev->base);
		printk("IA: reset SAR fail, please try again\n");
		return 1;
	}
	return 0;
}
static void ia_update_stats(IADEV *iadev) {
	if (!iadev->carrier_detect)
		return;
	iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0) & 0xffff;
	iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
	iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR) & 0xffff;
	iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
	iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO) & 0xffff;
	iadev->tx_cell_cnt += (readw(iadev->seg_reg + CELL_CTR_HIGH_AUTO) & 0xffff) << 16;
	return;
}
static void ia_led_timer(unsigned long arg) {
	unsigned long flags;
	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
	u_char i;
	static u32 ctrl_reg;

	for (i = 0; i < iadev_count; i++) {
		if (ia_dev[i]) {
			ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
			if (blinking[i] == 0) {
				blinking[i]++;
				ctrl_reg &= (~CTRL_LED);
				writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
				ia_update_stats(ia_dev[i]);
			}
			else {
				blinking[i] = 0;
				ctrl_reg |= CTRL_LED;
				writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
				spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
				if (ia_dev[i]->close_pending)
					wake_up(&ia_dev[i]->close_wait);
				ia_tx_poll(ia_dev[i]);
				spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
			}
		}
	}
	mod_timer(&ia_timer, jiffies + HZ / 4);
	return;
}
static void ia_phy_put(struct atm_dev *dev, unsigned char value,
		       unsigned long addr)
{
	writel(value, INPH_IA_DEV(dev)->phy+addr);
}

static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
{
	return readl(INPH_IA_DEV(dev)->phy+addr);
}
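/*
 * Usage sketch (added): together these allow read-modify-write of PHY
 * registers, as done for the loss-of-signal interrupt enable in
 * ia_start():
 *
 *	ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
 */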
static void ia_free_tx(IADEV *iadev)
{
	int i;

	kfree(iadev->desc_tbl);
	for (i = 0; i < iadev->num_vc; i++)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
	for (i = 0; i < iadev->num_tx_desc; i++) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		pci_unmap_single(iadev->pci, desc->dma_addr,
			sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
	pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			    iadev->tx_dle_dma);
}

static void ia_free_rx(IADEV *iadev)
{
	kfree(iadev->rx_open);
	pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
			    iadev->rx_dle_dma);
}
static int __devinit ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;

	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
	if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
		printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
				dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
	}
	/* @@@ should release IRQ on error */
	/* enabling memory + master */
	if ((error = pci_write_config_word(iadev->pci, PCI_COMMAND,
				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER)))
	{
		printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
			"master (0x%x)\n", dev->number, error);
		error = -EIO;
		goto err_free_irq;
	}
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
	   Registers and see. */
	IF_INIT(printk("Bus ctrl reg: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
		| CTRL_B8
		| CTRL_B16
		| CTRL_B32
		| CTRL_B48
		| CTRL_B64
		| CTRL_B128
		| CTRL_ERRMASK
		| CTRL_DLETMASK		/* shud be removed l8r */
		| CTRL_DLERMASK
		| CTRL_SEGMASK
		| CTRL_REASSMASK
		| CTRL_FEMASK
		| CTRL_CSPREEMPT;
	writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
		printk("Bus status reg after init: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

	ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	phy = 0; /* resolve compiler complaint */
	IF_INIT(
	if ((phy = ia_phy_get(dev, 0)) == 0x30)
		printk("IA: pm5346,rev.%d\n", phy & 0x0f);
	else
		printk("IA: utopia,rev.%0x\n", phy);)

	if (iadev->phy_type & FE_25MBIT_PHY)
		ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
		ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		/*
		 * Enable interrupt on loss of signal
		 * SUNI_RSOP_CIE - 0x10
		 * SUNI_RSOP_CIE_LOSE - 0x04
		 */
		ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		IaFrontEndIntr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
	u16 *vc_table;
	IADEV *iadev;
	struct ia_vcc *ia_vcc;
	struct sk_buff *skb = NULL;
	struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
	unsigned long closetime, flags;

	iadev = INPH_IA_DEV(vcc->dev);
	ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;

	IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
			ia_vcc->vc_desc_cnt, vcc->vci);)
	clear_bit(ATM_VF_READY, &vcc->flags);
	skb_queue_head_init(&tmp_tx_backlog);
	skb_queue_head_init(&tmp_vcc_backlog);
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		iadev->close_pending++;
		prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule_timeout(50);
		finish_wait(&iadev->timeout_wait, &wait);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		while ((skb = skb_dequeue(&iadev->tx_backlog))) {
			if (ATM_SKB(skb)->vcc == vcc) {
				if (vcc->pop) vcc->pop(vcc, skb);
				else dev_kfree_skb_any(skb);
			}
			else
				skb_queue_tail(&tmp_tx_backlog, skb);
		}
		while ((skb = skb_dequeue(&tmp_tx_backlog)))
			skb_queue_tail(&iadev->tx_backlog, skb);
		IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);)
		closetime = 300000 / ia_vcc->pcr;
		if (closetime == 0)
			closetime = 1;
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
		wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		iadev->close_pending--;
		iadev->testTable[vcc->vci]->lastTime = 0;
		iadev->testTable[vcc->vci]->fract = 0;
		iadev->testTable[vcc->vci]->vc_status = VC_UBR;
		if (vcc->qos.txtp.traffic_class == ATM_ABR) {
			if (vcc->qos.txtp.min_pcr > 0)
				iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
		}
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			ia_vcc = INPH_IA_VCC(vcc);
			iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
			ia_cbrVc_close(vcc);
		}
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		/* reset reassembly table */
		vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = NO_AAL5_PKT;
		/* reset VC table */
		vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = (vcc->vci << 6) | 15;
		if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
			struct abr_vc_table __iomem *abr_vc_table =
				(iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
			abr_vc_table += vcc->vci;
			abr_vc_table->rdf = 0x0003;
			abr_vc_table->air = 0x5eb1;
		}
		/* Drain the packets */
		rx_dle_intr(vcc->dev);
		iadev->rx_open[vcc->vci] = NULL;
	}
	kfree(INPH_IA_VCC(vcc));
	ia_vcc = NULL;
	vcc->dev_data = NULL;
	clear_bit(ATM_VF_ADDR, &vcc->flags);
	return;
}
static int ia_open(struct atm_vcc *vcc)
{
	IADEV *iadev;
	struct ia_vcc *ia_vcc;
	int error;

	if (!test_bit(ATM_VF_PARTIAL, &vcc->flags))
	{
		IF_EVENT(printk("ia: not partially allocated resources\n");)
		vcc->dev_data = NULL;
	}
	iadev = INPH_IA_DEV(vcc->dev);
	if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)
	{
		IF_EVENT(printk("iphase open: unspec part\n");)
		set_bit(ATM_VF_ADDR, &vcc->flags);
	}
	if (vcc->qos.aal != ATM_AAL5)
		return -EINVAL;
	IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
			vcc->dev->number, vcc->vpi, vcc->vci);)

	/* Device dependent initialization */
	ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
	if (!ia_vcc) return -ENOMEM;
	vcc->dev_data = ia_vcc;

	if ((error = open_rx(vcc)))
	{
		IF_EVENT(printk("iadev: error in open_rx, closing\n");)
		ia_close(vcc);
		return error;
	}
	if ((error = open_tx(vcc)))
	{
		IF_EVENT(printk("iadev: error in open_tx, closing\n");)
		ia_close(vcc);
		return error;
	}

	set_bit(ATM_VF_READY, &vcc->flags);
#if 0
	{
		static u8 first = 1;
		if (first) {
			ia_timer.expires = jiffies + 3*HZ;
			add_timer(&ia_timer);
			first = 0;
		}
	}
#endif
	IF_EVENT(printk("ia open returning\n");)
	return 0;
}
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}

static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
{
	IA_CMD ia_cmds;
	IADEV *iadev;
	int i, board;
	u16 __user *tmps;

	IF_EVENT(printk(">ia_ioctl\n");)
	if (cmd != IA_CMD) {
		if (!dev->phy->ioctl) return -EINVAL;
		return dev->phy->ioctl(dev, cmd, arg);
	}
	if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
	board = ia_cmds.status;
	if ((board < 0) || (board >= iadev_count))
		board = 0;
	iadev = ia_dev[board];
	switch (ia_cmds.cmd) {
	case MEMDUMP:
	{
		switch (ia_cmds.sub_cmd) {
		case MEMDUMP_DEV:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
				return -EFAULT;
			ia_cmds.status = 0;
			break;
		case MEMDUMP_SEGREG:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			tmps = (u16 __user *)ia_cmds.buf;
			for (i = 0; i < 0x80; i += 2, tmps++)
				if (put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
			ia_cmds.status = 0;
			ia_cmds.len = 0x80;
			break;
		case MEMDUMP_REASSREG:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			tmps = (u16 __user *)ia_cmds.buf;
			for (i = 0; i < 0x80; i += 2, tmps++)
				if (put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
			ia_cmds.status = 0;
			ia_cmds.len = 0x80;
			break;
		case MEMDUMP_FFL:
		{
			ia_regs_t *regs_local;
			ffredn_t *ffL;
			rfredn_t *rfL;

			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
			if (!regs_local) return -ENOMEM;
			ffL = &regs_local->ffredn;
			rfL = &regs_local->rfredn;
			/* Copy real rfred registers into the local copy */
			for (i = 0; i < (sizeof(rfredn_t))/4; i++)
				((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
			/* Copy real ffred registers into the local copy */
			for (i = 0; i < (sizeof(ffredn_t))/4; i++)
				((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;

			if (copy_to_user(ia_cmds.buf, regs_local, sizeof(ia_regs_t))) {
				kfree(regs_local);
				return -EFAULT;
			}
			kfree(regs_local);
			printk("Board %d registers dumped\n", board);
			break;
		}
		case READ_REG:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			desc_dbg(iadev);
			ia_cmds.status = 0;
			break;
		case 0x6:
			ia_cmds.status = 0;
			printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
			printk("rtn_q: 0x%lx\n", (long)ia_deque_rtn_q(&iadev->tx_return_q));
			break;
		case 0x8:
		{
			struct k_sonet_stats *stats;
			stats = &PRIV(_ia_dev[board])->sonet_stats;
			printk("section_bip: %d\n", atomic_read(&stats->section_bip));
			printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
			printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
			printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
			printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
			printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
			printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
			printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
			printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
			ia_cmds.status = 0;
			break;
		}
		case 0x9:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			for (i = 1; i <= iadev->num_rx_desc; i++)
				free_desc(_ia_dev[board], i);
			writew(~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
						iadev->reass_reg+REASS_MASK_REG);
			iadev->rxing = 1;
			ia_cmds.status = 0;
			break;
		case 0xb:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			IaFrontEndIntr(iadev);
			break;
		case 0xa:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			IADebugFlag = ia_cmds.maddr;
			printk("New debug option loaded\n");
			ia_cmds.status = 0;
			break;
		default:
			break;
		}
		break;
	}
	default:
		break;
	}
	return 0;
}
static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
	void __user *optval, int optlen)
{
	IF_EVENT(printk(">ia_getsockopt\n");)
	return -EINVAL;
}

static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
	void __user *optval, int optlen)
{
	IF_EVENT(printk(">ia_setsockopt\n");)
	return -EINVAL;
}
static int ia_pkt_tx(struct atm_vcc *vcc, struct sk_buff *skb) {
	IADEV *iadev;
	struct dle *wr_ptr;
	struct tx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	int comp_code;
	int total_len;
	struct cpcs_trailer *trailer;
	struct ia_vcc *iavcc;

	iadev = INPH_IA_DEV(vcc->dev);
	iavcc = INPH_IA_VCC(vcc);
	if (!iavcc->txing) {
		printk("discard packet on closed VC\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}

	if (skb->len > iadev->tx_buf_sz - 8) {
		printk("Transmit size over tx buffer size\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}

	if ((u32)skb->data & 3) {
		printk("Misaligned SKB\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}
	/* Get a descriptor number from our free descriptor queue.
	   We get the descr number from the TCQ now, since I am using
	   the TCQ as a free buffer queue. Initially TCQ will be
	   initialized with all the descriptors and is hence, full.
	*/
	desc = get_desc(iadev, iavcc);
	if (desc == 0xffff)
		return 1;
	comp_code = desc >> 13;
	desc &= 0x1fff;

	if ((desc == 0) || (desc > iadev->num_tx_desc))
	{
		IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
		atomic_inc(&vcc->stats->tx);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;	/* return SUCCESS */
	}

	if (comp_code)
	{
		IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
							desc, comp_code);)
	}

	/* remember the desc and vcc mapping */
	iavcc->vc_desc_cnt++;
	iadev->desc_tbl[desc-1].iavcc = iavcc;
	iadev->desc_tbl[desc-1].txskb = skb;
	IA_SKB_STATE(skb) = 0;

	iadev->ffL.tcq_rd += 2;
	if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
		iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
	writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);

	/* Put the descriptor number in the packet ready queue
	   and put the updated write pointer in the DLE field
	*/
	*(u16 *)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;

	iadev->ffL.prq_wr += 2;
	if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
		iadev->ffL.prq_wr = iadev->ffL.prq_st;

	/* Figure out the exact length of the packet and padding required to
	   make it aligned on a 48 byte boundary. */
	total_len = skb->len + sizeof(struct cpcs_trailer);
	total_len = ((total_len + 47) / 48) * 48;
	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
	/* Put the packet in a tx buffer */
	trailer = iadev->tx_buf[desc-1].cpcs;
	IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
			(u32)skb, (u32)skb->data, skb->len, desc);)
	trailer->control = 0;
	/* big endian */
	trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
	trailer->crc32 = 0;	/* not needed - dummy bytes */

	/* Display the packet */
	IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
			skb->len, tcnter++);
		xdump(skb->data, skb->len, "TX: ");
		printk("\n");)

	/* Build the buffer descriptor */
	buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
	buf_desc_ptr += desc;	/* points to the corresponding entry */
	buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
	/* Huh ? p.115 of users guide describes this as a read-only register */
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	buf_desc_ptr->vc_index = vcc->vci;
	buf_desc_ptr->bytes = total_len;

	if (vcc->qos.txtp.traffic_class == ATM_ABR)
		clear_lockup(vcc, iadev);

	/* Build the DLE structure */
	wr_ptr = iadev->tx_dle_q.write;
	memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
	wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
		skb->len, PCI_DMA_TODEVICE);
	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
				  buf_desc_ptr->buf_start_lo;
	/* wr_ptr->bytes = swap(total_len); didn't seem to affect ?? */
	wr_ptr->bytes = skb->len;

	/* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
	if ((wr_ptr->bytes >> 2) == 0xb)
		wr_ptr->bytes = 0x30;
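	/*
	 * Added note: (bytes >> 2) == 0xb matches byte counts 0x2c-0x2f,
	 * covering the 0x2d/0x2e/0x2f lockup cases (0x2c comes along for
	 * free).  Bumping the DLE to 0x30 bytes DMAs a few extra bytes,
	 * which is harmless: the AAL5 trailer still carries the true
	 * skb->len, so the receiver strips them as padding.
	 */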
	wr_ptr->mode = TX_DLE_PSI;
	wr_ptr->prq_wr_ptr_data = 0;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

	/* Build trailer dle */
	wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
	wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
		buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);

	wr_ptr->bytes = sizeof(struct cpcs_trailer);
	wr_ptr->mode = DMA_INT_ENABLE;
	wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

	iadev->tx_dle_q.write = wr_ptr;
	ATM_DESC(skb) = vcc->vci;
	skb_queue_tail(&iadev->tx_dma_q, skb);

	atomic_inc(&vcc->stats->tx);
	iadev->tx_pkt_cnt++;
	/* Increment transaction counter */
	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);

#if 0
	/* add flow control logic */
	if (atomic_read(&vcc->stats->tx) % 20 == 0) {
		if (iavcc->vc_desc_cnt > 10) {
			vcc->tx_quota = vcc->tx_quota * 3 / 4;
			printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota);
			iavcc->flow_inc = -1;
			iavcc->saved_tx_quota = vcc->tx_quota;
		} else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
			// vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
			printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota);
			iavcc->flow_inc = 0;
		}
	}
#endif
	IF_TX(printk("ia send done\n");)
	return 0;
}
static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	IADEV *iadev;
	struct ia_vcc *iavcc;
	unsigned long flags;

	iadev = INPH_IA_DEV(vcc->dev);
	iavcc = INPH_IA_VCC(vcc);
	if ((!skb) || (skb->len > (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))))
	{
		if (!skb)
			printk(KERN_CRIT "null skb in ia_send\n");
		else dev_kfree_skb_any(skb);
		return -EINVAL;
	}
	spin_lock_irqsave(&iadev->tx_lock, flags);
	if (!test_bit(ATM_VF_READY, &vcc->flags)) {
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
		return -EINVAL;
	}
	ATM_SKB(skb)->vcc = vcc;

	if (skb_peek(&iadev->tx_backlog)) {
		skb_queue_tail(&iadev->tx_backlog, skb);
	}
	else {
		if (ia_pkt_tx(vcc, skb)) {
			skb_queue_tail(&iadev->tx_backlog, skb);
		}
	}
	spin_unlock_irqrestore(&iadev->tx_lock, flags);
	return 0;
}
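/*
 * Added note: the skb_peek() test above preserves FIFO ordering; once
 * anything is sitting in tx_backlog, later packets must queue behind it
 * instead of overtaking it.  The backlog is drained as descriptors
 * complete (see ia_tx_poll(), called from the LED timer).
 */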
static int ia_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	int left = *pos, n;
	char *tmpPtr;
	IADEV *iadev = INPH_IA_DEV(dev);

	if (!left--) {
		if (iadev->phy_type == FE_25MBIT_PHY) {
			n = sprintf(page, "  Board Type :  Iphase5525-1KVC-128K\n");
			return n;
		}
		if (iadev->phy_type == FE_DS3_PHY)
			n = sprintf(page, "  Board Type :  Iphase-ATM-DS3");
		else if (iadev->phy_type == FE_E3_PHY)
			n = sprintf(page, "  Board Type :  Iphase-ATM-E3");
		else if (iadev->phy_type == FE_UTP_OPTION)
			n = sprintf(page, "  Board Type :  Iphase-ATM-UTP155");
		else
			n = sprintf(page, "  Board Type :  Iphase-ATM-OC3");
		tmpPtr = page + n;
		if (iadev->pci_map_size == 0x40000)
			n += sprintf(tmpPtr, "-1KVC-");
		else
			n += sprintf(tmpPtr, "-4KVC-");
		tmpPtr = page + n;
		if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
			n += sprintf(tmpPtr, "1M  \n");
		else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
			n += sprintf(tmpPtr, "512K\n");
		else
			n += sprintf(tmpPtr, "128K\n");
		return n;
	}
	if (!left--) {
		return sprintf(page, "  Number of Tx Buffer:  %u\n"
				"  Size of Tx Buffer  :  %u\n"
				"  Number of Rx Buffer:  %u\n"
				"  Size of Rx Buffer  :  %u\n"
				"  Packets Received   :  %u\n"
				"  Packets Transmitted:  %u\n"
				"  Cells Received     :  %u\n"
				"  Cells Transmitted  :  %u\n"
				"  Board Dropped Cells:  %u\n"
				"  Board Dropped Pkts :  %u\n",
				iadev->num_tx_desc, iadev->tx_buf_sz,
				iadev->num_rx_desc, iadev->rx_buf_sz,
				iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
				iadev->rx_cell_cnt, iadev->tx_cell_cnt,
				iadev->drop_rxcell, iadev->drop_rxpkt);
	}
	return 0;
}
static const struct atmdev_ops ops = {
	.open		= ia_open,
	.close		= ia_close,
	.ioctl		= ia_ioctl,
	.getsockopt	= ia_getsockopt,
	.setsockopt	= ia_setsockopt,
	.send		= ia_send,
	.phy_put	= ia_phy_put,
	.phy_get	= ia_phy_get,
	.change_qos	= ia_change_qos,
	.proc_read	= ia_proc_read,
	.owner		= THIS_MODULE,
};
static int __devinit ia_init_one(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	IADEV *iadev;
	unsigned long flags;
	int ret;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev) {
		ret = -ENOMEM;
		goto err_out;
	}

	iadev->pci = pdev;

	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
		pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
	if (pci_enable_device(pdev)) {
		ret = -ENODEV;
		goto err_out_free_iadev;
	}
	dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_out_disable_dev;
	}
	dev->dev_data = iadev;
	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
	IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
			iadev->LineRate);)

	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
	spin_lock_init(&iadev->misc_lock);
	/* First fixes first. I don't want to think about this now. */
	spin_lock_irqsave(&iadev->misc_lock, flags);	/* @@@ */
	if (ia_init(dev) || ia_start(dev)) {
		IF_INIT(printk("IA register failed!\n");)
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
		spin_unlock_irqrestore(&iadev->misc_lock, flags);
		ret = -EINVAL;
		goto err_out_deregister_dev;
	}
	spin_unlock_irqrestore(&iadev->misc_lock, flags);
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	iadev->next_board = ia_boards;
	ia_boards = dev;

	pci_set_drvdata(pdev, dev);

	return 0;

err_out_deregister_dev:
	atm_dev_deregister(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out_free_iadev:
	kfree(iadev);
err_out:
	return ret;
}
static void __devexit ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, 0x10) & ~(0x4), 0x10);
	udelay(1);

	/* De-register device */
	free_irq(iadev->irq, dev);
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);
	kfree(iadev);
}
static struct pci_device_id ia_pci_tbl[] = {
	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);

static struct pci_driver ia_driver = {
	.name		= DEV_LABEL,
	.id_table	= ia_pci_tbl,
	.probe		= ia_init_one,
	.remove		= __devexit_p(ia_remove_one),
};
static int __init ia_module_init(void)
{
	int ret;

	ret = pci_register_driver(&ia_driver);
	if (ret >= 0) {
		ia_timer.expires = jiffies + 3*HZ;
		add_timer(&ia_timer);
	} else
		printk(KERN_ERR DEV_LABEL ": no adapter found\n");
	return ret;
}

static void __exit ia_module_exit(void)
{
	pci_unregister_driver(&ia_driver);

	del_timer(&ia_timer);
}

module_init(ia_module_init);
module_exit(ia_module_exit);