1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
20       was originally written by Monalisa Agrawal at UNH. Now this driver 
21       supports a variety of variants of the Interphase ATM PCI (i)Chip adapter 
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
23       in terms of PHY type, the size of control memory and the size of 
24       packet memory. The following is the change log and history:
25      
26           Bugfix Mona's UBR driver.
27           Modify the basic memory allocation and dma logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add the flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to the board with 512K control memory.
34           Add the support of all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>  
44 #include <linux/kernel.h>  
45 #include <linux/mm.h>  
46 #include <linux/pci.h>  
47 #include <linux/errno.h>  
48 #include <linux/atm.h>  
49 #include <linux/atmdev.h>  
50 #include <linux/sonet.h>  
51 #include <linux/skbuff.h>  
52 #include <linux/time.h>  
53 #include <linux/delay.h>  
54 #include <linux/uio.h>  
55 #include <linux/init.h>  
56 #include <linux/wait.h>
57 #include <asm/system.h>  
58 #include <asm/io.h>  
59 #include <asm/atomic.h>  
60 #include <asm/uaccess.h>  
61 #include <asm/string.h>  
62 #include <asm/byteorder.h>  
63 #include <linux/vmalloc.h>
64 #include <linux/jiffies.h>
65 #include "iphase.h"               
66 #include "suni.h"                 
67 #define swap(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))  
68 struct suni_priv {
69         struct k_sonet_stats sonet_stats; /* link diagnostics */
70         unsigned char loop_mode;        /* loopback mode */
71         struct atm_dev *dev;            /* device back-pointer */
72         struct suni_priv *next;         /* next SUNI */
73 }; 
74 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
75
76 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
77 static void desc_dbg(IADEV *iadev);
78
79 static IADEV *ia_dev[8];
80 static struct atm_dev *_ia_dev[8];
81 static int iadev_count;
82 static void ia_led_timer(unsigned long arg);
83 static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
84 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
85 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
86 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
87             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
88
89 module_param(IA_TX_BUF, int, 0);
90 module_param(IA_TX_BUF_SZ, int, 0);
91 module_param(IA_RX_BUF, int, 0);
92 module_param(IA_RX_BUF_SZ, int, 0);
93 module_param(IADebugFlag, uint, 0644);
94
95 MODULE_LICENSE("GPL");
96
97 #if BITS_PER_LONG != 32
98 #  error FIXME: this driver only works on 32-bit platforms
99 #endif
100
101 /**************************** IA_LIB **********************************/
102
103 static void ia_init_rtn_q (IARTN_Q *que) 
104 {
105    que->next = NULL; 
106    que->tail = NULL; 
107 }
108
109 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
110 {
111    data->next = NULL;
112    if (que->next == NULL) 
113       que->next = que->tail = data;
114    else {
115       data->next = que->next;
116       que->next = data;
117    } 
118    return;
119 }
120
121 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
122    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
123    if (!entry) return -1;
124    entry->data = data;
125    entry->next = NULL;
126    if (que->next == NULL) 
127       que->next = que->tail = entry;
128    else {
129       que->tail->next = entry;
130       que->tail = que->tail->next;
131    }      
132    return 1;
133 }
134
135 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
136    IARTN_Q *tmpdata;
137    if (que->next == NULL)
138       return NULL;
139    tmpdata = que->next;
140    if ( que->next == que->tail)  
141       que->next = que->tail = NULL;
142    else 
143       que->next = que->next->next;
144    return tmpdata;
145 }
146
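/*
 * ia_hack_tcq: walk the transmit complete queue (TCQ) up to the hardware
 * write pointer.  For each completed descriptor, clear its timestamp,
 * decrement the owning VC's in-flight descriptor count and, for VCs below
 * the adapter rate limit, queue the entry on tx_return_q so ia_tx_poll()
 * can release the skb later.
 */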
147 static void ia_hack_tcq(IADEV *dev) {
148
149   u_short               desc1;
150   u_short               tcq_wr;
151   struct ia_vcc         *iavcc_r = NULL; 
152
153   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
154   while (dev->host_tcq_wr != tcq_wr) {
155      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
156      if (!desc1) ;
157      else if (!dev->desc_tbl[desc1 -1].timestamp) {
158         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
159         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
160      }                                 
161      else if (dev->desc_tbl[desc1 -1].timestamp) {
162         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
163            printk("IA: Fatal err in get_desc\n");
164            continue;
165         }
166         iavcc_r->vc_desc_cnt--;
167         dev->desc_tbl[desc1 -1].timestamp = 0;
168         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n", 
169                                    (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
170         if (iavcc_r->pcr < dev->rate_limit) {
171            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
172            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
173               printk("ia_hack_tcq: No memory available\n");
174         } 
175         dev->desc_tbl[desc1 -1].iavcc = NULL;
176         dev->desc_tbl[desc1 -1].txskb = NULL;
177      }
178      dev->host_tcq_wr += 2;
179      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
180         dev->host_tcq_wr = dev->ffL.tcq_st;
181   }
182 } /* ia_hack_tcq */
183
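/*
 * get_desc: recover descriptors whose age exceeds the owning VC's timeout,
 * then return the next free TX descriptor number (1-based) from the TCQ,
 * stamping it with the current jiffies.  Returns 0xFFFF when none is free.
 */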
184 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
185   u_short               desc_num, i;
186   struct sk_buff        *skb;
187   struct ia_vcc         *iavcc_r = NULL; 
188   unsigned long delta;
189   static unsigned long timer = 0;
190   int ltimeout;
191
192   ia_hack_tcq (dev);
193   if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
194      timer = jiffies; 
195      i=0;
196      while (i < dev->num_tx_desc) {
197         if (!dev->desc_tbl[i].timestamp) {
198            i++;
199            continue;
200         }
201         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
202         delta = jiffies - dev->desc_tbl[i].timestamp;
203         if (delta >= ltimeout) {
204            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
205            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
206               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
207            else 
208               dev->ffL.tcq_rd -= 2;
209            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
210            if (!(skb = dev->desc_tbl[i].txskb) || 
211                           !(iavcc_r = dev->desc_tbl[i].iavcc))
212               printk("Fatal err, desc table vcc or skb is NULL\n");
213            else 
214               iavcc_r->vc_desc_cnt--;
215            dev->desc_tbl[i].timestamp = 0;
216            dev->desc_tbl[i].iavcc = NULL;
217            dev->desc_tbl[i].txskb = NULL;
218         }
219         i++;
220      } /* while */
221   }
222   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
223      return 0xFFFF;
224     
225   /* Get the next available descriptor number from TCQ */
226   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
227
228   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
229      dev->ffL.tcq_rd += 2;
230      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
231      dev->ffL.tcq_rd = dev->ffL.tcq_st;
232      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
233         return 0xFFFF; 
234      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
235   }
236
237   /* get system time */
238   dev->desc_tbl[desc_num -1].timestamp = jiffies;
239   return desc_num;
240 }
241
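/*
 * clear_lockup: work around a hardware anomaly on ABR VCs.  Every fifth
 * call the scheduler state of the VC is sampled; if it looks stuck, the
 * segmentation engine is briefly halted, the VC is forced back to the
 * idle state and re-entered into the ABR schedule table.
 */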
242 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
243   u_char                foundLockUp;
244   vcstatus_t            *vcstatus;
245   u_short               *shd_tbl;
246   u_short               tempCellSlot, tempFract;
247   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
248   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
249   u_int  i;
250
251   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
252      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
253      vcstatus->cnt++;
254      foundLockUp = 0;
255      if( vcstatus->cnt == 0x05 ) {
256         abr_vc += vcc->vci;
257         eabr_vc += vcc->vci;
258         if( eabr_vc->last_desc ) {
259            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
260               /* Wait for 10 Micro sec */
261               udelay(10);
262               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
263                  foundLockUp = 1;
264            }
265            else {
266               tempCellSlot = abr_vc->last_cell_slot;
267               tempFract    = abr_vc->fraction;
268               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
269                          && (tempFract == dev->testTable[vcc->vci]->fract))
270                  foundLockUp = 1;                   
271               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
272               dev->testTable[vcc->vci]->fract = tempFract; 
273            }        
274         } /* last descriptor */            
275         vcstatus->cnt = 0;      
276      } /* vcstatus->cnt */
277         
278      if (foundLockUp) {
279         IF_ABR(printk("LOCK UP found\n");) 
280         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
281         /* Wait for 10 Micro sec */
282         udelay(10); 
283         abr_vc->status &= 0xFFF8;
284         abr_vc->status |= 0x0001;  /* state is idle */
285         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
286         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
287         if (i < dev->num_vc)
288            shd_tbl[i] = vcc->vci;
289         else
290            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
291         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
292         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
293         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
294         vcstatus->cnt = 0;
295      } /* foundLockUp */
296
297   } /* if an ABR VC */
298
299
300 }
301  
302 /*
303 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
304 **
305 **  +----+----+------------------+-------------------------------+
306 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
307 **  +----+----+------------------+-------------------------------+
308 ** 
309 **    R = reserved (written as 0)
310 **    NZ = 0 if 0 cells/sec; 1 otherwise
311 **
312 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
313 */
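/*
** Example: 100 cells/sec -> the highest set bit gives exponent 6 and the
** mantissa is (100 << 3) & 0x1ff = 0x120, so the encoding is
** NZ | (6 << 9) | 0x120 = 0x4d20, i.e. 1.5625 x 2^6 = 100 cells/sec.
*/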
314 static u16
315 cellrate_to_float(u32 cr)
316 {
317
318 #define NZ              0x4000
319 #define M_BITS          9               /* Number of bits in mantissa */
320 #define E_BITS          5               /* Number of bits in exponent */
321 #define M_MASK          0x1ff           
322 #define E_MASK          0x1f
323   u16   flot;
324   u32   tmp = cr & 0x00ffffff;
325   int   i   = 0;
326   if (cr == 0)
327      return 0;
328   while (tmp != 1) {
329      tmp >>= 1;
330      i++;
331   }
332   if (i == M_BITS)
333      flot = NZ | (i << M_BITS) | (cr & M_MASK);
334   else if (i < M_BITS)
335      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
336   else
337      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
338   return flot;
339 }
340
341 #if 0
342 /*
343 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
344 */
345 static u32
346 float_to_cellrate(u16 rate)
347 {
348   u32   exp, mantissa, cps;
349   if ((rate & NZ) == 0)
350      return 0;
351   exp = (rate >> M_BITS) & E_MASK;
352   mantissa = rate & M_MASK;
353   if (exp == 0)
354      return 1;
355   cps = (1 << M_BITS) | mantissa;
356   if (exp == M_BITS)
357      cps = cps;
358   else if (exp > M_BITS)
359      cps <<= (exp - M_BITS);
360   else
361      cps >>= (M_BITS - exp);
362   return cps;
363 }
364 #endif 
365
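/*
 * init_abr_vc: fill *srv_p with the driver's default ABR service class
 * parameters (PCR = line rate, MCR = 0, plus fixed ICR/TBE/FRTT/RIF/RDF/
 * Nrm/Trm/CDF/ADTF values) for VCs opened without explicit settings.
 */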
366 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
367   srv_p->class_type = ATM_ABR;
368   srv_p->pcr        = dev->LineRate;
369   srv_p->mcr        = 0;
370   srv_p->icr        = 0x055cb7;
371   srv_p->tbe        = 0xffffff;
372   srv_p->frtt       = 0x3a;
373   srv_p->rif        = 0xf;
374   srv_p->rdf        = 0xb;
375   srv_p->nrm        = 0x4;
376   srv_p->trm        = 0x7;
377   srv_p->cdf        = 0x3;
378   srv_p->adtf       = 50;
379 }
380
381 static int
382 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
383                                                 struct atm_vcc *vcc, u8 flag)
384 {
385   f_vc_abr_entry  *f_abr_vc;
386   r_vc_abr_entry  *r_abr_vc;
387   u32           icr;
388   u8            trm, nrm, crm;
389   u16           adtf, air, *ptr16;      
390   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
391   f_abr_vc += vcc->vci;       
392   switch (flag) {
393      case 1: /* FFRED initialization */
394 #if 0  /* sanity check */
395        if (srv_p->pcr == 0)
396           return INVALID_PCR;
397        if (srv_p->pcr > dev->LineRate)
398           srv_p->pcr = dev->LineRate;
399        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
400           return MCR_UNAVAILABLE;
401        if (srv_p->mcr > srv_p->pcr)
402           return INVALID_MCR;
403        if (!(srv_p->icr))
404           srv_p->icr = srv_p->pcr;
405        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
406           return INVALID_ICR;
407        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
408           return INVALID_TBE;
409        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
410           return INVALID_FRTT;
411        if (srv_p->nrm > MAX_NRM)
412           return INVALID_NRM;
413        if (srv_p->trm > MAX_TRM)
414           return INVALID_TRM;
415        if (srv_p->adtf > MAX_ADTF)
416           return INVALID_ADTF;
417        else if (srv_p->adtf == 0)
418           srv_p->adtf = 1;
419        if (srv_p->cdf > MAX_CDF)
420           return INVALID_CDF;
421        if (srv_p->rif > MAX_RIF)
422           return INVALID_RIF;
423        if (srv_p->rdf > MAX_RDF)
424           return INVALID_RDF;
425 #endif
426        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
427        f_abr_vc->f_vc_type = ABR;
428        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
429                                   /* i.e 2**n = 2 << (n-1) */
430        f_abr_vc->f_nrm = nrm << 8 | nrm;
431        trm = 100000/(2 << (16 - srv_p->trm));
432        if ( trm == 0) trm = 1;
433        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
434        crm = srv_p->tbe / nrm;
435        if (crm == 0) crm = 1;
436        f_abr_vc->f_crm = crm & 0xff;
437        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
438        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
439                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
440                                 (1000000/(srv_p->frtt/srv_p->tbe)));
441        f_abr_vc->f_icr = cellrate_to_float(icr);
442        adtf = (10000 * srv_p->adtf)/8192;
443        if (adtf == 0) adtf = 1; 
444        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
445        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
446        f_abr_vc->f_acr = f_abr_vc->f_icr;
447        f_abr_vc->f_status = 0x0042;
448        break;
449     case 0: /* RFRED initialization */  
450        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
451        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
452        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
453        r_abr_vc += vcc->vci;
454        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
455        air = srv_p->pcr << (15 - srv_p->rif);
456        if (air == 0) air = 1;
457        r_abr_vc->r_air = cellrate_to_float(air);
458        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
459        dev->sum_mcr        += srv_p->mcr;
460        dev->n_abr++;
461        break;
462     default:
463        break;
464   }
465   return        0;
466 }
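/*
 * ia_cbr_setup: reserve CBR schedule table entries for this VC.  The PCR is
 * converted into a number of Granularity-sized entries, which are spread as
 * evenly as possible through the table by probing around each ideal slot for
 * a free location.  CBR is enabled in STPARMS when the first CBR VC appears.
 */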
467 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
468    u32 rateLow=0, rateHigh, rate;
469    int entries;
470    struct ia_vcc *ia_vcc;
471
472    int   idealSlot =0, testSlot, toBeAssigned, inc;
473    u32   spacing;
474    u16  *SchedTbl, *TstSchedTbl;
475    u16  cbrVC, vcIndex;
476    u32   fracSlot    = 0;
477    u32   sp_mod      = 0;
478    u32   sp_mod2     = 0;
479
480    /* IpAdjustTrafficParams */
481    if (vcc->qos.txtp.max_pcr <= 0) {
482       IF_ERR(printk("PCR for CBR not defined\n");)
483       return -1;
484    }
485    rate = vcc->qos.txtp.max_pcr;
486    entries = rate / dev->Granularity;
487    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
488                                 entries, rate, dev->Granularity);)
489    if (entries < 1)
490       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
491    rateLow  =  entries * dev->Granularity;
492    rateHigh = (entries + 1) * dev->Granularity;
493    if (3*(rate - rateLow) > (rateHigh - rate))
494       entries++;
495    if (entries > dev->CbrRemEntries) {
496       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
497       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
498                                        entries, dev->CbrRemEntries);)
499       return -EBUSY;
500    }   
501
502    ia_vcc = INPH_IA_VCC(vcc);
503    ia_vcc->NumCbrEntry = entries; 
504    dev->sum_mcr += entries * dev->Granularity; 
505    /* IaFFrednInsertCbrSched */
506    // Starting at an arbitrary location, place the entries into the table
507    // as smoothly as possible
508    cbrVC   = 0;
509    spacing = dev->CbrTotEntries / entries;
510    sp_mod  = dev->CbrTotEntries % entries; // get modulo
511    toBeAssigned = entries;
512    fracSlot = 0;
513    vcIndex  = vcc->vci;
514    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
515    while (toBeAssigned)
516    {
517       // If this is the first time, start the table loading for this connection
518       // as close to entryPoint as possible.
519       if (toBeAssigned == entries)
520       {
521          idealSlot = dev->CbrEntryPt;
522          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
523          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
524             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
525       } else {
526          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
527          // in the table that would be  smoothest
528          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
529          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
530       }
531       if (idealSlot >= (int)dev->CbrTotEntries) 
532          idealSlot -= dev->CbrTotEntries;  
533       // Continuously check around this ideal value until a null
534       // location is encountered.
535       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
536       inc = 0;
537       testSlot = idealSlot;
538       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
539       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
540                                 testSlot, (u32)TstSchedTbl,toBeAssigned);) 
541       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
542       while (cbrVC)  // If another VC at this location, we have to keep looking
543       {
544           inc++;
545           testSlot = idealSlot - inc;
546           if (testSlot < 0) { // Wrap if necessary
547              testSlot += dev->CbrTotEntries;
548              IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
549                                                        (u32)SchedTbl,testSlot);)
550           }
551           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
552           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
553           if (!cbrVC)
554              break;
555           testSlot = idealSlot + inc;
556           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
557              testSlot -= dev->CbrTotEntries;
558              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
559              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
560                                             testSlot, toBeAssigned);)
561           } 
562           // set table index and read in value
563           TstSchedTbl = (u16*)(SchedTbl + testSlot);
564           IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
565                           (u32)TstSchedTbl,cbrVC,inc);) 
566           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
567        } /* while */
568        // Move this VCI number into this location of the CBR Sched table.
569        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(vcIndex)); /* one u16 table entry */
570        dev->CbrRemEntries--;
571        toBeAssigned--;
572    } /* while */ 
573
574    /* IaFFrednCbrEnable */
575    dev->NumEnabledCBR++;
576    if (dev->NumEnabledCBR == 1) {
577        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
578        IF_CBR(printk("CBR is enabled\n");)
579    }
580    return 0;
581 }
582 static void ia_cbrVc_close (struct atm_vcc *vcc) {
583    IADEV *iadev;
584    u16 *SchedTbl, NullVci = 0;
585    u32 i, NumFound;
586
587    iadev = INPH_IA_DEV(vcc->dev);
588    iadev->NumEnabledCBR--;
589    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
590    if (iadev->NumEnabledCBR == 0) {
591       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
592       IF_CBR (printk("CBR support disabled\n");)
593    }
594    NumFound = 0;
595    for (i=0; i < iadev->CbrTotEntries; i++)
596    {
597       if (*SchedTbl == vcc->vci) {
598          iadev->CbrRemEntries++;
599          *SchedTbl = NullVci;
600          IF_CBR(NumFound++;)
601       }
602       SchedTbl++;   
603    } 
604    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
605 }
606
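/* Number of free TX descriptors currently available in the TCQ. */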
607 static int ia_avail_descs(IADEV *iadev) {
608    int tmp = 0;
609    ia_hack_tcq(iadev);
610    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
611       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
612    else
613       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
614                    iadev->ffL.tcq_st) / 2;
615    return tmp;
616 }    
617
618 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
619
620 static int ia_que_tx (IADEV *iadev) { 
621    struct sk_buff *skb;
622    int num_desc;
623    struct atm_vcc *vcc;
624    struct ia_vcc *iavcc;
625    num_desc = ia_avail_descs(iadev);
626
627    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
628       if (!(vcc = ATM_SKB(skb)->vcc)) {
629          dev_kfree_skb_any(skb);
630          printk("ia_que_tx: Null vcc\n");
631          break;
632       }
633       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
634          dev_kfree_skb_any(skb);
635          printk("Free the SKB on closed vci %d \n", vcc->vci);
636          break;
637       }
638       iavcc = INPH_IA_VCC(vcc);
639       if (ia_pkt_tx (vcc, skb)) {
640          skb_queue_head(&iadev->tx_backlog, skb);
641       }
642       num_desc--;
643    }
644    return 0;
645 }
646
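/*
 * ia_tx_poll: drain the tx_return_q filled by ia_hack_tcq().  For each
 * returned skb, pop (or free) the earlier skbs still queued on the VC's
 * txing_skb list and the skb itself, then try to send out anything that
 * is waiting on the TX backlog.
 */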
647 static void ia_tx_poll (IADEV *iadev) {
648    struct atm_vcc *vcc = NULL;
649    struct sk_buff *skb = NULL, *skb1 = NULL;
650    struct ia_vcc *iavcc;
651    IARTN_Q *  rtne;
652
653    ia_hack_tcq(iadev);
654    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
655        skb = rtne->data.txskb;
656        if (!skb) {
657            printk("ia_tx_poll: skb is null\n");
658            goto out;
659        }
660        vcc = ATM_SKB(skb)->vcc;
661        if (!vcc) {
662            printk("ia_tx_poll: vcc is null\n");
663            dev_kfree_skb_any(skb);
664            goto out;
665        }
666
667        iavcc = INPH_IA_VCC(vcc);
668        if (!iavcc) {
669            printk("ia_tx_poll: iavcc is null\n");
670            dev_kfree_skb_any(skb);
671            goto out;
672        }
673
674        skb1 = skb_dequeue(&iavcc->txing_skb);
675        while (skb1 && (skb1 != skb)) {
676           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
677              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
678           }
679           IF_ERR(printk("Release the SKB not match\n");)
680           if ((vcc->pop) && (skb1->len != 0))
681           {
682              vcc->pop(vcc, skb1);
683              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
684                                                           (long)skb1);)
685           }
686           else 
687              dev_kfree_skb_any(skb1);
688           skb1 = skb_dequeue(&iavcc->txing_skb);
689        }                                                        
690        if (!skb1) {
691           IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
692           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
693           break;
694        }
695        if ((vcc->pop) && (skb->len != 0))
696        {
697           vcc->pop(vcc, skb);
698           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
699        }
700        else 
701           dev_kfree_skb_any(skb);
702        kfree(rtne);
703     }
704     ia_que_tx(iadev);
705 out:
706     return;
707 }
708 #if 0
709 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
710 {
711         u32     t;
712         int     i;
713         /*
714          * Issue a command to enable writes to the NOVRAM
715          */
716         NVRAM_CMD (EXTEND + EWEN);
717         NVRAM_CLR_CE;
718         /*
719          * issue the write command
720          */
721         NVRAM_CMD(IAWRITE + addr);
722         /* 
723          * Send the data, starting with D15, then D14, and so on for 16 bits
724          */
725         for (i=15; i>=0; i--) {
726                 NVRAM_CLKOUT (val & 0x8000);
727                 val <<= 1;
728         }
729         NVRAM_CLR_CE;
730         CFG_OR(NVCE);
731         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
732         while (!(t & NVDO))
733                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
734
735         NVRAM_CLR_CE;
736         /*
737          * disable writes again
738          */
739         NVRAM_CMD(EXTEND + EWDS)
740         NVRAM_CLR_CE;
741         CFG_AND(~NVDI);
742 }
743 #endif
744
745 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
746 {
747         u_short val;
748         u32     t;
749         int     i;
750         /*
751          * Read the first bit that was clocked with the falling edge of the
752          * the last command data clock
753          */
754         NVRAM_CMD(IAREAD + addr);
755         /*
756          * Now read the rest of the bits, the next bit read is D14, then D13,
757          * and so on.
758          */
759         val = 0;
760         for (i=15; i>=0; i--) {
761                 NVRAM_CLKIN(t);
762                 val |= (t << i);
763         }
764         NVRAM_CLR_CE;
765         CFG_AND(~NVDI);
766         return val;
767 }
768
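/*
 * ia_hw_type: read the adapter EEPROM to determine the amount of packet
 * memory and the front end fitted, size the TX/RX buffer pools accordingly
 * and derive the line rate for the PHY type.
 */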
769 static void ia_hw_type(IADEV *iadev) {
770    u_short memType = ia_eeprom_get(iadev, 25);   
771    iadev->memType = memType;
772    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
773       iadev->num_tx_desc = IA_TX_BUF;
774       iadev->tx_buf_sz = IA_TX_BUF_SZ;
775       iadev->num_rx_desc = IA_RX_BUF;
776       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
777    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
778       if (IA_TX_BUF == DFL_TX_BUFFERS)
779         iadev->num_tx_desc = IA_TX_BUF / 2;
780       else 
781         iadev->num_tx_desc = IA_TX_BUF;
782       iadev->tx_buf_sz = IA_TX_BUF_SZ;
783       if (IA_RX_BUF == DFL_RX_BUFFERS)
784         iadev->num_rx_desc = IA_RX_BUF / 2;
785       else
786         iadev->num_rx_desc = IA_RX_BUF;
787       iadev->rx_buf_sz = IA_RX_BUF_SZ;
788    }
789    else {
790       if (IA_TX_BUF == DFL_TX_BUFFERS) 
791         iadev->num_tx_desc = IA_TX_BUF / 8;
792       else
793         iadev->num_tx_desc = IA_TX_BUF;
794       iadev->tx_buf_sz = IA_TX_BUF_SZ;
795       if (IA_RX_BUF == DFL_RX_BUFFERS)
796         iadev->num_rx_desc = IA_RX_BUF / 8;
797       else
798         iadev->num_rx_desc = IA_RX_BUF;
799       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
800    } 
801    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
802    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
803          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
804          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
805
806 #if 0
807    if ((memType & FE_MASK) == FE_SINGLE_MODE)
808       iadev->phy_type = PHY_OC3C_S;
809    else if ((memType & FE_MASK) == FE_UTP_OPTION)
810       iadev->phy_type = PHY_UTP155;
811    else
812      iadev->phy_type = PHY_OC3C_M;
813 #endif
814    
815    iadev->phy_type = memType & FE_MASK;
816    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
817                                          memType,iadev->phy_type);)
818    if (iadev->phy_type == FE_25MBIT_PHY) 
819       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
820    else if (iadev->phy_type == FE_DS3_PHY)
821       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
822    else if (iadev->phy_type == FE_E3_PHY) 
823       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
824    else
825        iadev->LineRate = (u32)(ATM_OC3_PCR);
826    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
827
828 }
829
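/*
 * IaFrontEndIntr: front end (PHY) interrupt handler.  Reads the framer
 * interrupt/status registers of the fitted PHY (25 Mbit, DS3, E3 or SUNI
 * OC3) and updates iadev->carrier_detect from the loss-of-signal bit.
 */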
830 static void IaFrontEndIntr(IADEV *iadev) {
831   volatile IA_SUNI *suni;
832   volatile ia_mb25_t *mb25;
833   volatile suni_pm7345_t *suni_pm7345;
834   u32 intr_status;
835   u_int frmr_intr;
836
837   if(iadev->phy_type & FE_25MBIT_PHY) {
838      mb25 = (ia_mb25_t*)iadev->phy;
839      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
840   } else if (iadev->phy_type & FE_DS3_PHY) {
841      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
842      /* clear FRMR interrupts */
843      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat; 
844      iadev->carrier_detect =  
845            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
846   } else if (iadev->phy_type & FE_E3_PHY ) {
847      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
848      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
849      iadev->carrier_detect =
850            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
851   }
852   else { 
853      suni = (IA_SUNI *)iadev->phy;
854      intr_status = suni->suni_rsop_status & 0xff;
855      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
856   }
857   if (iadev->carrier_detect)
858     printk("IA: SUNI carrier detected\n");
859   else
860     printk("IA: SUNI carrier lost signal\n"); 
861   return;
862 }
863
864 static void ia_mb25_init (IADEV *iadev)
865 {
866    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
867 #if 0
868    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
869 #endif
870    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
871    mb25->mb25_diag_control = 0;
872    /*
873     * Initialize carrier detect state
874     */
875    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
876    return;
877 }                   
878
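/*
 * ia_suni_pm7345_init: bring up the PM7345 front end used on the DS3 and
 * E3 boards - program the framer and the receive/transmit cell processors
 * and enable the loss-of-signal interrupt.
 */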
879 static void ia_suni_pm7345_init (IADEV *iadev)
880 {
881    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
882    if (iadev->phy_type & FE_DS3_PHY)
883    {
884       iadev->carrier_detect = 
885           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); 
886       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
887       suni_pm7345->suni_ds3_frm_cfg = 1;
888       suni_pm7345->suni_ds3_tran_cfg = 1;
889       suni_pm7345->suni_config = 0;
890       suni_pm7345->suni_splr_cfg = 0;
891       suni_pm7345->suni_splt_cfg = 0;
892    }
893    else 
894    {
895       iadev->carrier_detect = 
896           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
897       suni_pm7345->suni_e3_frm_fram_options = 0x4;
898       suni_pm7345->suni_e3_frm_maint_options = 0x20;
899       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
900       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
901       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
902       suni_pm7345->suni_e3_tran_fram_options = 0x1;
903       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
904       suni_pm7345->suni_splr_cfg = 0x41;
905       suni_pm7345->suni_splt_cfg = 0x41;
906    } 
907    /*
908     * Enable RSOP loss of signal interrupt.
909     */
910    suni_pm7345->suni_intr_enbl = 0x28;
911  
912    /*
913     * Clear error counters
914     */
915    suni_pm7345->suni_id_reset = 0;
916
917    /*
918     * Clear "PMCTST" in master test register.
919     */
920    suni_pm7345->suni_master_test = 0;
921
922    suni_pm7345->suni_rxcp_ctrl = 0x2c;
923    suni_pm7345->suni_rxcp_fctrl = 0x81;
924  
925    suni_pm7345->suni_rxcp_idle_pat_h1 =
926         suni_pm7345->suni_rxcp_idle_pat_h2 =
927         suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
928    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
929  
930    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
931    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
932    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
933    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
934  
935    suni_pm7345->suni_rxcp_cell_pat_h1 =
936         suni_pm7345->suni_rxcp_cell_pat_h2 =
937         suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
938    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
939  
940    suni_pm7345->suni_rxcp_cell_mask_h1 =
941         suni_pm7345->suni_rxcp_cell_mask_h2 =
942         suni_pm7345->suni_rxcp_cell_mask_h3 =
943         suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
944  
945    suni_pm7345->suni_txcp_ctrl = 0xa4;
946    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
947    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
948  
949    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
950                                  SUNI_PM7345_CLB |
951                                  SUNI_PM7345_DLB |
952                                   SUNI_PM7345_PLB);
953 #ifdef __SNMP__
954    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
955 #endif /* __SNMP__ */
956    return;
957 }
958
959
960 /***************************** IA_LIB END *****************************/
961     
962 #ifdef CONFIG_ATM_IA_DEBUG
963 static int tcnter = 0;
964 static void xdump( u_char*  cp, int  length, char*  prefix )
965 {
966     int col, count;
967     u_char prntBuf[120];
968     u_char*  pBuf = prntBuf;
969     count = 0;
970     while(count < length){
971         pBuf += sprintf( pBuf, "%s", prefix );
972         for(col = 0;count + col < length && col < 16; col++){
973             if (col != 0 && (col % 4) == 0)
974                 pBuf += sprintf( pBuf, " " );
975             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
976         }
977         while(col++ < 16){      /* pad end of buffer with blanks */
978             if ((col % 4) == 0)
979                 sprintf( pBuf, " " );
980             pBuf += sprintf( pBuf, "   " );
981         }
982         pBuf += sprintf( pBuf, "  " );
983         for(col = 0;count + col < length && col < 16; col++){
984             if (isprint((int)cp[count + col]))
985                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
986             else
987                 pBuf += sprintf( pBuf, "." );
988                 }
989         sprintf( pBuf, "\n" );
990         // SPrint(prntBuf);
991         printk("%s", prntBuf);
992         count += col;
993         pBuf = prntBuf;
994     }
995
996 }  /* close xdump(... */
997 #endif /* CONFIG_ATM_IA_DEBUG */
998
999   
1000 static struct atm_dev *ia_boards = NULL;  
1001   
1002 #define ACTUAL_RAM_BASE \
1003         RAM_BASE*((iadev->mem)/(128 * 1024))  
1004 #define ACTUAL_SEG_RAM_BASE \
1005         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1006 #define ACTUAL_REASS_RAM_BASE \
1007         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1008   
1009   
1010 /*-- some utilities and memory allocation stuff will come here -------------*/  
1011   
1012 static void desc_dbg(IADEV *iadev) {
1013
1014   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1015   u32 i;
1016   void __iomem *tmp;
1017   // regval = readl((u32)ia_cmds->maddr);
1018   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1019   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1020                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1021                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1022   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1023                    iadev->ffL.tcq_rd);
1024   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1025   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1026   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1027   i = 0;
1028   while (tcq_st_ptr != tcq_ed_ptr) {
1029       tmp = iadev->seg_ram+tcq_st_ptr;
1030       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1031       tcq_st_ptr += 2;
1032   }
1033   for(i=0; i <iadev->num_tx_desc; i++)
1034       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1035 } /* desc_dbg */
1036   
1037   
1038 /*----------------------------- Receiving side stuff --------------------------*/  
1039  
1040 static void rx_excp_rcvd(struct atm_dev *dev)  
1041 {  
1042 #if 0 /* closing the receiving side will cause too many excp int */  
1043   IADEV *iadev;  
1044   u_short state;  
1045   u_short excpq_rd_ptr;  
1046   //u_short *ptr;  
1047   int vci, error = 1;  
1048   iadev = INPH_IA_DEV(dev);  
1049   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1050   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1051   { printk("state = %x \n", state); 
1052         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1053  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1054         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1055             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1056         // TODO: update exception stat
1057         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1058         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1059         // pwang_test
1060         excpq_rd_ptr += 4;  
1061         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1062             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1063         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1064         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1065   }  
1066 #endif
1067 }  
1068   
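/* Return a receive buffer descriptor to the free descriptor queue. */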
1069 static void free_desc(struct atm_dev *dev, int desc)  
1070 {  
1071         IADEV *iadev;  
1072         iadev = INPH_IA_DEV(dev);  
1073         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1074         iadev->rfL.fdq_wr +=2;
1075         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1076                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1077         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1078 }  
1079   
1080   
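/*
 * rx_pkt: take one completed descriptor off the packet complete queue,
 * validate it and its VC, allocate an skb for the PDU and queue a receive
 * DLE so the DMA engine copies the packet out of adapter packet memory.
 */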
1081 static int rx_pkt(struct atm_dev *dev)  
1082 {  
1083         IADEV *iadev;  
1084         struct atm_vcc *vcc;  
1085         unsigned short status;  
1086         struct rx_buf_desc __iomem *buf_desc_ptr;  
1087         int desc;   
1088         struct dle* wr_ptr;  
1089         int len;  
1090         struct sk_buff *skb;  
1091         u_int buf_addr, dma_addr;  
1092
1093         iadev = INPH_IA_DEV(dev);  
1094         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1095         {  
1096             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1097             return -EINVAL;  
1098         }  
1099         /* mask 1st 3 bits to get the actual descno. */  
1100         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1101         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1102                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1103               printk(" pcq_wr_ptr = 0x%x\n",
1104                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1105         /* update the read pointer - maybe we should do this at the end */  
1106         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1107                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1108         else  
1109                 iadev->rfL.pcq_rd += 2;
1110         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1111   
1112         /* get the buffer desc entry.  
1113                 update stuff. - doesn't seem to be any update necessary  
1114         */  
1115         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1116         /* make the ptr point to the corresponding buffer desc entry */  
1117         buf_desc_ptr += desc;     
1118         if (!desc || (desc > iadev->num_rx_desc) || 
1119                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1120             free_desc(dev, desc);
1121             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1122             return -1;
1123         }
1124         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1125         if (!vcc)  
1126         {      
1127                 free_desc(dev, desc); 
1128                 printk("IA: null vcc, drop PDU\n");  
1129                 return -1;  
1130         }  
1131           
1132   
1133         /* might want to check the status bits for errors */  
1134         status = (u_short) (buf_desc_ptr->desc_mode);  
1135         if (status & (RX_CER | RX_PTE | RX_OFL))  
1136         {  
1137                 atomic_inc(&vcc->stats->rx_err);
1138                 IF_ERR(printk("IA: bad packet, dropping it");)  
1139                 if (status & RX_CER) { 
1140                     IF_ERR(printk(" cause: packet CRC error\n");)
1141                 }
1142                 else if (status & RX_PTE) {
1143                     IF_ERR(printk(" cause: packet time out\n");)
1144                 }
1145                 else {
1146                     IF_ERR(printk(" cause: buffer over flow\n");)
1147                 }
1148                 goto out_free_desc;
1149         }  
1150   
1151         /*  
1152                 build DLE.        
1153         */  
1154   
1155         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1156         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1157         len = dma_addr - buf_addr;  
1158         if (len > iadev->rx_buf_sz) {
1159            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1160            atomic_inc(&vcc->stats->rx_err);
1161            goto out_free_desc;
1162         }
1163                   
1164         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1165            if (vcc->vci < 32)
1166               printk("Drop control packets\n");
1167            goto out_free_desc;
1168         }
1169         skb_put(skb,len);  
1170         // pwang_test
1171         ATM_SKB(skb)->vcc = vcc;
1172         ATM_DESC(skb) = desc;        
1173         skb_queue_tail(&iadev->rx_dma_q, skb);  
1174
1175         /* Build the DLE structure */  
1176         wr_ptr = iadev->rx_dle_q.write;  
1177         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1178                 len, PCI_DMA_FROMDEVICE);
1179         wr_ptr->local_pkt_addr = buf_addr;  
1180         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1181         wr_ptr->mode = DMA_INT_ENABLE;  
1182   
1183         /* should take care of wrap around here too. */  
1184         if(++wr_ptr == iadev->rx_dle_q.end)
1185              wr_ptr = iadev->rx_dle_q.start;
1186         iadev->rx_dle_q.write = wr_ptr;  
1187         udelay(1);  
1188         /* Increment transaction counter */  
1189         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1190 out:    return 0;  
1191 out_free_desc:
1192         free_desc(dev, desc);
1193         goto out;
1194 }  
1195   
1196 static void rx_intr(struct atm_dev *dev)  
1197 {  
1198   IADEV *iadev;  
1199   u_short status;  
1200   u_short state, i;  
1201   
1202   iadev = INPH_IA_DEV(dev);  
1203   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1204   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1205   if (status & RX_PKT_RCVD)  
1206   {  
1207         /* do something */  
1208         /* Basically received an interrupt for receiving a packet.  
1209         A descriptor would have been written to the packet complete   
1210         queue. Get all the descriptors and set up dma to move the   
1211         packets until the packet complete queue is empty.  
1212         */  
1213         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1214         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1215         while(!(state & PCQ_EMPTY))  
1216         {  
1217              rx_pkt(dev);  
1218              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1219         }  
1220         iadev->rxing = 1;
1221   }  
1222   if (status & RX_FREEQ_EMPT)  
1223   {   
1224      if (iadev->rxing) {
1225         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1226         iadev->rx_tmp_jif = jiffies; 
1227         iadev->rxing = 0;
1228      } 
1229      else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1230                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1231         for (i = 1; i <= iadev->num_rx_desc; i++)
1232                free_desc(dev, i);
1233         printk("Test logic RUN!!!!\n");
1234         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1235         iadev->rxing = 1;
1236      }
1237      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1238   }  
1239
1240   if (status & RX_EXCP_RCVD)  
1241   {  
1242         /* probably need to handle the exception queue also. */  
1243         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1244         rx_excp_rcvd(dev);  
1245   }  
1246
1247
1248   if (status & RX_RAW_RCVD)  
1249   {  
1250         /* need to handle the raw incoming cells. This depends on   
1251         whether we have programmed to receive the raw cells or not.  
1252         Else ignore. */  
1253         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1254   }  
1255 }  
1256   
1257   
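/*
 * rx_dle_intr: run when receive DMA list entries complete.  For each DMAed
 * skb, free its buffer descriptor, trim the skb to the length given in the
 * AAL5 trailer, push it up the stack and, if reception had stalled on an
 * empty free queue, unmask the reassembly interrupts again.
 */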
1258 static void rx_dle_intr(struct atm_dev *dev)  
1259 {  
1260   IADEV *iadev;  
1261   struct atm_vcc *vcc;   
1262   struct sk_buff *skb;  
1263   int desc;  
1264   u_short state;   
1265   struct dle *dle, *cur_dle;  
1266   u_int dle_lp;  
1267   int len;
1268   iadev = INPH_IA_DEV(dev);  
1269  
1270   /* free all the dles done, that is just update our own dle read pointer   
1271         - do we really need to do this. Think not. */  
1272   /* DMA is done, just get all the receive buffers from the rx dma queue  
1273         and push them up to the higher layer protocol. Also free the desc  
1274         associated with the buffer. */  
1275   dle = iadev->rx_dle_q.read;  
1276   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1277   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1278   while(dle != cur_dle)  
1279   {  
1280       /* free the DMAed skb */  
1281       skb = skb_dequeue(&iadev->rx_dma_q);  
1282       if (!skb)  
1283          goto INCR_DLE;
1284       desc = ATM_DESC(skb);
1285       free_desc(dev, desc);  
1286                
1287       if (!(len = skb->len))
1288       {  
1289           printk("rx_dle_intr: skb len 0\n");  
1290           dev_kfree_skb_any(skb);  
1291       }  
1292       else  
1293       {  
1294           struct cpcs_trailer *trailer;
1295           u_short length;
1296           struct ia_vcc *ia_vcc;
1297
1298           pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1299                 len, PCI_DMA_FROMDEVICE);
1300           /* no VCC related housekeeping done as yet. lets see */  
1301           vcc = ATM_SKB(skb)->vcc;
1302           if (!vcc) {
1303               printk("IA: null vcc\n");  
1304               dev_kfree_skb_any(skb);
1305               goto INCR_DLE;
1306           }
1307           ia_vcc = INPH_IA_VCC(vcc);
1308           if (ia_vcc == NULL)
1309           {
1310              atomic_inc(&vcc->stats->rx_err);
1311              dev_kfree_skb_any(skb);
1312              atm_return(vcc, atm_guess_pdu2truesize(len));
1313              goto INCR_DLE;
1314            }
1315           // get real pkt length  pwang_test
1316           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1317                                  skb->len - sizeof(*trailer));
1318           length =  swap(trailer->length);
1319           if ((length > iadev->rx_buf_sz) || (length > 
1320                               (skb->len - sizeof(struct cpcs_trailer))))
1321           {
1322              atomic_inc(&vcc->stats->rx_err);
1323              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1324                                                             length, skb->len);)
1325              dev_kfree_skb_any(skb);
1326              atm_return(vcc, atm_guess_pdu2truesize(len));
1327              goto INCR_DLE;
1328           }
1329           skb_trim(skb, length);
1330           
1331           /* Display the packet */  
1332           IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1333           xdump(skb->data, skb->len, "RX: ");
1334           printk("\n");)
1335
1336           IF_RX(printk("rx_dle_intr: skb push");)  
1337           vcc->push(vcc,skb);  
1338           atomic_inc(&vcc->stats->rx);
1339           iadev->rx_pkt_cnt++;
1340       }  
1341 INCR_DLE:
1342       if (++dle == iadev->rx_dle_q.end)  
1343           dle = iadev->rx_dle_q.start;  
1344   }  
1345   iadev->rx_dle_q.read = dle;  
1346   
1347   /* if the interrupts are masked because there were no free desc available,  
1348                 unmask them now. */ 
1349   if (!iadev->rxing) {
1350      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1351      if (!(state & FREEQ_EMPTY)) {
1352         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1353         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1354                                       iadev->reass_reg+REASS_MASK_REG);
1355         iadev->rxing++; 
1356      }
1357   }
1358 }  
1359   
1360   
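/*
 * open_rx: mark the VC valid in the receive VC table, set up ABR or UBR
 * reassembly state for it and remember the vcc in rx_open[] so incoming
 * PDUs can be matched to it later.
 */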
1361 static int open_rx(struct atm_vcc *vcc)  
1362 {  
1363         IADEV *iadev;  
1364         u_short __iomem *vc_table;  
1365         u_short __iomem *reass_ptr;  
1366         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1367
1368         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1369         iadev = INPH_IA_DEV(vcc->dev);  
1370         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1371            if (iadev->phy_type & FE_25MBIT_PHY) {
1372                printk("IA: ABR not supported\n");
1373                return -EINVAL; 
1374            }
1375         }
1376         /* Make only this VCI in the vc table valid and let all   
1377                 others be invalid entries */  
1378         vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1379         vc_table += vcc->vci;
1380         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1381
1382         *vc_table = vcc->vci << 6;
1383         /* Also keep a list of open rx vcs so that we can attach them with  
1384                 incoming PDUs later. */  
1385         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1386                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1387         {  
1388                 srv_cls_param_t srv_p;
1389                 init_abr_vc(iadev, &srv_p);
1390                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1391         } 
1392         else {  /* for UBR  later may need to add CBR logic */
1393                 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1394                 reass_ptr += vcc->vci;
1395                 *reass_ptr = NO_AAL5_PKT;
1396         }
1397         
1398         if (iadev->rx_open[vcc->vci])  
1399                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1400                         vcc->dev->number, vcc->vci);  
1401         iadev->rx_open[vcc->vci] = vcc;  
1402         return 0;  
1403 }  
1404   
1405 static int rx_init(struct atm_dev *dev)  
1406 {  
1407         IADEV *iadev;  
1408         struct rx_buf_desc __iomem *buf_desc_ptr;  
1409         unsigned long rx_pkt_start = 0;  
1410         void *dle_addr;  
1411         struct abr_vc_table  *abr_vc_table; 
1412         u16 *vc_table;  
1413         u16 *reass_table;  
1414         u16 *ptr16;
1415         int i,j, vcsize_sel;  
1416         u_short freeq_st_adr;  
1417         u_short *freeq_start;  
1418   
1419         iadev = INPH_IA_DEV(dev);  
1420   //    spin_lock_init(&iadev->rx_lock); 
1421   
1422         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1423         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1424                                         &iadev->rx_dle_dma);  
1425         if (!dle_addr)  {  
1426                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1427                 goto err_out;
1428         }
1429         iadev->rx_dle_q.start = (struct dle*)dle_addr;  
1430         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1431         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1432         iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1433         /* the end of the dle q points to the entry after the last  
1434         DLE that can be used. */  
1435   
1436         /* write the upper 20 bits of the start address to rx list address register */  
1437         writel(iadev->rx_dle_dma & 0xfffff000,
1438                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1439         IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n", 
1440                       (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR), 
1441                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1442         printk("Rx Dle list addr: 0x%08x value: 0x%0x\n", 
1443                       (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR), 
1444                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1445   
1446         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1447         writew(0, iadev->reass_reg+MODE_REG);  
1448         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1449   
1450         /* Receive side control memory map  
1451            -------------------------------  
1452   
1453                 Buffer descr    0x0000 (736 - 23K)  
1454                 VP Table        0x5c00 (256 - 512)  
1455                 Except q        0x5e00 (128 - 512)  
1456                 Free buffer q   0x6000 (1K - 2K)  
1457                 Packet comp q   0x6800 (1K - 2K)  
1458                 Reass Table     0x7000 (1K - 2K)  
1459                 VC Table        0x7800 (1K - 2K)  
1460                 ABR VC Table    0x8000 (1K - 32K)  
1461         */  
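        /* The offsets in the map above are for the 1K-VC board; the code
           below scales each base by iadev->memSize (1 for the 1K board,
           4 for the 4K board), so the larger board keeps the same layout
           with every region four times bigger and further in. */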
1462           
1463         /* Base address for Buffer Descriptor Table */  
1464         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1465         /* Set the buffer size register */  
1466         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1467   
1468         /* Initialize each entry in the Buffer Descriptor Table */  
1469         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1470         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1471         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1472         buf_desc_ptr++;  
1473         rx_pkt_start = iadev->rx_pkt_ram;  
1474         for(i=1; i<=iadev->num_rx_desc; i++)  
1475         {  
1476                 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1477                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1478                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1479                 buf_desc_ptr++;           
1480                 rx_pkt_start += iadev->rx_buf_sz;  
1481         }  
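        /* Descriptor i (1-based) thus points at packet RAM offset
           rx_pkt_ram + (i - 1) * rx_buf_sz; e.g. with an (illustrative)
           2 KB buffer size, descriptor 3 starts 4 KB into the RAM. */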
1482         IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)  
1483         i = FREE_BUF_DESC_Q*iadev->memSize; 
1484         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1485         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1486         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1487                                          iadev->reass_reg+FREEQ_ED_ADR);
1488         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1489         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1490                                         iadev->reass_reg+FREEQ_WR_PTR);    
1491         /* Fill the FREEQ with all the free descriptors. */  
1492         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1493         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1494         for(i=1; i<=iadev->num_rx_desc; i++)  
1495         {  
1496                 *freeq_start = (u_short)i;  
1497                 freeq_start++;  
1498         }  
1499         IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)  
1500         /* Packet Complete Queue */
1501         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1502         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1503         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1504         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1505         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1506
1507         /* Exception Queue */
1508         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1509         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1510         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1511                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1512         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1513         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1514  
1515         /* Load local copy of FREEQ and PCQ ptrs */
1516         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1517         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1518         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1519         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1520         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1521         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1522         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1523         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1524         
1525         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1526               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1527               iadev->rfL.pcq_wr);)                
1528         /* just for check - no VP TBL */  
1529         /* VP Table */  
1530         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1531         /* initialize VP Table for invalid VPIs  
1532                 - I guess we can write all 1s or 0x000f in the entire memory  
1533                   space or something similar.  
1534         */  
1535   
1536         /* This seems to work and looks right to me too !!! */  
1537         i =  REASS_TABLE * iadev->memSize;
1538         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1539         /* initialize each Reassembly table entry to NO_AAL5_PKT (no packet in progress) */  
1540         reass_table = (u16 *)(iadev->reass_ram+i);  
1541         j = REASS_TABLE_SZ * iadev->memSize;
1542         for(i=0; i < j; i++)  
1543                 *reass_table++ = NO_AAL5_PKT;  
1544        i = 8*1024;
1545        vcsize_sel =  0;
1546        while (i != iadev->num_vc) {
1547           i /= 2;
1548           vcsize_sel++;
1549        }
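       /* vcsize_sel counts the halvings from the 8K maximum down to num_vc,
          i.e. log2(8192 / num_vc): 3 for a 1K-VC board, 1 for a 4K-VC board.
          It is packed into the low bits of VC_LKUP_BASE just below. */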
1550        i = RX_VC_TABLE * iadev->memSize;
1551        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1552        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1553         j = RX_VC_TABLE_SZ * iadev->memSize;
1554         for(i = 0; i < j; i++)  
1555         {  
1556                 /* shift the reassembly pointer by 3 + lower 3 bits of   
1557                 vc_lkup_base register (=3 for 1K VCs) and the last byte   
1558                 is those low 3 bits.   
1559                 Shall program this later.  
1560                 */  
1561                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1562                 vc_table++;  
1563         }  
1564         /* ABR VC table */
1565         i =  ABR_VC_TABLE * iadev->memSize;
1566         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1567                    
1568         i = ABR_VC_TABLE * iadev->memSize;
1569         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1570         j = REASS_TABLE_SZ * iadev->memSize;
1571         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1572         for(i = 0; i < j; i++) {                
1573                 abr_vc_table->rdf = 0x0003;
1574                 abr_vc_table->air = 0x5eb1;
1575                 abr_vc_table++;         
1576         }  
1577
1578         /* Initialize other registers */  
1579   
1580         /* VP Filter Register set for VC Reassembly only */  
1581         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1582         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1583         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1584
1585         /* Packet Timeout Count  related Registers : 
1586            Set packet timeout to occur in about 3 seconds
1587            Set Packet Aging Interval count register to overflow in about 4 us
1588         */  
1589         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1590         ptr16 = (u16*)j;
1591         i = ((u32)ptr16 >> 6) & 0xff;
1592         ptr16  += j - 1;
1593         i |=(((u32)ptr16 << 2) & 0xff00);
1594         writew(i, iadev->reass_reg+TMOUT_RANGE);
1595         /* initiate the desc_tble */
1596         for(i=0; i<iadev->num_tx_desc;i++)
1597             iadev->desc_tbl[i].timestamp = 0;
1598
1599         /* to clear the interrupt status register - read it */  
1600         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1601   
1602         /* Mask Register - clear it */  
1603         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1604   
1605         skb_queue_head_init(&iadev->rx_dma_q);  
1606         iadev->rx_free_desc_qhead = NULL;   
1607
1608         iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
1609         if (!iadev->rx_open) {
1610                 printk(KERN_ERR DEV_LABEL " (itf %d): couldn't get free page\n",
1611                 dev->number);  
1612                 goto err_free_dle;
1613         }  
1614
1615         iadev->rxing = 1;
1616         iadev->rx_pkt_cnt = 0;
1617         /* Mode Register */  
1618         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1619         return 0;  
1620
1621 err_free_dle:
1622         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1623                             iadev->rx_dle_dma);  
1624 err_out:
1625         return -ENOMEM;
1626 }  
1627   
1628
1629 /*  
1630         The memory map suggested in appendix A and the coding for it.   
1631         Keeping it around just in case we change our mind later.  
1632   
1633                 Buffer descr    0x0000 (128 - 4K)  
1634                 UBR sched       0x1000 (1K - 4K)  
1635                 UBR Wait q      0x2000 (1K - 4K)  
1636                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1637                                         (128 - 256) each  
1638                 extended VC     0x4000 (1K - 8K)  
1639                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1640                 CBR sched       0x7000 (as needed)  
1641                 VC table        0x8000 (1K - 32K)  
1642 */  
1643   
1644 static void tx_intr(struct atm_dev *dev)  
1645 {  
1646         IADEV *iadev;  
1647         unsigned short status;  
1648         unsigned long flags;
1649
1650         iadev = INPH_IA_DEV(dev);  
1651   
1652         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1653         if (status & TRANSMIT_DONE){
1654
1655            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1656            spin_lock_irqsave(&iadev->tx_lock, flags);
1657            ia_tx_poll(iadev);
1658            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1659            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1660            if (iadev->close_pending)  
1661                wake_up(&iadev->close_wait);
1662         }         
1663         if (status & TCQ_NOT_EMPTY)  
1664         {  
1665             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1666         }  
1667 }  
1668   
1669 static void tx_dle_intr(struct atm_dev *dev)
1670 {
1671         IADEV *iadev;
1672         struct dle *dle, *cur_dle; 
1673         struct sk_buff *skb;
1674         struct atm_vcc *vcc;
1675         struct ia_vcc  *iavcc;
1676         u_int dle_lp;
1677         unsigned long flags;
1678
1679         iadev = INPH_IA_DEV(dev);
1680         spin_lock_irqsave(&iadev->tx_lock, flags);   
1681         dle = iadev->tx_dle_q.read;
1682         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1683                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1684         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1685         while (dle != cur_dle)
1686         {
1687             /* free the DMAed skb */ 
1688             skb = skb_dequeue(&iadev->tx_dma_q); 
1689             if (!skb) break;
1690
1691             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
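            /* ia_pkt_tx() apparently queues two DLEs per packet (the
               payload skb plus the CPCS trailer) but only one skb, so the
               unmap below is intended to run once per DLE pair. */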
1692             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1693                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1694                                  PCI_DMA_TODEVICE);
1695             }
1696             vcc = ATM_SKB(skb)->vcc;
1697             if (!vcc) {
1698                   printk("tx_dle_intr: vcc is null\n");
1699                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1700                   dev_kfree_skb_any(skb);
1701
1702                   return;
1703             }
1704             iavcc = INPH_IA_VCC(vcc);
1705             if (!iavcc) {
1706                   printk("tx_dle_intr: iavcc is null\n");
1707                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1708                   dev_kfree_skb_any(skb);
1709                   return;
1710             }
1711             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1712                if ((vcc->pop) && (skb->len != 0))
1713                {     
1714                  vcc->pop(vcc, skb);
1715                } 
1716                else {
1717                  dev_kfree_skb_any(skb);
1718                }
1719             }
1720             else { /* Hold the rate-limited skb for flow control */
1721                IA_SKB_STATE(skb) |= IA_DLED;
1722                skb_queue_tail(&iavcc->txing_skb, skb);
1723             }
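            /* VCs slower than rate_limit (LineRate / 3, set in tx_init())
               keep their skbs on the per-VC txing_skb list, marked IA_DLED,
               so freeing is presumably deferred to the transmit-complete
               path for flow control. */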
1724             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
1725             if (++dle == iadev->tx_dle_q.end)
1726                  dle = iadev->tx_dle_q.start;
1727         }
1728         iadev->tx_dle_q.read = dle;
1729         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1730 }
1731   
1732 static int open_tx(struct atm_vcc *vcc)  
1733 {  
1734         struct ia_vcc *ia_vcc;  
1735         IADEV *iadev;  
1736         struct main_vc *vc;  
1737         struct ext_vc *evc;  
1738         int ret;
1739         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1740         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1741         iadev = INPH_IA_DEV(vcc->dev);  
1742         
1743         if (iadev->phy_type & FE_25MBIT_PHY) {
1744            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1745                printk("IA:  ABR not supported\n");
1746                return -EINVAL; 
1747            }
1748           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1749                printk("IA:  CBR not supported\n");
1750                return -EINVAL; 
1751           }
1752         }
1753         ia_vcc =  INPH_IA_VCC(vcc);
1754         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1755         if (vcc->qos.txtp.max_sdu > 
1756                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1757            printk("IA:  max SDU size (%d) exceeds the configured buffer size (%d)\n",
1758                   vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1759            vcc->dev_data = NULL;
1760            kfree(ia_vcc);
1761            return -EINVAL; 
1762         }
1763         ia_vcc->vc_desc_cnt = 0;
1764         ia_vcc->txing = 1;
1765
1766         /* find pcr */
1767         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1768            vcc->qos.txtp.pcr = iadev->LineRate;
1769         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1770            vcc->qos.txtp.pcr = iadev->LineRate;
1771         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1772            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1773         if (vcc->qos.txtp.pcr > iadev->LineRate)
1774              vcc->qos.txtp.pcr = iadev->LineRate;
1775         ia_vcc->pcr = vcc->qos.txtp.pcr;
1776
1777         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1778         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1779         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1780         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
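        /* ltimeout shrinks as the VC gets faster: HZ/10 above LineRate/6,
           HZ above LineRate/130, 16*HZ at or below 170 cells/s, and
           2700*HZ/pcr otherwise. */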
1781         if (ia_vcc->pcr < iadev->rate_limit)
1782            skb_queue_head_init (&ia_vcc->txing_skb);
1783         if (ia_vcc->pcr < iadev->rate_limit) {
1784            struct sock *sk = sk_atm(vcc);
1785
1786            if (vcc->qos.txtp.max_sdu != 0) {
1787                if (ia_vcc->pcr > 60000)
1788                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1789                else if (ia_vcc->pcr > 2000)
1790                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1791                else
1792                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1793            }
1794            else
1795              sk->sk_sndbuf = 24576;
1796         }
1797            
1798         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1799         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1800         vc += vcc->vci;  
1801         evc += vcc->vci;  
1802         memset((caddr_t)vc, 0, sizeof(*vc));  
1803         memset((caddr_t)evc, 0, sizeof(*evc));  
1804           
1805         /* store the most significant 4 bits of vci as the last 4 bits   
1806                 of first part of atm header.  
1807            store the last 12 bits of vci as first 12 bits of the second  
1808                 part of the atm header.  
1809         */  
1810         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1811         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
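        /* Worked example: VCI 0x1234 gives atm_hdr1 = 0x1 (top 4 bits) and
           atm_hdr2 = 0x2340 (low 12 bits shifted into the upper 12 bits of
           the second header half-word). */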
1812  
1813         /* check the following for different traffic classes */  
1814         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1815         {  
1816                 vc->type = UBR;  
1817                 vc->status = CRC_APPEND;
1818                 vc->acr = cellrate_to_float(iadev->LineRate);  
1819                 if (vcc->qos.txtp.pcr > 0) 
1820                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1821                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1822                                              vcc->qos.txtp.max_pcr,vc->acr);)
1823         }  
1824         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1825         {       srv_cls_param_t srv_p;
1826                 IF_ABR(printk("Tx ABR VCC\n");)  
1827                 init_abr_vc(iadev, &srv_p);
1828                 if (vcc->qos.txtp.pcr > 0) 
1829                    srv_p.pcr = vcc->qos.txtp.pcr;
1830                 if (vcc->qos.txtp.min_pcr > 0) {
1831                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1832                    if (tmpsum > iadev->LineRate)
1833                        return -EBUSY;
1834                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1835                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1836                 } 
1837                 else srv_p.mcr = 0;
1838                 if (vcc->qos.txtp.icr)
1839                    srv_p.icr = vcc->qos.txtp.icr;
1840                 if (vcc->qos.txtp.tbe)
1841                    srv_p.tbe = vcc->qos.txtp.tbe;
1842                 if (vcc->qos.txtp.frtt)
1843                    srv_p.frtt = vcc->qos.txtp.frtt;
1844                 if (vcc->qos.txtp.rif)
1845                    srv_p.rif = vcc->qos.txtp.rif;
1846                 if (vcc->qos.txtp.rdf)
1847                    srv_p.rdf = vcc->qos.txtp.rdf;
1848                 if (vcc->qos.txtp.nrm_pres)
1849                    srv_p.nrm = vcc->qos.txtp.nrm;
1850                 if (vcc->qos.txtp.trm_pres)
1851                    srv_p.trm = vcc->qos.txtp.trm;
1852                 if (vcc->qos.txtp.adtf_pres)
1853                    srv_p.adtf = vcc->qos.txtp.adtf;
1854                 if (vcc->qos.txtp.cdf_pres)
1855                    srv_p.cdf = vcc->qos.txtp.cdf;    
1856                 if (srv_p.icr > srv_p.pcr)
1857                    srv_p.icr = srv_p.pcr;    
1858                 IF_ABR(printk("ABR: srv_p.pcr = %d  mcr = %d\n", 
1859                                                       srv_p.pcr, srv_p.mcr);)
1860                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1861         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1862                 if (iadev->phy_type & FE_25MBIT_PHY) {
1863                     printk("IA:  CBR not supported\n");
1864                     return -EINVAL; 
1865                 }
1866                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1867                    IF_CBR(printk("PCR is not available\n");)
1868                    return -1;
1869                 }
1870                 vc->type = CBR;
1871                 vc->status = CRC_APPEND;
1872                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1873                     return ret;
1874                 }
1875        } 
1876         else  
1877            printk("iadev:  Non UBR, ABR and CBR traffic not supported\n"); 
1878         
1879         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1880         IF_EVENT(printk("ia open_tx returning \n");)  
1881         return 0;  
1882 }  
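
/*
 * Minimal sketch (hypothetical helper, never compiled) of the PCR
 * selection performed in open_tx() above: an unlimited or unspecified
 * peak rate falls back to the line rate, an explicit max_pcr wins over
 * a smaller pcr, and the result is always clamped to the line rate.
 */
#if 0
static int ia_pick_tx_pcr(const struct atm_trafprm *txtp, int line_rate)
{
        int pcr = txtp->pcr;

        if (txtp->max_pcr == ATM_MAX_PCR)
                pcr = line_rate;
        else if (txtp->max_pcr == 0 && txtp->pcr <= 0)
                pcr = line_rate;
        else if (txtp->max_pcr > txtp->pcr && txtp->max_pcr > 0)
                pcr = txtp->max_pcr;
        if (pcr > line_rate)
                pcr = line_rate;
        return pcr;
}
#endif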
1883   
1884   
1885 static int tx_init(struct atm_dev *dev)  
1886 {  
1887         IADEV *iadev;  
1888         struct tx_buf_desc *buf_desc_ptr;
1889         unsigned int tx_pkt_start;  
1890         void *dle_addr;  
1891         int i;  
1892         u_short tcq_st_adr;  
1893         u_short *tcq_start;  
1894         u_short prq_st_adr;  
1895         u_short *prq_start;  
1896         struct main_vc *vc;  
1897         struct ext_vc *evc;   
1898         u_short tmp16;
1899         u32 vcsize_sel;
1900  
1901         iadev = INPH_IA_DEV(dev);  
1902         spin_lock_init(&iadev->tx_lock);
1903  
1904         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1905                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1906
1907         /* Allocate 4k (boundary aligned) bytes */
1908         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1909                                         &iadev->tx_dle_dma);  
1910         if (!dle_addr)  {
1911                 printk(KERN_ERR DEV_LABEL ": can't allocate DLEs\n");
1912                 goto err_out;
1913         }
1914         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1915         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1916         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1917         iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1918
1919         /* write the upper 20 bits of the start address to tx list address register */  
1920         writel(iadev->tx_dle_dma & 0xfffff000,
1921                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1922         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1923         writew(0, iadev->seg_reg+MODE_REG_0);  
1924         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1925         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1926         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1927         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1928   
1929         /*  
1930            Transmit side control memory map  
1931            --------------------------------    
1932          Buffer descr   0x0000 (128 - 4K)  
1933          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1934                                         (512 - 1K) each  
1935                                         TCQ - 4K, PRQ - 5K  
1936          CBR Table      0x1800 (as needed) - 6K  
1937          UBR Table      0x3000 (1K - 4K) - 12K  
1938          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1939          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1940                                 ABR Tbl - 20K, ABR Wq - 22K   
1941          extended VC    0x6000 (1K - 8K) - 24K  
1942          VC Table       0x8000 (1K - 32K) - 32K  
1943           
1944         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1945         and Wait q, which can be allotted later.  
1946         */  
1947      
1948         /* Buffer Descriptor Table Base address */  
1949         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1950   
1951         /* initialize each entry in the buffer descriptor table */  
1952         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1953         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1954         buf_desc_ptr++;  
1955         tx_pkt_start = TX_PACKET_RAM;  
1956         for(i=1; i<=iadev->num_tx_desc; i++)  
1957         {  
1958                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1959                 buf_desc_ptr->desc_mode = AAL5;  
1960                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1961                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1962                 buf_desc_ptr++;           
1963                 tx_pkt_start += iadev->tx_buf_sz;  
1964         }  
1965         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1966         if (!iadev->tx_buf) {
1967             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1968             goto err_free_dle;
1969         }
1970         for (i= 0; i< iadev->num_tx_desc; i++)
1971         {
1972             struct cpcs_trailer *cpcs;
1973  
1974             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1975             if(!cpcs) {                
1976                 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1977                 goto err_free_tx_bufs;
1978             }
1979             iadev->tx_buf[i].cpcs = cpcs;
1980             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1981                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1982         }
1983         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1984                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1985         if (!iadev->desc_tbl) {
1986                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1987                 goto err_free_all_tx_bufs;
1988         }
1989   
1990         /* Communication Queues base address */  
1991         i = TX_COMP_Q * iadev->memSize;
1992         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
1993   
1994         /* Transmit Complete Queue */  
1995         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
1996         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
1997         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
1998         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1999         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2000                                               iadev->seg_reg+TCQ_ED_ADR); 
2001         /* Fill the TCQ with all the free descriptors. */  
2002         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
2003         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
2004         for(i=1; i<=iadev->num_tx_desc; i++)  
2005         {  
2006                 *tcq_start = (u_short)i;  
2007                 tcq_start++;  
2008         }  
2009   
2010         /* Packet Ready Queue */  
2011         i = PKT_RDY_Q * iadev->memSize; 
2012         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2013         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2014                                               iadev->seg_reg+PRQ_ED_ADR);
2015         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2016         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2017          
2018         /* Load local copy of PRQ and TCQ ptrs */
2019         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2020         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2021         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2022
2023         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2024         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2025         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2026
2027         /* Just for safety initializing the queue to have desc 1 always */  
2028         /* Fill the PRQ with all the free descriptors. */  
2029         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2030         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2031         for(i=1; i<=iadev->num_tx_desc; i++)  
2032         {  
2033                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2034                 prq_start++;  
2035         }  
2036         /* CBR Table */  
2037         IF_INIT(printk("Start CBR Init\n");)
2038 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2039         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2040 #else /* Charlie's logic is wrong ? */
2041         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2042         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2043         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2044 #endif
2045
2046         IF_INIT(printk("value in register = 0x%x\n",
2047                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2048         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2049         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2050         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2051                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2052         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2053         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2054         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2055         IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2056                (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2057         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2058           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2059           readw(iadev->seg_reg+CBR_TAB_END+1));)
2060
2061         /* Initialize the CBR Scheduling Table */
2062         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2063                                                           0, iadev->num_vc*6); 
2064         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2065         iadev->CbrEntryPt = 0;
2066         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2067         iadev->NumEnabledCBR = 0;
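        /* The CBR schedule has num_vc * 3 two-byte slots (hence the
           num_vc * 6 byte clear above); Granularity is the cell rate one
           slot represents, MAX_ATM_155 / CbrTotEntries. */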
2068
2069         /* UBR scheduling Table and wait queue */  
2070         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2071                 - SCHEDSZ is 1K (# of entries).  
2072                 - UBR Table size is 4K  
2073                 - UBR wait queue is 4K  
2074            since the table and wait queues are contiguous, all the bytes   
2075            can be initialized by one memset.  
2076         */  
2077         
2078         vcsize_sel = 0;
2079         i = 8*1024;
2080         while (i != iadev->num_vc) {
2081           i /= 2;
2082           vcsize_sel++;
2083         }
2084  
2085         i = MAIN_VC_TABLE * iadev->memSize;
2086         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2087         i =  EXT_VC_TABLE * iadev->memSize;
2088         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2089         i = UBR_SCHED_TABLE * iadev->memSize;
2090         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2091         i = UBR_WAIT_Q * iadev->memSize; 
2092         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2093         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2094                                                        0, iadev->num_vc*8);
2095         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2096         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2097                 - SCHEDSZ is 1K (# of entries).  
2098                 - ABR Table size is 2K  
2099                 - ABR wait queue is 2K  
2100            since the table and wait queues are contiguous, all the bytes   
2101            can be initialized by one memset.  
2102         */  
2103         i = ABR_SCHED_TABLE * iadev->memSize;
2104         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2105         i = ABR_WAIT_Q * iadev->memSize;
2106         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2107  
2108         i = ABR_SCHED_TABLE*iadev->memSize;
2109         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2110         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2111         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2112         iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL); 
2113         if (!iadev->testTable) {
2114            printk("Get freepage  failed\n");
2115            goto err_free_desc_tbl;
2116         }
2117         for(i=0; i<iadev->num_vc; i++)  
2118         {  
2119                 memset((caddr_t)vc, 0, sizeof(*vc));  
2120                 memset((caddr_t)evc, 0, sizeof(*evc));  
2121                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2122                                                 GFP_KERNEL);
2123                 if (!iadev->testTable[i])
2124                         goto err_free_test_tables;
2125                 iadev->testTable[i]->lastTime = 0;
2126                 iadev->testTable[i]->fract = 0;
2127                 iadev->testTable[i]->vc_status = VC_UBR;
2128                 vc++;  
2129                 evc++;  
2130         }  
2131   
2132         /* Other Initialization */  
2133           
2134         /* Max Rate Register */  
2135         if (iadev->phy_type & FE_25MBIT_PHY) {
2136            writew(RATE25, iadev->seg_reg+MAXRATE);  
2137            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2138         }
2139         else {
2140            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2141            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2142         }
2143         /* Set Idle Header Registers to be sure */  
2144         writew(0, iadev->seg_reg+IDLEHEADHI);  
2145         writew(0, iadev->seg_reg+IDLEHEADLO);  
2146   
2147         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2148         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2149
2150         iadev->close_pending = 0;
2151         init_waitqueue_head(&iadev->close_wait);
2152         init_waitqueue_head(&iadev->timeout_wait);
2153         skb_queue_head_init(&iadev->tx_dma_q);  
2154         ia_init_rtn_q(&iadev->tx_return_q);  
2155
2156         /* RM Cell Protocol ID and Message Type */  
2157         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2158         skb_queue_head_init (&iadev->tx_backlog);
2159   
2160         /* Mode Register 1 */  
2161         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2162   
2163         /* Mode Register 0 */  
2164         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2165   
2166         /* Interrupt Status Register - read to clear */  
2167         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2168   
2169         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2170         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2171         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2172         iadev->tx_pkt_cnt = 0;
2173         iadev->rate_limit = iadev->LineRate / 3;
2174   
2175         return 0;
2176
2177 err_free_test_tables:
2178         while (--i >= 0)
2179                 kfree(iadev->testTable[i]);
2180         kfree(iadev->testTable);
2181 err_free_desc_tbl:
2182         kfree(iadev->desc_tbl);
2183 err_free_all_tx_bufs:
2184         i = iadev->num_tx_desc;
2185 err_free_tx_bufs:
2186         while (--i >= 0) {
2187                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2188
2189                 pci_unmap_single(iadev->pci, desc->dma_addr,
2190                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2191                 kfree(desc->cpcs);
2192         }
2193         kfree(iadev->tx_buf);
2194 err_free_dle:
2195         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2196                             iadev->tx_dle_dma);  
2197 err_out:
2198         return -ENOMEM;
2199 }   
2200    
2201 static irqreturn_t ia_int(int irq, void *dev_id)  
2202 {  
2203    struct atm_dev *dev;  
2204    IADEV *iadev;  
2205    unsigned int status;  
2206    int handled = 0;
2207
2208    dev = dev_id;  
2209    iadev = INPH_IA_DEV(dev);  
2210    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2211    { 
2212         handled = 1;
2213         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2214         if (status & STAT_REASSINT)  
2215         {  
2216            /* do something */  
2217            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2218            rx_intr(dev);  
2219         }  
2220         if (status & STAT_DLERINT)  
2221         {  
2222            /* Clear this bit by writing a 1 to it. */  
2223            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2224            rx_dle_intr(dev);  
2225         }  
2226         if (status & STAT_SEGINT)  
2227         {  
2228            /* do something */ 
2229            IF_EVENT(printk("IA: tx_intr \n");) 
2230            tx_intr(dev);  
2231         }  
2232         if (status & STAT_DLETINT)  
2233         {  
2234            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2235            tx_dle_intr(dev);  
2236         }  
2237         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2238         {  
2239            if (status & STAT_FEINT) 
2240                IaFrontEndIntr(iadev);
2241         }  
2242    }
2243    return IRQ_RETVAL(handled);
2244 }  
2245           
2246           
2247           
2248 /*----------------------------- entries --------------------------------*/  
2249 static int get_esi(struct atm_dev *dev)  
2250 {  
2251         IADEV *iadev;  
2252         int i;  
2253         u32 mac1;  
2254         u16 mac2;  
2255           
2256         iadev = INPH_IA_DEV(dev);  
2257         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2258                                 iadev->reg+IPHASE5575_MAC1)));  
2259         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2260         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
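        /* The loops below assemble the ESI most-significant byte first:
           mac1 provides the first MAC1_LEN bytes and mac2 the remaining
           MAC2_LEN. Assuming the usual 4+2 split, mac1 = 0x00204806 and
           mac2 = 0x1a2b (illustrative values) give 00:20:48:06:1a:2b. */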
2261         for (i=0; i<MAC1_LEN; i++)  
2262                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2263           
2264         for (i=0; i<MAC2_LEN; i++)  
2265                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2266         return 0;  
2267 }  
2268           
2269 static int reset_sar(struct atm_dev *dev)  
2270 {  
2271         IADEV *iadev;  
2272         int i, error = 1;  
2273         unsigned int pci[64];  
2274           
2275         iadev = INPH_IA_DEV(dev);  
2276         for(i=0; i<64; i++)  
2277           if ((error = pci_read_config_dword(iadev->pci,  
2278                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2279               return error;  
2280         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2281         for(i=0; i<64; i++)  
2282           if ((error = pci_write_config_dword(iadev->pci,  
2283                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2284             return error;  
2285         udelay(5);  
2286         return 0;  
2287 }  
2288           
2289           
2290 static int __devinit ia_init(struct atm_dev *dev)
2291 {  
2292         IADEV *iadev;  
2293         unsigned long real_base;
2294         void __iomem *base;
2295         unsigned short command;  
2296         int error, i; 
2297           
2298         /* The device has been identified and registered. Now we read   
2299            necessary configuration info like memory base address,   
2300            interrupt number etc */  
2301           
2302         IF_INIT(printk(">ia_init\n");)  
2303         dev->ci_range.vpi_bits = 0;  
2304         dev->ci_range.vci_bits = NR_VCI_LD;  
2305
2306         iadev = INPH_IA_DEV(dev);  
2307         real_base = pci_resource_start (iadev->pci, 0);
2308         iadev->irq = iadev->pci->irq;
2309                   
2310         error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2311         if (error) {
2312                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2313                                 dev->number,error);  
2314                 return -EINVAL;  
2315         }  
2316         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2317                         dev->number, iadev->pci->revision, real_base, iadev->irq);)
2318           
2319         /* find mapping size of board */  
2320           
2321         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2322
2323         if (iadev->pci_map_size == 0x100000){
2324           iadev->num_vc = 4096;
2325           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2326           iadev->memSize = 4;
2327         }
2328         else if (iadev->pci_map_size == 0x40000) {
2329           iadev->num_vc = 1024;
2330           iadev->memSize = 1;
2331         }
2332         else {
2333            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2334            return -EINVAL;
2335         }
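        /* BAR0 size identifies the board variant: a 1 MB window means the
           4K-VC board (memSize scale 4, wider VCI range), a 256 KB window
           the 1K-VC board (memSize scale 1). */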
2336         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2337           
2338         /* enable bus mastering */
2339         pci_set_master(iadev->pci);
2340
2341         /*  
2342          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2343          */  
2344         udelay(10);  
2345           
2346         /* mapping the physical address to a virtual address in address space */  
2347         base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2348           
2349         if (!base)  
2350         {  
2351                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2352                             dev->number);  
2353                 return -ENOMEM;  
2354         }  
2355         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2356                         dev->number, iadev->pci->revision, base, iadev->irq);)
2357           
2358         /* filling the iphase dev structure */  
2359         iadev->mem = iadev->pci_map_size /2;  
2360         iadev->real_base = real_base;  
2361         iadev->base = base;  
2362                   
2363         /* Bus Interface Control Registers */  
2364         iadev->reg = base + REG_BASE;
2365         /* Segmentation Control Registers */  
2366         iadev->seg_reg = base + SEG_BASE;
2367         /* Reassembly Control Registers */  
2368         iadev->reass_reg = base + REASS_BASE;  
2369         /* Front end/ DMA control registers */  
2370         iadev->phy = base + PHY_BASE;  
2371         iadev->dma = base + PHY_BASE;  
2372         /* RAM - Segmentation RAm and Reassembly RAM */  
2373         iadev->ram = base + ACTUAL_RAM_BASE;  
2374         iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2375         iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2376   
2377         /* lets print out the above */  
2378         IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2379           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2380           iadev->phy, iadev->ram, iadev->seg_ram, 
2381           iadev->reass_ram);) 
2382           
2383         /* lets try reading the MAC address */  
2384         error = get_esi(dev);  
2385         if (error) {
2386           iounmap(iadev->base);
2387           return error;  
2388         }
2389         printk("IA: ");
2390         for (i=0; i < ESI_LEN; i++)  
2391                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2392         printk("\n");  
2393   
2394         /* reset SAR */  
2395         if (reset_sar(dev)) {
2396            iounmap(iadev->base);
2397            printk("IA: reset SAR failed, please try again\n");
2398            return 1;
2399         }
2400         return 0;  
2401 }  
2402
2403 static void ia_update_stats(IADEV *iadev) {
2404     if (!iadev->carrier_detect)
2405         return;
2406     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2407     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2408     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2409     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2410     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2411     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2412     return;
2413 }
2414   
2415 static void ia_led_timer(unsigned long arg) {
2416         unsigned long flags;
2417         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2418         u_char i;
2419         static u32 ctrl_reg; 
2420         for (i = 0; i < iadev_count; i++) {
2421            if (ia_dev[i]) {
2422               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2423               if (blinking[i] == 0) {
2424                  blinking[i]++;
2425                  ctrl_reg &= (~CTRL_LED);
2426                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2427                  ia_update_stats(ia_dev[i]);
2428               }
2429               else {
2430                  blinking[i] = 0;
2431                  ctrl_reg |= CTRL_LED;
2432                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2433                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2434                  if (ia_dev[i]->close_pending)  
2435                     wake_up(&ia_dev[i]->close_wait);
2436                  ia_tx_poll(ia_dev[i]);
2437                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2438               }
2439            }
2440         }
2441         mod_timer(&ia_timer, jiffies + HZ / 4);
2442         return;
2443 }
2444
2445 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2446         unsigned long addr)  
2447 {  
2448         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2449 }  
2450   
2451 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2452 {  
2453         return readl(INPH_IA_DEV(dev)->phy+addr);  
2454 }  
2455
2456 static void ia_free_tx(IADEV *iadev)
2457 {
2458         int i;
2459
2460         kfree(iadev->desc_tbl);
2461         for (i = 0; i < iadev->num_vc; i++)
2462                 kfree(iadev->testTable[i]);
2463         kfree(iadev->testTable);
2464         for (i = 0; i < iadev->num_tx_desc; i++) {
2465                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2466
2467                 pci_unmap_single(iadev->pci, desc->dma_addr,
2468                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2469                 kfree(desc->cpcs);
2470         }
2471         kfree(iadev->tx_buf);
2472         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2473                             iadev->tx_dle_dma);  
2474 }
2475
2476 static void ia_free_rx(IADEV *iadev)
2477 {
2478         kfree(iadev->rx_open);
2479         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2480                           iadev->rx_dle_dma);  
2481 }
2482
2483 static int __devinit ia_start(struct atm_dev *dev)
2484 {  
2485         IADEV *iadev;  
2486         int error;  
2487         unsigned char phy;  
2488         u32 ctrl_reg;  
2489         IF_EVENT(printk(">ia_start\n");)  
2490         iadev = INPH_IA_DEV(dev);  
2491         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2492                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2493                     dev->number, iadev->irq);  
2494                 error = -EAGAIN;
2495                 goto err_out;
2496         }  
2497         /* @@@ should release IRQ on error */  
2498         /* enabling memory + master */  
2499         if ((error = pci_write_config_word(iadev->pci,   
2500                                 PCI_COMMAND,   
2501                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2502         {  
2503                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2504                     "master (0x%x)\n",dev->number, error);  
2505                 error = -EIO;  
2506                 goto err_free_irq;
2507         }  
2508         udelay(10);  
2509   
2510         /* Maybe we should reset the front end, initialize Bus Interface Control   
2511                 Registers and see. */  
2512   
2513         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2514                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2515         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2516         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2517                         | CTRL_B8  
2518                         | CTRL_B16  
2519                         | CTRL_B32  
2520                         | CTRL_B48  
2521                         | CTRL_B64  
2522                         | CTRL_B128  
2523                         | CTRL_ERRMASK  
2524                         | CTRL_DLETMASK         /* should be removed later */  
2525                         | CTRL_DLERMASK  
2526                         | CTRL_SEGMASK  
2527                         | CTRL_REASSMASK          
2528                         | CTRL_FEMASK  
2529                         | CTRL_CSPREEMPT;  
2530   
2531        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2532   
2533         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2534                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2535            printk("Bus status reg after init: %08x\n", 
2536                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2537     
2538         ia_hw_type(iadev); 
2539         error = tx_init(dev);  
2540         if (error)
2541                 goto err_free_irq;
2542         error = rx_init(dev);  
2543         if (error)
2544                 goto err_free_tx;
2545   
2546         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2547         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2548         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2549                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2550         phy = 0; /* resolve compiler complaint */
2551         IF_INIT ( 
2552         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2553                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2554         else  
2555                 printk("IA: utopia,rev.%0x\n",phy);) 
2556
2557         if (iadev->phy_type &  FE_25MBIT_PHY)
2558            ia_mb25_init(iadev);
2559         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2560            ia_suni_pm7345_init(iadev);
2561         else {
2562                 error = suni_init(dev);
2563                 if (error)
2564                         goto err_free_rx;
2565                 if (dev->phy->start) {
2566                         error = dev->phy->start(dev);
2567                         if (error)
2568                                 goto err_free_rx;
2569                 }
2570                 /* Get iadev->carrier_detect status */
2571                 IaFrontEndIntr(iadev);
2572         }
2573         return 0;
2574
2575 err_free_rx:
2576         ia_free_rx(iadev);
2577 err_free_tx:
2578         ia_free_tx(iadev);
2579 err_free_irq:
2580         free_irq(iadev->irq, dev);  
2581 err_out:
2582         return error;
2583 }  
2584   
2585 static void ia_close(struct atm_vcc *vcc)  
2586 {
2587         DEFINE_WAIT(wait);
2588         u16 *vc_table;
2589         IADEV *iadev;
2590         struct ia_vcc *ia_vcc;
2591         struct sk_buff *skb = NULL;
2592         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2593         unsigned long closetime, flags;
2594
2595         iadev = INPH_IA_DEV(vcc->dev);
2596         ia_vcc = INPH_IA_VCC(vcc);
2597         if (!ia_vcc) return;  
2598
2599         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2600                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2601         clear_bit(ATM_VF_READY,&vcc->flags);
2602         skb_queue_head_init (&tmp_tx_backlog);
2603         skb_queue_head_init (&tmp_vcc_backlog); 
2604         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2605            iadev->close_pending++;
2606            prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2607            schedule_timeout(50);
2608            finish_wait(&iadev->timeout_wait, &wait);
2609            spin_lock_irqsave(&iadev->tx_lock, flags); 
2610            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2611               if (ATM_SKB(skb)->vcc == vcc){ 
2612                  if (vcc->pop) vcc->pop(vcc, skb);
2613                  else dev_kfree_skb_any(skb);
2614               }
2615               else 
2616                  skb_queue_tail(&tmp_tx_backlog, skb);
2617            } 
2618            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2619              skb_queue_tail(&iadev->tx_backlog, skb);
2620            IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2621            closetime = 300000 / ia_vcc->pcr;
2622            if (closetime == 0)
2623               closetime = 1;
2624            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2625            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2626            spin_lock_irqsave(&iadev->tx_lock, flags);
2627            iadev->close_pending--;
2628            iadev->testTable[vcc->vci]->lastTime = 0;
2629            iadev->testTable[vcc->vci]->fract = 0; 
2630            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2631            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2632               if (vcc->qos.txtp.min_pcr > 0)
2633                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2634            }
2635            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2636               ia_vcc = INPH_IA_VCC(vcc); 
2637               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2638               ia_cbrVc_close (vcc);
2639            }
2640            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2641         }
2642         
2643         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2644            // reset reass table
2645            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2646            vc_table += vcc->vci; 
2647            *vc_table = NO_AAL5_PKT;
2648            // reset vc table
2649            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2650            vc_table += vcc->vci;
2651            *vc_table = (vcc->vci << 6) | 15;
2652            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2653               struct abr_vc_table __iomem *abr_vc_table = 
2654                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2655               abr_vc_table +=  vcc->vci;
2656               abr_vc_table->rdf = 0x0003;
2657               abr_vc_table->air = 0x5eb1;
2658            }                                 
2659            // Drain the packets
2660            rx_dle_intr(vcc->dev); 
2661            iadev->rx_open[vcc->vci] = NULL;
2662         }
2663         kfree(INPH_IA_VCC(vcc));  
2664         ia_vcc = NULL;
2665         vcc->dev_data = NULL;
2666         clear_bit(ATM_VF_ADDR,&vcc->flags);
2667         return;        
2668 }  
2669   
2670 static int ia_open(struct atm_vcc *vcc)
2671 {  
2672         IADEV *iadev;  
2673         struct ia_vcc *ia_vcc;  
2674         int error;  
2675         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2676         {  
2677                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2678                 vcc->dev_data = NULL;
2679         }  
2680         iadev = INPH_IA_DEV(vcc->dev);  
2681         if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2682         {  
2683                 IF_EVENT(printk("iphase open: unspec part\n");)  
2684                 set_bit(ATM_VF_ADDR,&vcc->flags);
2685         }  
2686         if (vcc->qos.aal != ATM_AAL5)  
2687                 return -EINVAL;  
2688         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2689                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2690   
2691         /* Device dependent initialization */  
2692         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2693         if (!ia_vcc) return -ENOMEM;  
2694         vcc->dev_data = ia_vcc;
2695   
2696         if ((error = open_rx(vcc)))  
2697         {  
2698                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2699                 ia_close(vcc);  
2700                 return error;  
2701         }  
2702   
2703         if ((error = open_tx(vcc)))  
2704         {  
2705                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2706                 ia_close(vcc);  
2707                 return error;  
2708         }  
2709   
2710         set_bit(ATM_VF_READY,&vcc->flags);
2711
2712 #if 0
2713         {
2714            static u8 first = 1; 
2715            if (first) {
2716               ia_timer.expires = jiffies + 3*HZ;
2717               add_timer(&ia_timer);
2718               first = 0;
2719            }           
2720         }
2721 #endif
2722         IF_EVENT(printk("ia open returning\n");)  
2723         return 0;  
2724 }  
2725   
2726 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2727 {  
2728         IF_EVENT(printk(">ia_change_qos\n");)  
2729         return 0;  
2730 }  
2731   
2732 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2733 {  
2734    IA_CMDBUF ia_cmds;
2735    IADEV *iadev;
2736    int i, board;
2737    u16 __user *tmps;
2738    IF_EVENT(printk(">ia_ioctl\n");)  
2739    if (cmd != IA_CMD) {
2740       if (!dev->phy->ioctl) return -EINVAL;
2741       return dev->phy->ioctl(dev,cmd,arg);
2742    }
2743    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2744    board = ia_cmds.status;
2745    if ((board < 0) || (board >= iadev_count))
2746          board = 0;    
2747    iadev = ia_dev[board];
2748    switch (ia_cmds.cmd) {
2749    case MEMDUMP:
2750    {
2751         switch (ia_cmds.sub_cmd) {
2752           case MEMDUMP_DEV:     
2753              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2754              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2755                 return -EFAULT;
2756              ia_cmds.status = 0;
2757              break;
2758           case MEMDUMP_SEGREG:
2759              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2760              tmps = (u16 __user *)ia_cmds.buf;
2761              for(i=0; i<0x80; i+=2, tmps++)
2762                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2763              ia_cmds.status = 0;
2764              ia_cmds.len = 0x80;
2765              break;
2766           case MEMDUMP_REASSREG:
2767              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2768              tmps = (u16 __user *)ia_cmds.buf;
2769              for(i=0; i<0x80; i+=2, tmps++)
2770                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2771              ia_cmds.status = 0;
2772              ia_cmds.len = 0x80;
2773              break;
2774           case MEMDUMP_FFL:
2775           {  
2776              ia_regs_t       *regs_local;
2777              ffredn_t        *ffL;
2778              rfredn_t        *rfL;
2779                      
2780              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2781              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2782              if (!regs_local) return -ENOMEM;
2783              ffL = &regs_local->ffredn;
2784              rfL = &regs_local->rfredn;
2785              /* Copy real rfred registers into the local copy */
2786              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2787                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2788              /* Copy real ffred registers into the local copy */
2789              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2790                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2791
2792              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2793                 kfree(regs_local);
2794                 return -EFAULT;
2795              }
2796              kfree(regs_local);
2797              printk("Board %d registers dumped\n", board);
2798              ia_cmds.status = 0;                  
2799          }      
2800              break;        
2801          case READ_REG:
2802          {  
2803              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2804              desc_dbg(iadev); 
2805              ia_cmds.status = 0; 
2806          }
2807              break;
2808          case 0x6:
2809          {  
2810              ia_cmds.status = 0; 
2811              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2812              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2813          }
2814              break;
2815          case 0x8:
2816          {
2817              struct k_sonet_stats *stats;
2818              stats = &PRIV(_ia_dev[board])->sonet_stats;
2819              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2820              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2821              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2822              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2823              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2824              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2825              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2826              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2827              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2828          }
2829             ia_cmds.status = 0;
2830             break;
2831          case 0x9:
2832             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2833             for (i = 1; i <= iadev->num_rx_desc; i++)
2834                free_desc(_ia_dev[board], i);
2835             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2836                                             iadev->reass_reg+REASS_MASK_REG);
2837             iadev->rxing = 1;
2838             
2839             ia_cmds.status = 0;
2840             break;
2841
2842          case 0xb:
2843             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2844             IaFrontEndIntr(iadev);
2845             break;
2846          case 0xa:
2847          {
2848              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2849              ia_cmds.status = 0;
2850              IADebugFlag = ia_cmds.maddr;
2851              printk("New debug option loaded\n");
2852          }
2853              break;
2854          default:
2855              ia_cmds.status = 0;
2856              break;
2857       } 
2858    }
2859       break;
2860    default:
2861       break;
2862
2863    }    
2864    return 0;  
2865 }  
2866   
2867 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2868         void __user *optval, int optlen)  
2869 {  
2870         IF_EVENT(printk(">ia_getsockopt\n");)  
2871         return -EINVAL;  
2872 }  
2873   
2874 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2875         void __user *optval, int optlen)  
2876 {  
2877         IF_EVENT(printk(">ia_setsockopt\n");)  
2878         return -EINVAL;  
2879 }  
2880   
2881 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2882         IADEV *iadev;
2883         struct dle *wr_ptr;
2884         struct tx_buf_desc __iomem *buf_desc_ptr;
2885         int desc;
2886         int comp_code;
2887         int total_len;
2888         struct cpcs_trailer *trailer;
2889         struct ia_vcc *iavcc;
2890
2891         iadev = INPH_IA_DEV(vcc->dev);  
2892         iavcc = INPH_IA_VCC(vcc);
2893         if (!iavcc->txing) {
2894            printk("discard packet on closed VC\n");
2895            if (vcc->pop)
2896                 vcc->pop(vcc, skb);
2897            else
2898                 dev_kfree_skb_any(skb);
2899            return 0;
2900         }
2901
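        /* Leave room for the CPCS trailer: the 8 below matches
           sizeof(struct cpcs_trailer), the same limit that ia_send()
           checks before handing us the skb. */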
2902         if (skb->len > iadev->tx_buf_sz - 8) {
2903            printk("Transmit size over tx buffer size\n");
2904            if (vcc->pop)
2905                  vcc->pop(vcc, skb);
2906            else
2907                  dev_kfree_skb_any(skb);
2908           return 0;
2909         }
2910         if ((unsigned long)skb->data & 3) {
2911            printk("Misaligned SKB\n");
2912            if (vcc->pop)
2913                  vcc->pop(vcc, skb);
2914            else
2915                  dev_kfree_skb_any(skb);
2916            return 0;
2917         }       
2918         /* Get a descriptor number from our free descriptor queue.
2919            The descriptor number comes from the TCQ, which is used here
2920            as a free-descriptor queue: it is initially loaded with all
2921            the descriptors and is therefore full.
2922         */
2923         desc = get_desc (iadev, iavcc);
2924         if (desc == 0xffff) 
2925             return 1;
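        /* get_desc() hands back 0xffff when the TCQ holds no free
           descriptor.  Returning non-zero makes ia_send() park the skb on
           tx_backlog, from where it is presumably retried once the
           completion path frees descriptors. */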
2926         comp_code = desc >> 13;  
2927         desc &= 0x1fff;  
2928   
2929         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2930         {  
2931                 IF_ERR(printk(DEV_LABEL ": invalid desc for send: %d\n", desc);) 
2932                 atomic_inc(&vcc->stats->tx);
2933                 if (vcc->pop)   
2934                     vcc->pop(vcc, skb);   
2935                 else  
2936                     dev_kfree_skb_any(skb);
2937                 return 0;   /* return SUCCESS */
2938         }  
2939   
2940         if (comp_code)  
2941         {  
2942             IF_ERR(printk(DEV_LABEL ": send desc:%d completion code %d error\n", 
2943                                                             desc, comp_code);)  
2944         }  
2945        
2946         /* remember the desc and vcc mapping */
2947         iavcc->vc_desc_cnt++;
2948         iadev->desc_tbl[desc-1].iavcc = iavcc;
2949         iadev->desc_tbl[desc-1].txskb = skb;
2950         IA_SKB_STATE(skb) = 0;
2951
2952         iadev->ffL.tcq_rd += 2;
2953         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2954                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2955         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
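        /* Each TCQ entry is one 16-bit descriptor number, so bumping the
           read pointer by 2 consumes the entry just taken above; the
           pointer wraps from tcq_ed back to tcq_st, and the new value is
           written out so the SAR presumably sees the slot as free again. */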
2956   
2957         /* Put the descriptor number in the packet ready queue  
2958                 and put the updated write pointer in the DLE field   
2959         */   
2960         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2961
2962         iadev->ffL.prq_wr += 2;
2963         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2964                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2965           
2966         /* Figure out the exact length of the packet and padding required to 
2967            make it aligned on a 48-byte boundary.  */
2968         total_len = skb->len + sizeof(struct cpcs_trailer);  
2969         total_len = ((total_len + 47) / 48) * 48;
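        /* For illustration: a 100-byte skb plus the 8-byte CPCS trailer
           gives 108 bytes, which rounds up to 144, i.e. three 48-byte
           ATM cell payloads. */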
2970         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2971  
2972         /* Put the packet in a tx buffer */   
2973         trailer = iadev->tx_buf[desc-1].cpcs;
2974         IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2975                   skb, skb->data, skb->len, desc);)
2976         trailer->control = 0; 
2977         /*big endian*/ 
2978         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
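        /* The shift/mask pair byte-swaps the 16-bit length, e.g.
           skb->len == 0x0123 is stored as 0x2301; on a little-endian
           host this is the same as cpu_to_be16(skb->len). */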
2979         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2980
2981         /* Display the packet */  
2982         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2983                                                         skb->len, tcnter++);  
2984         xdump(skb->data, skb->len, "TX: ");
2985         printk("\n");)
2986
2987         /* Build the buffer descriptor */  
2988         buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2989         buf_desc_ptr += desc;   /* points to the corresponding entry */  
2990         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
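        /* Judging by the flag names: AAL5 framing, end-of-message enabled,
           hardware appends the CRC-32, and a completion interrupt is
           raised when the descriptor finishes. */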
2991         /* Huh? p.115 of the user's guide describes this as a read-only register */
2992         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2993         buf_desc_ptr->vc_index = vcc->vci;
2994         buf_desc_ptr->bytes = total_len;  
2995
2996         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
2997            clear_lockup (vcc, iadev);
2998
2999         /* Build the DLE structure */  
3000         wr_ptr = iadev->tx_dle_q.write;  
3001         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3002         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3003                 skb->len, PCI_DMA_TODEVICE);
3004         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3005                                                   buf_desc_ptr->buf_start_lo;  
3006         /* wr_ptr->bytes = swap(total_len);     didn't seem to affect ?? */  
3007         wr_ptr->bytes = skb->len;  
3008
3009         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3010         if ((wr_ptr->bytes >> 2) == 0xb)
3011            wr_ptr->bytes = 0x30;
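           /* (bytes >> 2) == 0xb catches any length from 0x2c to 0x2f and
              rounds it up to 0x30, covering the problematic 0x2d-0x2f
              values; the few extra bytes DMAed are presumably ignored,
              since the real PDU length travels in the CPCS trailer. */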
3012
3013         wr_ptr->mode = TX_DLE_PSI; 
3014         wr_ptr->prq_wr_ptr_data = 0;
3015   
3016         /* end is not to be used for the DLE q */  
3017         if (++wr_ptr == iadev->tx_dle_q.end)  
3018                 wr_ptr = iadev->tx_dle_q.start;  
3019         
3020         /* Build trailer dle */
3021         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3022         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3023           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3024
3025         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3026         wr_ptr->mode = DMA_INT_ENABLE; 
3027         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3028         
3029         /* end is not to be used for the DLE q */
3030         if (++wr_ptr == iadev->tx_dle_q.end)  
3031                 wr_ptr = iadev->tx_dle_q.start;
3032
3033         iadev->tx_dle_q.write = wr_ptr;  
3034         ATM_DESC(skb) = vcc->vci;
3035         skb_queue_tail(&iadev->tx_dma_q, skb);
3036
3037         atomic_inc(&vcc->stats->tx);
3038         iadev->tx_pkt_cnt++;
3039         /* Increment transaction counter */  
3040         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
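        /* The value 2 presumably accounts for the two DLEs queued above
           (payload + trailer). */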
3041         
3042 #if 0        
3043         /* add flow control logic */ 
3044         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3045           if (iavcc->vc_desc_cnt > 10) {
3046              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3047             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3048               iavcc->flow_inc = -1;
3049               iavcc->saved_tx_quota = vcc->tx_quota;
3050            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3051              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3052              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3053               iavcc->flow_inc = 0;
3054            }
3055         }
3056 #endif
3057         IF_TX(printk("ia send done\n");)  
3058         return 0;  
3059 }  
3060
3061 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3062 {
3063         IADEV *iadev; 
3064         struct ia_vcc *iavcc;
3065         unsigned long flags;
3066
3067         iadev = INPH_IA_DEV(vcc->dev);
3068         iavcc = INPH_IA_VCC(vcc); 
3069         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3070         {
3071             if (!skb)
3072                 printk(KERN_CRIT "null skb in ia_send\n");
3073             else dev_kfree_skb_any(skb);
3074             return -EINVAL;
3075         }                         
3076         spin_lock_irqsave(&iadev->tx_lock, flags); 
3077         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3078             dev_kfree_skb_any(skb);
3079             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3080             return -EINVAL; 
3081         }
3082         ATM_SKB(skb)->vcc = vcc;
3083  
3084         if (skb_peek(&iadev->tx_backlog)) {
3085            skb_queue_tail(&iadev->tx_backlog, skb);
3086         }
3087         else {
3088            if (ia_pkt_tx (vcc, skb)) {
3089               skb_queue_tail(&iadev->tx_backlog, skb);
3090            }
3091         }
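        /* If a backlog already exists, queue behind it so frames stay in
           order; otherwise try an immediate transmit and fall back to the
           backlog when ia_pkt_tx() reports that no descriptor was free. */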
3092         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3093         return 0;
3094
3095 }
3096
3097 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3098 {
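  /* Called repeatedly with an increasing *pos: position 0 prints the
     board-type line, position 1 the statistics block, and anything
     beyond that returns 0 to end the /proc output. */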
3099   int   left = *pos, n;   
3100   char  *tmpPtr;
3101   IADEV *iadev = INPH_IA_DEV(dev);
3102   if(!left--) {
3103      if (iadev->phy_type == FE_25MBIT_PHY) {
3104        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3105        return n;
3106      }
3107      if (iadev->phy_type == FE_DS3_PHY)
3108         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3109      else if (iadev->phy_type == FE_E3_PHY)
3110         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3111      else if (iadev->phy_type == FE_UTP_OPTION)
3112          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3113      else
3114         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3115      tmpPtr = page + n;
3116      if (iadev->pci_map_size == 0x40000)
3117         n += sprintf(tmpPtr, "-1KVC-");
3118      else
3119         n += sprintf(tmpPtr, "-4KVC-");  
3120      tmpPtr = page + n; 
3121      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3122         n += sprintf(tmpPtr, "1M  \n");
3123      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3124         n += sprintf(tmpPtr, "512K\n");
3125      else
3126        n += sprintf(tmpPtr, "128K\n");
3127      return n;
3128   }
3129   if (!left) {
3130      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3131                            "  Size of Tx Buffer  :  %u\n"
3132                            "  Number of Rx Buffer:  %u\n"
3133                            "  Size of Rx Buffer  :  %u\n"
3134                            "  Packets Received   :  %u\n"
3135                            "  Packets Transmitted:  %u\n"
3136                            "  Cells Received     :  %u\n"
3137                            "  Cells Transmitted  :  %u\n"
3138                            "  Board Dropped Cells:  %u\n"
3139                            "  Board Dropped Pkts :  %u\n",
3140                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3141                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3142                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3143                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3144                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3145   }
3146   return 0;
3147 }
3148   
3149 static const struct atmdev_ops ops = {  
3150         .open           = ia_open,  
3151         .close          = ia_close,  
3152         .ioctl          = ia_ioctl,  
3153         .getsockopt     = ia_getsockopt,  
3154         .setsockopt     = ia_setsockopt,  
3155         .send           = ia_send,  
3156         .phy_put        = ia_phy_put,  
3157         .phy_get        = ia_phy_get,  
3158         .change_qos     = ia_change_qos,  
3159         .proc_read      = ia_proc_read,
3160         .owner          = THIS_MODULE,
3161 };  
3162           
3163 static int __devinit ia_init_one(struct pci_dev *pdev,
3164                                  const struct pci_device_id *ent)
3165 {  
3166         struct atm_dev *dev;  
3167         IADEV *iadev;  
3168         unsigned long flags;
3169         int ret;
3170
3171         iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3172         if (!iadev) {
3173                 ret = -ENOMEM;
3174                 goto err_out;
3175         }
3176
3177         iadev->pci = pdev;
3178
3179         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3180                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3181         if (pci_enable_device(pdev)) {
3182                 ret = -ENODEV;
3183                 goto err_out_free_iadev;
3184         }
3185         dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3186         if (!dev) {
3187                 ret = -ENOMEM;
3188                 goto err_out_disable_dev;
3189         }
3190         dev->dev_data = iadev;
3191         IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3192         IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d\n", dev,
3193                 iadev->LineRate);)
3194
3195         pci_set_drvdata(pdev, dev);
3196
3197         ia_dev[iadev_count] = iadev;
3198         _ia_dev[iadev_count] = dev;
3199         iadev_count++;
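        /* ia_dev[]/_ia_dev[] are the global per-board tables that
           ia_ioctl() later indexes by board number. */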
3200         spin_lock_init(&iadev->misc_lock);
3201         /* First fixes first. I don't want to think about this now. */
3202         spin_lock_irqsave(&iadev->misc_lock, flags); 
3203         if (ia_init(dev) || ia_start(dev)) {  
3204                 IF_INIT(printk("IA register failed!\n");)
3205                 iadev_count--;
3206                 ia_dev[iadev_count] = NULL;
3207                 _ia_dev[iadev_count] = NULL;
3208                 spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3209                 ret = -EINVAL;
3210                 goto err_out_deregister_dev;
3211         }
3212         spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3213         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3214
3215         iadev->next_board = ia_boards;  
3216         ia_boards = dev;  
3217
3218         return 0;
3219
3220 err_out_deregister_dev:
3221         atm_dev_deregister(dev);  
3222 err_out_disable_dev:
3223         pci_disable_device(pdev);
3224 err_out_free_iadev:
3225         kfree(iadev);
3226 err_out:
3227         return ret;
3228 }
3229
3230 static void __devexit ia_remove_one(struct pci_dev *pdev)
3231 {
3232         struct atm_dev *dev = pci_get_drvdata(pdev);
3233         IADEV *iadev = INPH_IA_DEV(dev);
3234
3235         /* Disable phy interrupts */
3236         ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3237                                    SUNI_RSOP_CIE);
3238         udelay(1);
3239
3240         if (dev->phy && dev->phy->stop)
3241                 dev->phy->stop(dev);
3242
3243         /* De-register device */  
3244         free_irq(iadev->irq, dev);
3245         iadev_count--;
3246         ia_dev[iadev_count] = NULL;
3247         _ia_dev[iadev_count] = NULL;
3248         IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3249         atm_dev_deregister(dev);
3250
3251         iounmap(iadev->base);  
3252         pci_disable_device(pdev);
3253
3254         ia_free_rx(iadev);
3255         ia_free_tx(iadev);
3256
3257         kfree(iadev);
3258 }
3259
3260 static struct pci_device_id ia_pci_tbl[] = {
3261         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3262         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3263         { 0,}
3264 };
3265 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3266
3267 static struct pci_driver ia_driver = {
3268         .name =         DEV_LABEL,
3269         .id_table =     ia_pci_tbl,
3270         .probe =        ia_init_one,
3271         .remove =       __devexit_p(ia_remove_one),
3272 };
3273
3274 static int __init ia_module_init(void)
3275 {
3276         int ret;
3277
3278         ret = pci_register_driver(&ia_driver);
3279         if (ret >= 0) {
3280                 ia_timer.expires = jiffies + 3*HZ;
3281                 add_timer(&ia_timer); 
3282         } else
3283                 printk(KERN_ERR DEV_LABEL ": PCI driver registration failed\n");  
3284         return ret;
3285 }
3286
3287 static void __exit ia_module_exit(void)
3288 {
3289         pci_unregister_driver(&ia_driver);
3290
3291         del_timer(&ia_timer);
3292 }
3293
3294 module_init(ia_module_init);
3295 module_exit(ia_module_exit);