/******************************************************************************
         iphase.c: Device driver for Interphase ATM PCI adapter cards
                    Author: Peter Wang  <pwang@iphase.com>
                   Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
                   Interphase Corporation  <www.iphase.com>
                               Version: 1.0
*******************************************************************************

      This software may be used and distributed according to the terms
      of the GNU General Public License (GPL), incorporated herein by reference.
      Drivers based on this skeleton fall under the GPL and must retain
      the authorship (implicit copyright) notice.

      This program is distributed in the hope that it will be useful, but
      WITHOUT ANY WARRANTY; without even the implied warranty of
      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
      General Public License for more details.

      Modified from an incomplete driver for the Interphase 5575 1KVC 1M card,
      which was originally written by Monalisa Agrawal at UNH. This driver now
      supports a variety of variants of the Interphase ATM PCI (i)Chip adapter
      card family (see www.iphase.com/products/ClassSheet.cfm?ClassID=ATM),
      differing in PHY type, control memory size and packet memory size.
      The following is the change log and history:

          Bugfix Mona's UBR driver.
          Modify the basic memory allocation and DMA logic.
          Port the driver to the latest kernel from 2.0.46.
          Complete the ABR logic of the driver, and add the ABR work-
              around for the hardware anomalies.
          Add the CBR support.
          Add the flow control logic to the driver to allow rate-limited VCs.
          Add 4K VC support to the board with 512K control memory.
          Add support for all the variants of the Interphase ATM PCI
          (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
          (25M UTP25) and x531 (DS3 and E3).
          Add SMP support.

      Support and updates available at: ftp://ftp.iphase.com/pub/atm

*******************************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#include <linux/jiffies.h>
#include "iphase.h"
#include "suni.h"
#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))

#define PRIV(dev) ((struct suni_priv *) dev->phy_data)

static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
static void desc_dbg(IADEV *iadev);

static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
static void ia_led_timer(unsigned long arg);
static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
            |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;

module_param(IA_TX_BUF, int, 0);
module_param(IA_TX_BUF_SZ, int, 0);
module_param(IA_RX_BUF, int, 0);
module_param(IA_RX_BUF_SZ, int, 0);
module_param(IADebugFlag, uint, 0644);

MODULE_LICENSE("GPL");

/**************************** IA_LIB **********************************/

static void ia_init_rtn_q (IARTN_Q *que)
{
   que->next = NULL;
   que->tail = NULL;
}

static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
{
   data->next = NULL;
   if (que->next == NULL)
      que->next = que->tail = data;
   else {
      data->next = que->next;
      que->next = data;
   }
   return;
}

static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
   IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
   if (!entry) return -1;
   entry->data = data;
   entry->next = NULL;
   if (que->next == NULL)
      que->next = que->tail = entry;
   else {
      que->tail->next = entry;
      que->tail = que->tail->next;
   }
   return 1;
}

static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
   IARTN_Q *tmpdata;
   if (que->next == NULL)
      return NULL;
   tmpdata = que->next;
   if ( que->next == que->tail)
      que->next = que->tail = NULL;
   else
      que->next = que->next->next;
   return tmpdata;
}

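/*
 * Walk the transmit complete queue (TCQ) from our cached write pointer up to
 * the hardware write pointer, releasing completed descriptors: the per-VC
 * in-flight count is decremented and, for rate-limited VCs, the finished skb
 * is queued on tx_return_q for ia_tx_poll() to hand back to the ATM layer.
 */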
static void ia_hack_tcq(IADEV *dev) {

  u_short               desc1;
  u_short               tcq_wr;
  struct ia_vcc         *iavcc_r = NULL;

  tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
  while (dev->host_tcq_wr != tcq_wr) {
     desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
     if (!desc1) ;
     else if (!dev->desc_tbl[desc1 -1].timestamp) {
        IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
        *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
     }
     else if (dev->desc_tbl[desc1 -1].timestamp) {
        if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
           printk("IA: Fatal err in get_desc\n");
           continue;
        }
        iavcc_r->vc_desc_cnt--;
        dev->desc_tbl[desc1 -1].timestamp = 0;
        IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
                                   dev->desc_tbl[desc1 -1].txskb, desc1);)
        if (iavcc_r->pcr < dev->rate_limit) {
           IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
           if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
              printk("ia_hack_tcq: No memory available\n");
        }
        dev->desc_tbl[desc1 -1].iavcc = NULL;
        dev->desc_tbl[desc1 -1].txskb = NULL;
     }
     dev->host_tcq_wr += 2;
     if (dev->host_tcq_wr > dev->ffL.tcq_ed)
        dev->host_tcq_wr = dev->ffL.tcq_st;
  }
} /* ia_hack_tcq */

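/*
 * Return the next free transmit descriptor number from the TCQ, or 0xFFFF if
 * none is available.  Periodically (roughly every 50 jiffies, or whenever the
 * TCQ appears empty) it also scans the descriptor table for descriptors that
 * have been outstanding longer than their VC timeout and recycles them.
 */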
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short               desc_num, i;
  struct sk_buff        *skb;
  struct ia_vcc         *iavcc_r = NULL;
  unsigned long delta;
  static unsigned long timer = 0;
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
           i++;
           continue;
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
        dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}

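/*
 * Workaround for an (i)Chip ABR hardware anomaly: if an ABR VC appears stuck
 * (its segmentation state and schedule-table position stop changing across
 * several checks), force the VC back to the idle state and re-insert its VCI
 * into a free slot of the ABR schedule table.
 */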
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char                foundLockUp;
  vcstatus_t            *vcstatus;
  u_short               *shd_tbl;
  u_short               tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int  i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     if( vcstatus->cnt == 0x05 ) {
        abr_vc += vcc->vci;
        eabr_vc += vcc->vci;
        if( eabr_vc->last_desc ) {
           if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
              if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
                 foundLockUp = 1;
           }
           else {
              tempCellSlot = abr_vc->last_cell_slot;
              tempFract    = abr_vc->fraction;
              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
                 foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
           }
        } /* last descriptor */
        vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
        writew(0xFFFD, dev->seg_reg+MODE_REG_0);
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
        shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
        for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
        if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
        vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}

/*
** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
**
**  +----+----+------------------+-------------------------------+
**  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
**  +----+----+------------------+-------------------------------+
**
**    R = reserved (written as 0)
**    NZ = 0 if 0 cells/sec; 1 otherwise
**
**    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
*/
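/*
** Worked example: 1000 cells/sec = 0b1111101000, so the exponent is 9 and the
** mantissa is 1000 - 512 = 488, giving (1 + 488/512) * 2^9 = 1000 cells/sec.
*/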
static u16
cellrate_to_float(u32 cr)
{

#define NZ              0x4000
#define M_BITS          9               /* Number of bits in mantissa */
#define E_BITS          5               /* Number of bits in exponent */
#define M_MASK          0x1ff
#define E_MASK          0x1f
  u16   flot;
  u32   tmp = cr & 0x00ffffff;
  int   i   = 0;
  if (cr == 0)
     return 0;
  while (tmp != 1) {
     tmp >>= 1;
     i++;
  }
  if (i == M_BITS)
     flot = NZ | (i << M_BITS) | (cr & M_MASK);
  else if (i < M_BITS)
     flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
  else
     flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
  return flot;
}

#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
*/
static u32
float_to_cellrate(u16 rate)
{
  u32   exp, mantissa, cps;
  if ((rate & NZ) == 0)
     return 0;
  exp = (rate >> M_BITS) & E_MASK;
  mantissa = rate & M_MASK;
  if (exp == 0)
     return 1;
  cps = (1 << M_BITS) | mantissa;
  if (exp == M_BITS)
     cps = cps;
  else if (exp > M_BITS)
     cps <<= (exp - M_BITS);
  else
     cps >>= (M_BITS - exp);
  return cps;
}
#endif

static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
  srv_p->class_type = ATM_ABR;
  srv_p->pcr        = dev->LineRate;
  srv_p->mcr        = 0;
  srv_p->icr        = 0x055cb7;
  srv_p->tbe        = 0xffffff;
  srv_p->frtt       = 0x3a;
  srv_p->rif        = 0xf;
  srv_p->rdf        = 0xb;
  srv_p->nrm        = 0x4;
  srv_p->trm        = 0x7;
  srv_p->cdf        = 0x3;
  srv_p->adtf       = 50;
}

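/*
 * Program the ABR parameters for a VC into the (i)Chip VC tables: flag == 1
 * initializes the segmentation (FFRED) side, flag == 0 the reassembly (RFRED)
 * side.  Rates are converted to the 16-bit floating point format above.
 */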
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                                                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry  *f_abr_vc;
  r_vc_abr_entry  *r_abr_vc;
  u32           icr;
  u8            trm, nrm, crm;
  u16           adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
          return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
          return INVALID_MCR;
       if (!(srv_p->icr))
          srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
          return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
          return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
          return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
          return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
          return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
          srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
          return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
          return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
          return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
                                  /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
                                ((srv_p->tbe/srv_p->frtt)*1000000) :
                                (1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr        += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return        0;
}
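/*
 * Reserve CBR bandwidth for a VC by writing its VCI into the CBR schedule
 * table.  The number of table entries needed is derived from the PCR and the
 * scheduler granularity, and the entries are spread as evenly as possible:
 * successive entries are placed roughly CbrTotEntries / entries slots apart,
 * with the division remainder accumulated so the spacing error never exceeds
 * one slot.  If a slot is already taken, the nearest free slot around the
 * ideal position is used instead.
 */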
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16  cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be  smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
static void ia_cbrVc_close (struct atm_vcc *vcc) {
   IADEV *iadev;
   u16 *SchedTbl, NullVci = 0;
   u32 i, NumFound;

   iadev = INPH_IA_DEV(vcc->dev);
   iadev->NumEnabledCBR--;
   SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
   if (iadev->NumEnabledCBR == 0) {
      writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
      IF_CBR (printk("CBR support disabled\n");)
   }
   NumFound = 0;
   for (i=0; i < iadev->CbrTotEntries; i++)
   {
      if (*SchedTbl == vcc->vci) {
         iadev->CbrRemEntries++;
         *SchedTbl = NullVci;
         IF_CBR(NumFound++;)
      }
      SchedTbl++;
   }
   IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
}

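/* Number of free transmit descriptors currently queued in the TCQ. */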
static int ia_avail_descs(IADEV *iadev) {
   int tmp = 0;
   ia_hack_tcq(iadev);
   if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
      tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
   else
      tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
                   iadev->ffL.tcq_st) / 2;
   return tmp;
}

static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);

static int ia_que_tx (IADEV *iadev) {
   struct sk_buff *skb;
   int num_desc;
   struct atm_vcc *vcc;
   struct ia_vcc *iavcc;
   num_desc = ia_avail_descs(iadev);

   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
      if (!(vcc = ATM_SKB(skb)->vcc)) {
         dev_kfree_skb_any(skb);
         printk("ia_que_tx: Null vcc\n");
         break;
      }
      if (!test_bit(ATM_VF_READY,&vcc->flags)) {
         dev_kfree_skb_any(skb);
         printk("Free the SKB on closed vci %d \n", vcc->vci);
         break;
      }
      iavcc = INPH_IA_VCC(vcc);
      if (ia_pkt_tx (vcc, skb)) {
         skb_queue_head(&iadev->tx_backlog, skb);
      }
      num_desc--;
   }
   return 0;
}

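/*
 * Reclaim skbs whose transmission has completed: entries queued on
 * tx_return_q by ia_hack_tcq() are matched against the VC's txing_skb list,
 * the skbs are returned to the ATM layer (vcc->pop) or freed, and any
 * backlogged packets are then given another chance to be transmitted.
 */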
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q *  rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           printk("ia_tx_poll: skb is null\n");
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Releasing an SKB that does not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
        u32     t;
        int     i;
        /*
         * Issue a command to enable writes to the NOVRAM
         */
        NVRAM_CMD (EXTEND + EWEN);
        NVRAM_CLR_CE;
        /*
         * issue the write command
         */
        NVRAM_CMD(IAWRITE + addr);
        /*
         * Send the data, starting with D15, then D14, and so on for 16 bits
         */
        for (i=15; i>=0; i--) {
                NVRAM_CLKOUT (val & 0x8000);
                val <<= 1;
        }
        NVRAM_CLR_CE;
        CFG_OR(NVCE);
        t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
        while (!(t & NVDO))
                t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

        NVRAM_CLR_CE;
        /*
         * disable writes again
         */
        NVRAM_CMD(EXTEND + EWDS)
        NVRAM_CLR_CE;
        CFG_AND(~NVDI);
}
#endif

static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
        u_short val;
        u32     t;
        int     i;
        /*
         * Read the first bit that was clocked with the falling edge of
         * the last command data clock
         */
        NVRAM_CMD(IAREAD + addr);
        /*
         * Now read the rest of the bits, the next bit read is D14, then D13,
         * and so on.
         */
        val = 0;
        for (i=15; i>=0; i--) {
                NVRAM_CLKIN(t);
                val |= (t << i);
        }
        NVRAM_CLR_CE;
        CFG_AND(~NVDI);
        return val;
}

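/*
 * Read the adapter configuration word from the EEPROM and size the TX/RX
 * buffer pools to fit the amount of packet memory on the board (1M, 512K or
 * less).  The PHY type read here also selects the line rate, in cells/sec,
 * used elsewhere for traffic management.
 */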
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 2;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 2;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   else {
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 8;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 8;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
     iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
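   /*
    * Usable cell rate: line bit rate / 8 bits per byte / 53 bytes per cell,
    * scaled by 26/27, which presumably accounts for physical-layer framing
    * overhead on these PHYs.
    */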
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
       iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}

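/*
 * PHY (front end) interrupt handler: read and acknowledge the framer
 * interrupt status and refresh the cached carrier_detect flag from the
 * loss-of-signal indication of whichever PHY is fitted.
 */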
static void IaFrontEndIntr(IADEV *iadev) {
  volatile IA_SUNI *suni;
  volatile ia_mb25_t *mb25;
  volatile suni_pm7345_t *suni_pm7345;
  u32 intr_status;
  u_int frmr_intr;

  if(iadev->phy_type & FE_25MBIT_PHY) {
     mb25 = (ia_mb25_t*)iadev->phy;
     iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
  } else if (iadev->phy_type & FE_DS3_PHY) {
     suni_pm7345 = (suni_pm7345_t *)iadev->phy;
     /* clear FRMR interrupts */
     frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat;
     iadev->carrier_detect =
           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
  } else if (iadev->phy_type & FE_E3_PHY ) {
     suni_pm7345 = (suni_pm7345_t *)iadev->phy;
     frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
     iadev->carrier_detect =
           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
  }
  else {
     suni = (IA_SUNI *)iadev->phy;
     intr_status = suni->suni_rsop_status & 0xff;
     iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
  }
  if (iadev->carrier_detect)
    printk("IA: SUNI carrier detected\n");
  else
    printk("IA: SUNI carrier lost signal\n");
  return;
}

static void ia_mb25_init (IADEV *iadev)
{
   volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
#if 0
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
   mb25->mb25_diag_control = 0;
   /*
    * Initialize carrier detect state
    */
   iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
   return;
}

static void ia_suni_pm7345_init (IADEV *iadev)
{
   volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
   if (iadev->phy_type & FE_DS3_PHY)
   {
      iadev->carrier_detect =
          Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
      suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
      suni_pm7345->suni_ds3_frm_cfg = 1;
      suni_pm7345->suni_ds3_tran_cfg = 1;
      suni_pm7345->suni_config = 0;
      suni_pm7345->suni_splr_cfg = 0;
      suni_pm7345->suni_splt_cfg = 0;
   }
   else
   {
      iadev->carrier_detect =
          Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
      suni_pm7345->suni_e3_frm_fram_options = 0x4;
      suni_pm7345->suni_e3_frm_maint_options = 0x20;
      suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
      suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
      suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
      suni_pm7345->suni_e3_tran_fram_options = 0x1;
      suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
      suni_pm7345->suni_splr_cfg = 0x41;
      suni_pm7345->suni_splt_cfg = 0x41;
   }
   /*
    * Enable RSOP loss of signal interrupt.
    */
   suni_pm7345->suni_intr_enbl = 0x28;

   /*
    * Clear error counters
    */
   suni_pm7345->suni_id_reset = 0;

   /*
    * Clear "PMCTST" in master test register.
    */
   suni_pm7345->suni_master_test = 0;

   suni_pm7345->suni_rxcp_ctrl = 0x2c;
   suni_pm7345->suni_rxcp_fctrl = 0x81;

   suni_pm7345->suni_rxcp_idle_pat_h1 =
        suni_pm7345->suni_rxcp_idle_pat_h2 =
        suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
   suni_pm7345->suni_rxcp_idle_pat_h4 = 1;

   suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;

   suni_pm7345->suni_rxcp_cell_pat_h1 =
        suni_pm7345->suni_rxcp_cell_pat_h2 =
        suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
   suni_pm7345->suni_rxcp_cell_pat_h4 = 1;

   suni_pm7345->suni_rxcp_cell_mask_h1 =
        suni_pm7345->suni_rxcp_cell_mask_h2 =
        suni_pm7345->suni_rxcp_cell_mask_h3 =
        suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;

   suni_pm7345->suni_txcp_ctrl = 0xa4;
   suni_pm7345->suni_txcp_intr_en_sts = 0x10;
   suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;

   suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
                                 SUNI_PM7345_CLB |
                                 SUNI_PM7345_DLB |
                                 SUNI_PM7345_PLB);
#ifdef __SNMP__
   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
   return;
}


/***************************** IA_LIB END *****************************/

#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
static void xdump( u_char*  cp, int  length, char*  prefix )
{
    int col, count;
    u_char prntBuf[120];
    u_char*  pBuf = prntBuf;
    count = 0;
    while(count < length){
        pBuf += sprintf( pBuf, "%s", prefix );
        for(col = 0;count + col < length && col < 16; col++){
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
        }
        while(col++ < 16){      /* pad end of buffer with blanks */
            if ((col % 4) == 0)
                sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "   " );
        }
        pBuf += sprintf( pBuf, "  " );
        for(col = 0;count + col < length && col < 16; col++){
            if (isprint((int)cp[count + col]))
                pBuf += sprintf( pBuf, "%c", cp[count + col] );
            else
                pBuf += sprintf( pBuf, "." );
                }
        printk("%s\n", prntBuf);
        count += col;
        pBuf = prntBuf;
    }

}  /* close xdump(... */
#endif /* CONFIG_ATM_IA_DEBUG */


static struct atm_dev *ia_boards = NULL;

#define ACTUAL_RAM_BASE \
        RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
        IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
        IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))


/*-- some utilities and memory allocation stuff will come here -------------*/

static void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 i;
  void __iomem *tmp;
  // regval = readl((u32)ia_cmds->maddr);
  tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr,
                   iadev->ffL.tcq_rd);
  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  i = 0;
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}


/*----------------------------- Receiving side stuff --------------------------*/

static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving side will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
        vci = readw(iadev->reass_ram+excpq_rd_ptr);
        error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
        excpq_rd_ptr += 4;
        if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
            excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
        writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}

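/* Return a receive descriptor to the free descriptor queue (FDQ). */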
static void free_desc(struct atm_dev *dev, int desc)
{
        IADEV *iadev;
        iadev = INPH_IA_DEV(dev);
        writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
        iadev->rfL.fdq_wr +=2;
        if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
                iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;
        writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
}


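/*
 * Take one completed PDU off the packet complete queue (PCQ): look up the
 * receive buffer descriptor and its VCC, allocate an skb, and queue a DLE so
 * the DMA engine copies the packet from adapter memory into the skb.
 */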
static int rx_pkt(struct atm_dev *dev)
{
        IADEV *iadev;
        struct atm_vcc *vcc;
        unsigned short status;
        struct rx_buf_desc __iomem *buf_desc_ptr;
        int desc;
        struct dle* wr_ptr;
        int len;
        struct sk_buff *skb;
        u_int buf_addr, dma_addr;

        iadev = INPH_IA_DEV(dev);
        if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
        {
            printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
            return -EINVAL;
        }
        /* mask 1st 3 bits to get the actual descno. */
        desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
        IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                                    iadev->reass_ram, iadev->rfL.pcq_rd, desc);
              printk(" pcq_wr_ptr = 0x%x\n",
                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
        /* update the read pointer - maybe we should do this at the end */
        if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
                iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
        else
                iadev->rfL.pcq_rd += 2;
        writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

        /* get the buffer desc entry.
                update stuff. - doesn't seem to be any update necessary
        */
        buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
        /* make the ptr point to the corresponding buffer desc entry */
        buf_desc_ptr += desc;
        if (!desc || (desc > iadev->num_rx_desc) ||
                      ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
            free_desc(dev, desc);
            IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
            return -1;
        }
        vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
        if (!vcc)
        {
                free_desc(dev, desc);
                printk("IA: null vcc, drop PDU\n");
                return -1;
        }


        /* might want to check the status bits for errors */
        status = (u_short) (buf_desc_ptr->desc_mode);
        if (status & (RX_CER | RX_PTE | RX_OFL))
        {
                atomic_inc(&vcc->stats->rx_err);
                IF_ERR(printk("IA: bad packet, dropping it");)
                if (status & RX_CER) {
                    IF_ERR(printk(" cause: packet CRC error\n");)
                }
                else if (status & RX_PTE) {
                    IF_ERR(printk(" cause: packet time out\n");)
                }
                else {
                    IF_ERR(printk(" cause: buffer overflow\n");)
                }
                goto out_free_desc;
        }

        /*
                build DLE.
        */

        buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
        dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
        len = dma_addr - buf_addr;
        if (len > iadev->rx_buf_sz) {
           printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
           atomic_inc(&vcc->stats->rx_err);
           goto out_free_desc;
        }

        if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
           if (vcc->vci < 32)
              printk("Drop control packets\n");
           goto out_free_desc;
        }
        skb_put(skb,len);
        // pwang_test
        ATM_SKB(skb)->vcc = vcc;
        ATM_DESC(skb) = desc;
        skb_queue_tail(&iadev->rx_dma_q, skb);

        /* Build the DLE structure */
        wr_ptr = iadev->rx_dle_q.write;
        wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
                len, PCI_DMA_FROMDEVICE);
        wr_ptr->local_pkt_addr = buf_addr;
        wr_ptr->bytes = len;    /* We don't know this do we ?? */
        wr_ptr->mode = DMA_INT_ENABLE;

        /* should take care of wrap around here too. */
        if(++wr_ptr == iadev->rx_dle_q.end)
             wr_ptr = iadev->rx_dle_q.start;
        iadev->rx_dle_q.write = wr_ptr;
        udelay(1);
        /* Increment transaction counter */
        writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:    return 0;
out_free_desc:
        free_desc(dev, desc);
        goto out;
}

static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
        /* do something */
        /* Basically received an interrupt for receiving a packet.
        A descriptor would have been written to the packet complete
        queue. Get all the descriptors and set up dma to move the
        packets till the packet complete queue is empty..
        */
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
        while(!(state & PCQ_EMPTY))
        {
             rx_pkt(dev);
             state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        }
        iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     if (iadev->rxing) {
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        for (i = 1; i <= iadev->num_rx_desc; i++)
               free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
        /* probably need to handle the exception queue also. */
        IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
        rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
        /* need to handle the raw incoming cells. This depends on
        whether we have programmed to receive the raw cells or not.
        Else ignore. */
        IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)
  }
}


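/*
 * DMA completion handler for the receive DLE ring: for each finished DLE,
 * unmap the buffer, recycle the hardware descriptor, trim the AAL5 trailer
 * off the skb and push it up to the ATM layer.
 */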
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
        - do we really need to do this. Think not. */
  /* DMA is done, just get all the receive buffers from the rx dma queue
        and push them up to the higher layer protocol. Also free the desc
        associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
          dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

          pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
                len, PCI_DMA_FROMDEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
          if (!vcc) {
              printk("IA: null vcc\n");
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
          }
          ia_vcc = INPH_IA_VCC(vcc);
          if (ia_vcc == NULL)
          {
             atomic_inc(&vcc->stats->rx_err);
             dev_kfree_skb_any(skb);
             atm_return(vcc, atm_guess_pdu2truesize(len));
             goto INCR_DLE;
           }
          // get real pkt length  pwang_test
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
          length = swap_byte_order(trailer->length);
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             dev_kfree_skb_any(skb);
             atm_return(vcc, atm_guess_pdu2truesize(len));
             goto INCR_DLE;
          }
          skb_trim(skb, length);

          /* Display the packet */
          IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

          IF_RX(printk("rx_dle_intr: skb push");)
          vcc->push(vcc,skb);
          atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      if (++dle == iadev->rx_dle_q.end)
          dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
                unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                                      iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}


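/*
 * Per-VC receive-side setup: mark the VCI valid in the reassembly VC table,
 * program ABR reassembly parameters where applicable, and remember the VCC in
 * rx_open[] so incoming PDUs can be matched back to it.
 */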
static int open_rx(struct atm_vcc *vcc)
{
        IADEV *iadev;
        u_short __iomem *vc_table;
        u_short __iomem *reass_ptr;
        IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

        if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
        iadev = INPH_IA_DEV(vcc->dev);
        if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
           if (iadev->phy_type & FE_25MBIT_PHY) {
               printk("IA: ABR not supported\n");
               return -EINVAL;
           }
        }
        /* Make only this VCI in the vc table valid and let all
                others be invalid entries */
        vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
        vc_table += vcc->vci;
        /* mask the last 6 bits and OR it with 3 for 1K VCs */

        *vc_table = vcc->vci << 6;
        /* Also keep a list of open rx vcs so that we can attach them with
                incoming PDUs later. */
        if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
                                (vcc->qos.txtp.traffic_class == ATM_ABR))
        {
                srv_cls_param_t srv_p;
                init_abr_vc(iadev, &srv_p);
                ia_open_abr_vc(iadev, &srv_p, vcc, 0);
        }
        else {  /* for UBR  later may need to add CBR logic */
                reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
                reass_ptr += vcc->vci;
                *reass_ptr = NO_AAL5_PKT;
        }

        if (iadev->rx_open[vcc->vci])
                printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
                        vcc->dev->number, vcc->vci);
        iadev->rx_open[vcc->vci] = vcc;
        return 0;
}

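/*
 * One-time receive-side initialization: allocate the DLE ring used for DMA,
 * lay out the reassembly control memory (buffer descriptors, free queue,
 * packet complete queue, exception queue, reassembly and VC tables) and
 * cache the queue pointers in software.
 */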
1394 static int rx_init(struct atm_dev *dev)  
1395 {  
1396         IADEV *iadev;  
1397         struct rx_buf_desc __iomem *buf_desc_ptr;  
1398         unsigned long rx_pkt_start = 0;  
1399         void *dle_addr;  
1400         struct abr_vc_table  *abr_vc_table; 
1401         u16 *vc_table;  
1402         u16 *reass_table;  
1403         int i,j, vcsize_sel;  
1404         u_short freeq_st_adr;  
1405         u_short *freeq_start;  
1406   
1407         iadev = INPH_IA_DEV(dev);  
1408   //    spin_lock_init(&iadev->rx_lock); 
1409   
1410         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1411         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1412                                         &iadev->rx_dle_dma);  
1413         if (!dle_addr)  {  
1414                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1415                 goto err_out;
1416         }
1417         iadev->rx_dle_q.start = (struct dle *)dle_addr;
1418         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1419         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1420         iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1421         /* the end of the dle q points to the entry after the last  
1422         DLE that can be used. */  
1423   
1424         /* write the upper 20 bits of the start address to rx list address register */  
1425         /* We know this is 32bit bus addressed so the following is safe */
1426         writel(iadev->rx_dle_dma & 0xfffff000,
1427                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1428         IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1429                       iadev->dma+IPHASE5575_TX_LIST_ADDR,
1430                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1431         printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1432                       iadev->dma+IPHASE5575_RX_LIST_ADDR,
1433                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1434   
1435         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1436         writew(0, iadev->reass_reg+MODE_REG);  
1437         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1438   
1439         /* Receive side control memory map  
1440            -------------------------------  
1441   
1442                 Buffer descr    0x0000 (736 - 23K)  
1443                 VP Table        0x5c00 (256 - 512)  
1444                 Except q        0x5e00 (128 - 512)  
1445                 Free buffer q   0x6000 (1K - 2K)  
1446                 Packet comp q   0x6800 (1K - 2K)  
1447                 Reass Table     0x7000 (1K - 2K)  
1448                 VC Table        0x7800 (1K - 2K)  
1449                 ABR VC Table    0x8000 (1K - 32K)  
1450         */  
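        /* Note: these offsets are presumably for the 1K-VC board
           (iadev->memSize == 1); every base programmed below is multiplied
           by iadev->memSize, so the 4K-VC board uses the same layout at
           four times the offsets. */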
1451           
1452         /* Base address for Buffer Descriptor Table */  
1453         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1454         /* Set the buffer size register */  
1455         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1456   
1457         /* Initialize each entry in the Buffer Descriptor Table */  
1458         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1459         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1460         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1461         buf_desc_ptr++;  
1462         rx_pkt_start = iadev->rx_pkt_ram;  
1463         for(i=1; i<=iadev->num_rx_desc; i++)  
1464         {  
1465                 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1466                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1467                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1468                 buf_desc_ptr++;           
1469                 rx_pkt_start += iadev->rx_buf_sz;  
1470         }  
1471         IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1472         i = FREE_BUF_DESC_Q*iadev->memSize; 
1473         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1474         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1475         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1476                                          iadev->reass_reg+FREEQ_ED_ADR);
1477         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1478         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1479                                         iadev->reass_reg+FREEQ_WR_PTR);    
1480         /* Fill the FREEQ with all the free descriptors. */  
1481         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1482         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1483         for(i=1; i<=iadev->num_rx_desc; i++)  
1484         {  
1485                 *freeq_start = (u_short)i;  
1486                 freeq_start++;  
1487         }  
1488         IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1489         /* Packet Complete Queue */
1490         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1491         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1492         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1493         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1494         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1495
1496         /* Exception Queue */
1497         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1498         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1499         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1500                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1501         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1502         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1503  
1504         /* Load local copy of FREEQ and PCQ ptrs */
1505         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1506         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1507         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1508         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1509         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1510         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1511         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1512         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1513         
1514         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1515               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1516               iadev->rfL.pcq_wr);)                
1517         /* just for check - no VP TBL */  
1518         /* VP Table */  
1519         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1520         /* initialize VP Table for invalid VPIs  
1521                 - I guess we can write all 1s or 0x000f in the entire memory  
1522                   space or something similar.  
1523         */  
1524   
1525         /* This seems to work and looks right to me too !!! */  
1526         i =  REASS_TABLE * iadev->memSize;
1527         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1528         /* initialize Reassembly table to I don't know what ???? */  
1529         reass_table = (u16 *)(iadev->reass_ram+i);  
1530         j = REASS_TABLE_SZ * iadev->memSize;
1531         for(i=0; i < j; i++)  
1532                 *reass_table++ = NO_AAL5_PKT;  
1533        i = 8*1024;
1534        vcsize_sel =  0;
1535        while (i != iadev->num_vc) {
1536           i /= 2;
1537           vcsize_sel++;
1538        }
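       /* vcsize_sel encodes num_vc as a power-of-two fraction of 8K entries:
          e.g. num_vc == 1024 needs three halvings (8K -> 4K -> 2K -> 1K), so
          vcsize_sel == 3, while num_vc == 4096 gives vcsize_sel == 1.  The
          value is OR-ed into the low bits of VC_LKUP_BASE below. */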
1539        i = RX_VC_TABLE * iadev->memSize;
1540        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1541        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1542         j = RX_VC_TABLE_SZ * iadev->memSize;
1543         for(i = 0; i < j; i++)  
1544         {  
1545                 /* shift the reassembly pointer by 3 + lower 3 bits of   
1546                 vc_lkup_base register (=3 for 1K VCs) and the last byte   
1547                 is those low 3 bits.   
1548                 Shall program this later.  
1549                 */  
1550                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1551                 vc_table++;  
1552         }  
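        /* Each RX VC table entry is one 16-bit word: the index shifted left
           by 6 forms the upper bits (the per-VC reassembly pointer described
           above) and the low bits carry status - 15 presumably marks the VCI
           as invalid/closed, matching what ia_close() writes back, while
           open_rx() clears those low bits for an active VC. */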
1553         /* ABR VC table */
1554         i =  ABR_VC_TABLE * iadev->memSize;
1555         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1556                    
1557         i = ABR_VC_TABLE * iadev->memSize;
1558         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1559         j = REASS_TABLE_SZ * iadev->memSize;
1560         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1561         for(i = 0; i < j; i++) {                
1562                 abr_vc_table->rdf = 0x0003;
1563                 abr_vc_table->air = 0x5eb1;
1564                 abr_vc_table++;         
1565         }  
1566
1567         /* Initialize other registers */  
1568   
1569         /* VP Filter Register set for VC Reassembly only */  
1570         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1571         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1572         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1573
1574         /* Packet Timeout Count  related Registers : 
1575            Set packet timeout to occur in about 3 seconds
1576            Set Packet Aging Interval count register to overflow in about 4 us
1577         */  
1578         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1579
1580         i = (j >> 6) & 0xFF;
1581         j += 2 * (j - 1);
1582         i |= ((j << 2) & 0xFF00);
1583         writew(i, iadev->reass_reg+TMOUT_RANGE);
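        /* j here is still REASS_TABLE_SZ * iadev->memSize from above, so the
           TMOUT_RANGE value is derived from the table size: the low byte is
           (j >> 6) and the high byte comes from ((3*j - 2) << 2).  The exact
           hardware encoding isn't documented here; per the comment above the
           intent is roughly a 3 second packet timeout. */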
1584
1585         /* initiate the desc_tble */
1586         for(i=0; i<iadev->num_tx_desc;i++)
1587             iadev->desc_tbl[i].timestamp = 0;
1588
1589         /* to clear the interrupt status register - read it */  
1590         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1591   
1592         /* Mask Register - clear it */  
1593         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1594   
1595         skb_queue_head_init(&iadev->rx_dma_q);  
1596         iadev->rx_free_desc_qhead = NULL;   
1597
1598         iadev->rx_open = kcalloc(iadev->num_vc, sizeof(struct atm_vcc *), GFP_KERNEL);
1599         if (!iadev->rx_open) {
1600                 printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n",
1601                 dev->number);  
1602                 goto err_free_dle;
1603         }  
1604
1605         iadev->rxing = 1;
1606         iadev->rx_pkt_cnt = 0;
1607         /* Mode Register */  
1608         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1609         return 0;  
1610
1611 err_free_dle:
1612         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1613                             iadev->rx_dle_dma);  
1614 err_out:
1615         return -ENOMEM;
1616 }  
1617   
1618
1619 /*  
1620         The memory map suggested in appendix A and the coding for it.   
1621         Keeping it around just in case we change our mind later.  
1622   
1623                 Buffer descr    0x0000 (128 - 4K)  
1624                 UBR sched       0x1000 (1K - 4K)  
1625                 UBR Wait q      0x2000 (1K - 4K)  
1626                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1627                                         (128 - 256) each  
1628                 extended VC     0x4000 (1K - 8K)  
1629                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1630                 CBR sched       0x7000 (as needed)  
1631                 VC table        0x8000 (1K - 32K)  
1632 */  
1633   
1634 static void tx_intr(struct atm_dev *dev)  
1635 {  
1636         IADEV *iadev;  
1637         unsigned short status;  
1638         unsigned long flags;
1639
1640         iadev = INPH_IA_DEV(dev);  
1641   
1642         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1643         if (status & TRANSMIT_DONE){
1644
1645            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1646            spin_lock_irqsave(&iadev->tx_lock, flags);
1647            ia_tx_poll(iadev);
1648            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1649            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1650            if (iadev->close_pending)  
1651                wake_up(&iadev->close_wait);
1652         }         
1653         if (status & TCQ_NOT_EMPTY)  
1654         {  
1655             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1656         }  
1657 }  
1658   
1659 static void tx_dle_intr(struct atm_dev *dev)
1660 {
1661         IADEV *iadev;
1662         struct dle *dle, *cur_dle; 
1663         struct sk_buff *skb;
1664         struct atm_vcc *vcc;
1665         struct ia_vcc  *iavcc;
1666         u_int dle_lp;
1667         unsigned long flags;
1668
1669         iadev = INPH_IA_DEV(dev);
1670         spin_lock_irqsave(&iadev->tx_lock, flags);   
1671         dle = iadev->tx_dle_q.read;
1672         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1673                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1674         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
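            /* The list-address register advances as the hardware consumes
               DLEs; masking it to the ring size and shifting right by 4
               (which implies sizeof(struct dle) == 16) gives the index of
               the DLE the hardware is currently on, so every entry between
               ->read and cur_dle has finished DMA and can be reaped. */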
1675         while (dle != cur_dle)
1676         {
1677             /* free the DMAed skb */ 
1678             skb = skb_dequeue(&iadev->tx_dma_q); 
1679             if (!skb) break;
1680
1681             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1682             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1683                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1684                                  PCI_DMA_TODEVICE);
1685             }
1686             vcc = ATM_SKB(skb)->vcc;
1687             if (!vcc) {
1688                   printk("tx_dle_intr: vcc is null\n");
1689                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1690                   dev_kfree_skb_any(skb);
1691
1692                   return;
1693             }
1694             iavcc = INPH_IA_VCC(vcc);
1695             if (!iavcc) {
1696                   printk("tx_dle_intr: iavcc is null\n");
1697                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1698                   dev_kfree_skb_any(skb);
1699                   return;
1700             }
1701             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1702                if ((vcc->pop) && (skb->len != 0))
1703                {     
1704                  vcc->pop(vcc, skb);
1705                } 
1706                else {
1707                  dev_kfree_skb_any(skb);
1708                }
1709             }
1710             else { /* Hold the rate-limited skb for flow control */
1711                IA_SKB_STATE(skb) |= IA_DLED;
1712                skb_queue_tail(&iavcc->txing_skb, skb);
1713             }
1714             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
1715             if (++dle == iadev->tx_dle_q.end)
1716                  dle = iadev->tx_dle_q.start;
1717         }
1718         iadev->tx_dle_q.read = dle;
1719         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1720 }
1721   
1722 static int open_tx(struct atm_vcc *vcc)  
1723 {  
1724         struct ia_vcc *ia_vcc;  
1725         IADEV *iadev;  
1726         struct main_vc *vc;  
1727         struct ext_vc *evc;  
1728         int ret;
1729         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1730         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1731         iadev = INPH_IA_DEV(vcc->dev);  
1732         
1733         if (iadev->phy_type & FE_25MBIT_PHY) {
1734            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1735                printk("IA:  ABR not supported\n");
1736                return -EINVAL; 
1737            }
1738           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1739                printk("IA:  CBR not supported\n");
1740                return -EINVAL; 
1741           }
1742         }
1743         ia_vcc =  INPH_IA_VCC(vcc);
1744         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1745         if (vcc->qos.txtp.max_sdu > 
1746                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1747            printk("IA:  SDU size %d exceeds the configured buffer size %d\n",
1748                   vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
1749            vcc->dev_data = NULL;
1750            kfree(ia_vcc);
1751            return -EINVAL; 
1752         }
1753         ia_vcc->vc_desc_cnt = 0;
1754         ia_vcc->txing = 1;
1755
1756         /* find pcr */
1757         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1758            vcc->qos.txtp.pcr = iadev->LineRate;
1759         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1760            vcc->qos.txtp.pcr = iadev->LineRate;
1761         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1762            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1763         if (vcc->qos.txtp.pcr > iadev->LineRate)
1764              vcc->qos.txtp.pcr = iadev->LineRate;
1765         ia_vcc->pcr = vcc->qos.txtp.pcr;
1766
1767         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1768         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1769         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1770         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
1771         if (ia_vcc->pcr < iadev->rate_limit)
1772            skb_queue_head_init (&ia_vcc->txing_skb);
1773         if (ia_vcc->pcr < iadev->rate_limit) {
1774            struct sock *sk = sk_atm(vcc);
1775
1776            if (vcc->qos.txtp.max_sdu != 0) {
1777                if (ia_vcc->pcr > 60000)
1778                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1779                else if (ia_vcc->pcr > 2000)
1780                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1781                else
1782                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1783            }
1784            else
1785              sk->sk_sndbuf = 24576;
1786         }
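        /* For rate-limited VCs (pcr below iadev->rate_limit, i.e. a third of
           the line rate per tx_init()) the socket send buffer is capped at a
           small multiple of the max SDU - 5x, 4x or 3x depending on PCR, or
           24576 bytes when no max SDU is given - so the flow-control path in
           tx_dle_intr()/ia_tx_poll() can hold back skbs without the socket
           queueing without bound. */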
1787            
1788         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1789         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1790         vc += vcc->vci;  
1791         evc += vcc->vci;  
1792         memset((caddr_t)vc, 0, sizeof(*vc));  
1793         memset((caddr_t)evc, 0, sizeof(*evc));  
1794           
1795         /* store the most significant 4 bits of vci as the last 4 bits   
1796                 of first part of atm header.  
1797            store the last 12 bits of vci as first 12 bits of the second  
1798                 part of the atm header.  
1799         */  
1800         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1801         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
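        /* Worked example: for VCI 0x1234, atm_hdr1 = 0x0001 (top 4 bits of
           the VCI) and atm_hdr2 = 0x2340 (low 12 bits shifted into the upper
           12 bits of the second header half-word). */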
1802  
1803         /* check the following for different traffic classes */  
1804         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1805         {  
1806                 vc->type = UBR;  
1807                 vc->status = CRC_APPEND;
1808                 vc->acr = cellrate_to_float(iadev->LineRate);  
1809                 if (vcc->qos.txtp.pcr > 0) 
1810                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1811                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1812                                              vcc->qos.txtp.max_pcr,vc->acr);)
1813         }  
1814         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1815         {       srv_cls_param_t srv_p;
1816                 IF_ABR(printk("Tx ABR VCC\n");)  
1817                 init_abr_vc(iadev, &srv_p);
1818                 if (vcc->qos.txtp.pcr > 0) 
1819                    srv_p.pcr = vcc->qos.txtp.pcr;
1820                 if (vcc->qos.txtp.min_pcr > 0) {
1821                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1822                    if (tmpsum > iadev->LineRate)
1823                        return -EBUSY;
1824                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1825                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1826                 } 
1827                 else srv_p.mcr = 0;
1828                 if (vcc->qos.txtp.icr)
1829                    srv_p.icr = vcc->qos.txtp.icr;
1830                 if (vcc->qos.txtp.tbe)
1831                    srv_p.tbe = vcc->qos.txtp.tbe;
1832                 if (vcc->qos.txtp.frtt)
1833                    srv_p.frtt = vcc->qos.txtp.frtt;
1834                 if (vcc->qos.txtp.rif)
1835                    srv_p.rif = vcc->qos.txtp.rif;
1836                 if (vcc->qos.txtp.rdf)
1837                    srv_p.rdf = vcc->qos.txtp.rdf;
1838                 if (vcc->qos.txtp.nrm_pres)
1839                    srv_p.nrm = vcc->qos.txtp.nrm;
1840                 if (vcc->qos.txtp.trm_pres)
1841                    srv_p.trm = vcc->qos.txtp.trm;
1842                 if (vcc->qos.txtp.adtf_pres)
1843                    srv_p.adtf = vcc->qos.txtp.adtf;
1844                 if (vcc->qos.txtp.cdf_pres)
1845                    srv_p.cdf = vcc->qos.txtp.cdf;    
1846                 if (srv_p.icr > srv_p.pcr)
1847                    srv_p.icr = srv_p.pcr;    
1848                 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n", 
1849                                                       srv_p.pcr, srv_p.mcr);)
1850                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1851         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1852                 if (iadev->phy_type & FE_25MBIT_PHY) {
1853                     printk("IA:  CBR not supported\n");
1854                     return -EINVAL; 
1855                 }
1856                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1857                    IF_CBR(printk("PCR is not available\n");)
1858                    return -EINVAL;
1859                 }
1860                 vc->type = CBR;
1861                 vc->status = CRC_APPEND;
1862                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1863                     return ret;
1864                 }
1865        } 
1866         else  
1867            printk("iadev:  Non UBR, ABR and CBR traffic not supported\n"); 
1868         
1869         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1870         IF_EVENT(printk("ia open_tx returning \n");)  
1871         return 0;  
1872 }  
1873   
1874   
1875 static int tx_init(struct atm_dev *dev)  
1876 {  
1877         IADEV *iadev;  
1878         struct tx_buf_desc *buf_desc_ptr;
1879         unsigned int tx_pkt_start;  
1880         void *dle_addr;  
1881         int i;  
1882         u_short tcq_st_adr;  
1883         u_short *tcq_start;  
1884         u_short prq_st_adr;  
1885         u_short *prq_start;  
1886         struct main_vc *vc;  
1887         struct ext_vc *evc;   
1888         u_short tmp16;
1889         u32 vcsize_sel;
1890  
1891         iadev = INPH_IA_DEV(dev);  
1892         spin_lock_init(&iadev->tx_lock);
1893  
1894         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1895                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1896
1897         /* Allocate 4k (boundary aligned) bytes */
1898         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1899                                         &iadev->tx_dle_dma);  
1900         if (!dle_addr)  {
1901                 printk(KERN_ERR DEV_LABEL ": can't allocate DLEs\n");
1902                 goto err_out;
1903         }
1904         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1905         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1906         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1907         iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1908
1909         /* write the upper 20 bits of the start address to tx list address register */  
1910         writel(iadev->tx_dle_dma & 0xfffff000,
1911                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1912         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1913         writew(0, iadev->seg_reg+MODE_REG_0);  
1914         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1915         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1916         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1917         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1918   
1919         /*  
1920            Transmit side control memory map  
1921            --------------------------------    
1922          Buffer descr   0x0000 (128 - 4K)  
1923          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1924                                         (512 - 1K) each  
1925                                         TCQ - 4K, PRQ - 5K  
1926          CBR Table      0x1800 (as needed) - 6K  
1927          UBR Table      0x3000 (1K - 4K) - 12K  
1928          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1929          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1930                                 ABR Tbl - 20K, ABR Wq - 22K   
1931          extended VC    0x6000 (1K - 8K) - 24K  
1932          VC Table       0x8000 (1K - 32K) - 32K  
1933           
1934         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1935         and Wait q, which can be allotted later.  
1936         */  
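        /* As on the receive side, each base programmed below is multiplied
           by iadev->memSize; the trailing "- NK" figures simply restate the
           hex start offsets in decimal KB (e.g. UBR Table 0x3000 == 12K) for
           the 1K-VC board. */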
1937      
1938         /* Buffer Descriptor Table Base address */  
1939         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1940   
1941         /* initialize each entry in the buffer descriptor table */  
1942         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1943         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1944         buf_desc_ptr++;  
1945         tx_pkt_start = TX_PACKET_RAM;  
1946         for(i=1; i<=iadev->num_tx_desc; i++)  
1947         {  
1948                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1949                 buf_desc_ptr->desc_mode = AAL5;  
1950                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1951                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1952                 buf_desc_ptr++;           
1953                 tx_pkt_start += iadev->tx_buf_sz;  
1954         }  
1955         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1956         if (!iadev->tx_buf) {
1957             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1958             goto err_free_dle;
1959         }
1960         for (i= 0; i< iadev->num_tx_desc; i++)
1961         {
1962             struct cpcs_trailer *cpcs;
1963  
1964             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1965             if(!cpcs) {                
1966                 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1967                 goto err_free_tx_bufs;
1968             }
1969             iadev->tx_buf[i].cpcs = cpcs;
1970             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1971                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1972         }
1973         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1974                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1975         if (!iadev->desc_tbl) {
1976                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1977                 goto err_free_all_tx_bufs;
1978         }
1979   
1980         /* Communication Queues base address */  
1981         i = TX_COMP_Q * iadev->memSize;
1982         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
1983   
1984         /* Transmit Complete Queue */  
1985         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
1986         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
1987         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
1988         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1989         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
1990                                               iadev->seg_reg+TCQ_ED_ADR); 
1991         /* Fill the TCQ with all the free descriptors. */  
1992         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
1993         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
1994         for(i=1; i<=iadev->num_tx_desc; i++)  
1995         {  
1996                 *tcq_start = (u_short)i;  
1997                 tcq_start++;  
1998         }  
1999   
2000         /* Packet Ready Queue */  
2001         i = PKT_RDY_Q * iadev->memSize; 
2002         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2003         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2004                                               iadev->seg_reg+PRQ_ED_ADR);
2005         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2006         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2007          
2008         /* Load local copy of PRQ and TCQ ptrs */
2009         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2010         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2011         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2012
2013         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2014         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2015         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2016
2017         /* Just for safety initializing the queue to have desc 1 always */  
2018         /* Fill the PRQ with all the free descriptors. */  
2019         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2020         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2021         for(i=1; i<=iadev->num_tx_desc; i++)  
2022         {  
2023                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2024                 prq_start++;  
2025         }  
2026         /* CBR Table */  
2027         IF_INIT(printk("Start CBR Init\n");)
2028 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2029         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2030 #else /* Charlie's logic is wrong ? */
2031         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2032         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2033         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2034 #endif
2035
2036         IF_INIT(printk("value in register = 0x%x\n",
2037                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2038         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2039         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2040         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2041                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2042         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2043         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2044         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2045         IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2046                iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2047         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2048           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2049           readw(iadev->seg_reg+CBR_TAB_END+1));)
2050
2051         /* Initialize the CBR Scheduling Table */
2052         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2053                                                           0, iadev->num_vc*6); 
2054         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2055         iadev->CbrEntryPt = 0;
2056         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2057         iadev->NumEnabledCBR = 0;
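        /* The CBR schedule holds num_vc * 3 slots of 2 bytes each (hence the
           num_vc * 6 byte memset above).  Granularity is the cell rate one
           slot represents (MAX_ATM_155 / CbrTotEntries), so a CBR VC
           presumably occupies about pcr / Granularity slots; ia_close()
           correspondingly subtracts NumCbrEntry * Granularity from sum_mcr. */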
2058
2059         /* UBR scheduling Table and wait queue */  
2060         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2061                 - SCHEDSZ is 1K (# of entries).  
2062                 - UBR Table size is 4K  
2063                 - UBR wait queue is 4K  
2064            since the table and wait queues are contiguous, all the bytes   
2065            can be initialized by one memset.  
2066         */  
2067         
2068         vcsize_sel = 0;
2069         i = 8*1024;
2070         while (i != iadev->num_vc) {
2071           i /= 2;
2072           vcsize_sel++;
2073         }
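        /* Same power-of-two encoding as in rx_init(): 1024 VCs gives a
           vcsize_sel of 3 and 4096 VCs gives 1; it is folded into the low
           bits of VCT_BASE below. */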
2074  
2075         i = MAIN_VC_TABLE * iadev->memSize;
2076         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2077         i =  EXT_VC_TABLE * iadev->memSize;
2078         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2079         i = UBR_SCHED_TABLE * iadev->memSize;
2080         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2081         i = UBR_WAIT_Q * iadev->memSize; 
2082         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2083         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2084                                                        0, iadev->num_vc*8);
2085         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2086         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2087                 - SCHEDSZ is 1K (# of entries).  
2088                 - ABR Table size is 2K  
2089                 - ABR wait queue is 2K  
2090            since the table and wait queues are contiguous, all the bytes   
2091            can be initialized by one memset.  
2092         */  
2093         i = ABR_SCHED_TABLE * iadev->memSize;
2094         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2095         i = ABR_WAIT_Q * iadev->memSize;
2096         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2097  
2098         i = ABR_SCHED_TABLE*iadev->memSize;
2099         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2100         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2101         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2102         iadev->testTable = kmalloc(sizeof(struct testTable_t *)*iadev->num_vc, GFP_KERNEL); 
2103         if (!iadev->testTable) {
2104            printk("Get freepage  failed\n");
2105            goto err_free_desc_tbl;
2106         }
2107         for(i=0; i<iadev->num_vc; i++)  
2108         {  
2109                 memset((caddr_t)vc, 0, sizeof(*vc));  
2110                 memset((caddr_t)evc, 0, sizeof(*evc));  
2111                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2112                                                 GFP_KERNEL);
2113                 if (!iadev->testTable[i])
2114                         goto err_free_test_tables;
2115                 iadev->testTable[i]->lastTime = 0;
2116                 iadev->testTable[i]->fract = 0;
2117                 iadev->testTable[i]->vc_status = VC_UBR;
2118                 vc++;  
2119                 evc++;  
2120         }  
2121   
2122         /* Other Initialization */  
2123           
2124         /* Max Rate Register */  
2125         if (iadev->phy_type & FE_25MBIT_PHY) {
2126            writew(RATE25, iadev->seg_reg+MAXRATE);  
2127            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2128         }
2129         else {
2130            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2131            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2132         }
2133         /* Set Idle Header Registers to be sure */  
2134         writew(0, iadev->seg_reg+IDLEHEADHI);  
2135         writew(0, iadev->seg_reg+IDLEHEADLO);  
2136   
2137         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2138         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2139
2140         iadev->close_pending = 0;
2141         init_waitqueue_head(&iadev->close_wait);
2142         init_waitqueue_head(&iadev->timeout_wait);
2143         skb_queue_head_init(&iadev->tx_dma_q);  
2144         ia_init_rtn_q(&iadev->tx_return_q);  
2145
2146         /* RM Cell Protocol ID and Message Type */  
2147         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2148         skb_queue_head_init (&iadev->tx_backlog);
2149   
2150         /* Mode Register 1 */  
2151         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2152   
2153         /* Mode Register 0 */  
2154         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2155   
2156         /* Interrupt Status Register - read to clear */  
2157         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2158   
2159         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2160         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2161         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2162         iadev->tx_pkt_cnt = 0;
2163         iadev->rate_limit = iadev->LineRate / 3;
2164   
2165         return 0;
2166
2167 err_free_test_tables:
2168         while (--i >= 0)
2169                 kfree(iadev->testTable[i]);
2170         kfree(iadev->testTable);
2171 err_free_desc_tbl:
2172         kfree(iadev->desc_tbl);
2173 err_free_all_tx_bufs:
2174         i = iadev->num_tx_desc;
2175 err_free_tx_bufs:
2176         while (--i >= 0) {
2177                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2178
2179                 pci_unmap_single(iadev->pci, desc->dma_addr,
2180                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2181                 kfree(desc->cpcs);
2182         }
2183         kfree(iadev->tx_buf);
2184 err_free_dle:
2185         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2186                             iadev->tx_dle_dma);  
2187 err_out:
2188         return -ENOMEM;
2189 }   
2190    
2191 static irqreturn_t ia_int(int irq, void *dev_id)  
2192 {  
2193    struct atm_dev *dev;  
2194    IADEV *iadev;  
2195    unsigned int status;  
2196    int handled = 0;
2197
2198    dev = dev_id;  
2199    iadev = INPH_IA_DEV(dev);  
2200    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2201    { 
2202         handled = 1;
2203         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2204         if (status & STAT_REASSINT)  
2205         {  
2206            /* do something */  
2207            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2208            rx_intr(dev);  
2209         }  
2210         if (status & STAT_DLERINT)  
2211         {  
2212            /* Clear this bit by writing a 1 to it. */  
2213            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2214            rx_dle_intr(dev);  
2215         }  
2216         if (status & STAT_SEGINT)  
2217         {  
2218            /* do something */ 
2219            IF_EVENT(printk("IA: tx_intr \n");) 
2220            tx_intr(dev);  
2221         }  
2222         if (status & STAT_DLETINT)  
2223         {  
2224            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2225            tx_dle_intr(dev);  
2226         }  
2227         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2228         {  
2229            if (status & STAT_FEINT) 
2230                IaFrontEndIntr(iadev);
2231         }  
2232    }
2233    return IRQ_RETVAL(handled);
2234 }  
2235           
2236           
2237           
2238 /*----------------------------- entries --------------------------------*/  
2239 static int get_esi(struct atm_dev *dev)  
2240 {  
2241         IADEV *iadev;  
2242         int i;  
2243         u32 mac1;  
2244         u16 mac2;  
2245           
2246         iadev = INPH_IA_DEV(dev);  
2247         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2248                                 iadev->reg+IPHASE5575_MAC1)));  
2249         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2250         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2251         for (i=0; i<MAC1_LEN; i++)  
2252                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2253           
2254         for (i=0; i<MAC2_LEN; i++)  
2255                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
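        /* The ESI is just the adapter MAC address: the first MAC1_LEN bytes
           (presumably 4) come from the MAC1 register and the remaining
           MAC2_LEN bytes (presumably 2) from MAC2, copied out most
           significant byte first. */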
2256         return 0;  
2257 }  
2258           
2259 static int reset_sar(struct atm_dev *dev)  
2260 {  
2261         IADEV *iadev;  
2262         int i, error = 1;  
2263         unsigned int pci[64];  
2264           
2265         iadev = INPH_IA_DEV(dev);  
2266         for(i=0; i<64; i++)  
2267           if ((error = pci_read_config_dword(iadev->pci,  
2268                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2269               return error;  
2270         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2271         for(i=0; i<64; i++)  
2272           if ((error = pci_write_config_dword(iadev->pci,  
2273                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2274             return error;  
2275         udelay(5);  
2276         return 0;  
2277 }  
2278           
2279           
2280 static int __devinit ia_init(struct atm_dev *dev)
2281 {  
2282         IADEV *iadev;  
2283         unsigned long real_base;
2284         void __iomem *base;
2285         unsigned short command;  
2286         int error, i; 
2287           
2288         /* The device has been identified and registered. Now we read   
2289            necessary configuration info like memory base address,   
2290            interrupt number etc */  
2291           
2292         IF_INIT(printk(">ia_init\n");)  
2293         dev->ci_range.vpi_bits = 0;  
2294         dev->ci_range.vci_bits = NR_VCI_LD;  
2295
2296         iadev = INPH_IA_DEV(dev);  
2297         real_base = pci_resource_start (iadev->pci, 0);
2298         iadev->irq = iadev->pci->irq;
2299                   
2300         error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2301         if (error) {
2302                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2303                                 dev->number,error);  
2304                 return -EINVAL;  
2305         }  
2306         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2307                         dev->number, iadev->pci->revision, real_base, iadev->irq);)
2308           
2309         /* find mapping size of board */  
2310           
2311         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2312
2313         if (iadev->pci_map_size == 0x100000){
2314           iadev->num_vc = 4096;
2315           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2316           iadev->memSize = 4;
2317         }
2318         else if (iadev->pci_map_size == 0x40000) {
2319           iadev->num_vc = 1024;
2320           iadev->memSize = 1;
2321         }
2322         else {
2323            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2324            return -EINVAL;
2325         }
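        /* So a 1 MB BAR identifies the 4K-VC board (memSize 4) and a 256 KB
           BAR the 1K-VC board (memSize 1); memSize is the multiplier applied
           to all the control-memory offsets in tx_init()/rx_init(). */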
2326         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2327           
2328         /* enable bus mastering */
2329         pci_set_master(iadev->pci);
2330
2331         /*  
2332          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2333          */  
2334         udelay(10);  
2335           
2336         /* mapping the physical address to a virtual address in address space */  
2337         base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2338           
2339         if (!base)  
2340         {  
2341                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2342                             dev->number);  
2343                 return -ENOMEM;  
2344         }  
2345         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2346                         dev->number, iadev->pci->revision, base, iadev->irq);)
2347           
2348         /* filling the iphase dev structure */  
2349         iadev->mem = iadev->pci_map_size /2;  
2350         iadev->real_base = real_base;  
2351         iadev->base = base;  
2352                   
2353         /* Bus Interface Control Registers */  
2354         iadev->reg = base + REG_BASE;
2355         /* Segmentation Control Registers */  
2356         iadev->seg_reg = base + SEG_BASE;
2357         /* Reassembly Control Registers */  
2358         iadev->reass_reg = base + REASS_BASE;  
2359         /* Front end/ DMA control registers */  
2360         iadev->phy = base + PHY_BASE;  
2361         iadev->dma = base + PHY_BASE;  
2362         /* RAM - Segmentation RAM and Reassembly RAM */  
2363         iadev->ram = base + ACTUAL_RAM_BASE;  
2364         iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2365         iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2366   
2367         /* let's print out the above */  
2368         IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2369           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2370           iadev->phy, iadev->ram, iadev->seg_ram, 
2371           iadev->reass_ram);) 
2372           
2373         /* let's try reading the MAC address */  
2374         error = get_esi(dev);  
2375         if (error) {
2376           iounmap(iadev->base);
2377           return error;  
2378         }
2379         printk("IA: ");
2380         for (i=0; i < ESI_LEN; i++)  
2381                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2382         printk("\n");  
2383   
2384         /* reset SAR */  
2385         if (reset_sar(dev)) {
2386            iounmap(iadev->base);
2387            printk("IA: reset SAR fail, please try again\n");
2388            return 1;
2389         }
2390         return 0;  
2391 }  
2392
2393 static void ia_update_stats(IADEV *iadev) {
2394     if (!iadev->carrier_detect)
2395         return;
2396     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2397     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2398     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2399     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2400     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2401     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2402     return;
2403 }
2404   
2405 static void ia_led_timer(unsigned long arg) {
2406         unsigned long flags;
2407         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2408         u_char i;
2409         static u32 ctrl_reg; 
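        /* The timer re-arms itself every HZ/4 jiffies (see mod_timer below)
           and toggles CTRL_LED on alternate ticks, so the LED blinks at
           about 2 Hz; statistics are refreshed on one phase and the TX path
           is polled (waking any pending close) on the other. */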
2410         for (i = 0; i < iadev_count; i++) {
2411            if (ia_dev[i]) {
2412               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2413               if (blinking[i] == 0) {
2414                  blinking[i]++;
2415                  ctrl_reg &= (~CTRL_LED);
2416                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2417                  ia_update_stats(ia_dev[i]);
2418               }
2419               else {
2420                  blinking[i] = 0;
2421                  ctrl_reg |= CTRL_LED;
2422                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2423                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2424                  if (ia_dev[i]->close_pending)  
2425                     wake_up(&ia_dev[i]->close_wait);
2426                  ia_tx_poll(ia_dev[i]);
2427                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2428               }
2429            }
2430         }
2431         mod_timer(&ia_timer, jiffies + HZ / 4);
2432         return;
2433 }
2434
2435 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2436         unsigned long addr)  
2437 {  
2438         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2439 }  
2440   
2441 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2442 {  
2443         return readl(INPH_IA_DEV(dev)->phy+addr);  
2444 }  
2445
2446 static void ia_free_tx(IADEV *iadev)
2447 {
2448         int i;
2449
2450         kfree(iadev->desc_tbl);
2451         for (i = 0; i < iadev->num_vc; i++)
2452                 kfree(iadev->testTable[i]);
2453         kfree(iadev->testTable);
2454         for (i = 0; i < iadev->num_tx_desc; i++) {
2455                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2456
2457                 pci_unmap_single(iadev->pci, desc->dma_addr,
2458                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2459                 kfree(desc->cpcs);
2460         }
2461         kfree(iadev->tx_buf);
2462         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2463                             iadev->tx_dle_dma);  
2464 }
2465
2466 static void ia_free_rx(IADEV *iadev)
2467 {
2468         kfree(iadev->rx_open);
2469         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2470                           iadev->rx_dle_dma);  
2471 }
2472
2473 static int __devinit ia_start(struct atm_dev *dev)
2474 {  
2475         IADEV *iadev;  
2476         int error;  
2477         unsigned char phy;  
2478         u32 ctrl_reg;  
2479         IF_EVENT(printk(">ia_start\n");)  
2480         iadev = INPH_IA_DEV(dev);  
2481         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2482                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2483                     dev->number, iadev->irq);  
2484                 error = -EAGAIN;
2485                 goto err_out;
2486         }  
2487         /* @@@ should release IRQ on error */  
2488         /* enabling memory + master */  
2489         if ((error = pci_write_config_word(iadev->pci,   
2490                                 PCI_COMMAND,   
2491                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2492         {  
2493                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2494                     "master (0x%x)\n",dev->number, error);  
2495                 error = -EIO;  
2496                 goto err_free_irq;
2497         }  
2498         udelay(10);  
2499   
2500         /* Maybe we should reset the front end, initialize Bus Interface Control   
2501                 Registers and see. */  
2502   
2503         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2504                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2505         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2506         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2507                         | CTRL_B8  
2508                         | CTRL_B16  
2509                         | CTRL_B32  
2510                         | CTRL_B48  
2511                         | CTRL_B64  
2512                         | CTRL_B128  
2513                         | CTRL_ERRMASK  
2514                         | CTRL_DLETMASK         /* should be removed later */  
2515                         | CTRL_DLERMASK  
2516                         | CTRL_SEGMASK  
2517                         | CTRL_REASSMASK          
2518                         | CTRL_FEMASK  
2519                         | CTRL_CSPREEMPT;  
2520   
2521        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2522   
2523         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2524                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2525            printk("Bus status reg after init: %08x\n", 
2526                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2527     
2528         ia_hw_type(iadev); 
2529         error = tx_init(dev);  
2530         if (error)
2531                 goto err_free_irq;
2532         error = rx_init(dev);  
2533         if (error)
2534                 goto err_free_tx;
2535   
2536         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2537         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2538         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2539                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2540         phy = 0; /* resolve compiler complaint */
2541         IF_INIT ( 
2542         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2543                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2544         else  
2545                 printk("IA: utopia,rev.%0x\n",phy);) 
2546
2547         if (iadev->phy_type &  FE_25MBIT_PHY)
2548            ia_mb25_init(iadev);
2549         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2550            ia_suni_pm7345_init(iadev);
2551         else {
2552                 error = suni_init(dev);
2553                 if (error)
2554                         goto err_free_rx;
2555                 if (dev->phy->start) {
2556                         error = dev->phy->start(dev);
2557                         if (error)
2558                                 goto err_free_rx;
2559                 }
2560                 /* Get iadev->carrier_detect status */
2561                 IaFrontEndIntr(iadev);
2562         }
2563         return 0;
2564
2565 err_free_rx:
2566         ia_free_rx(iadev);
2567 err_free_tx:
2568         ia_free_tx(iadev);
2569 err_free_irq:
2570         free_irq(iadev->irq, dev);  
2571 err_out:
2572         return error;
2573 }  
2574   
2575 static void ia_close(struct atm_vcc *vcc)  
2576 {
2577         DEFINE_WAIT(wait);
2578         u16 *vc_table;
2579         IADEV *iadev;
2580         struct ia_vcc *ia_vcc;
2581         struct sk_buff *skb = NULL;
2582         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2583         unsigned long closetime, flags;
2584
2585         iadev = INPH_IA_DEV(vcc->dev);
2586         ia_vcc = INPH_IA_VCC(vcc);
2587         if (!ia_vcc) return;  
2588
2589         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2590                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2591         clear_bit(ATM_VF_READY,&vcc->flags);
2592         skb_queue_head_init (&tmp_tx_backlog);
2593         skb_queue_head_init (&tmp_vcc_backlog); 
2594         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2595            iadev->close_pending++;
2596            prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2597            schedule_timeout(50);
2598            finish_wait(&iadev->timeout_wait, &wait);
2599            spin_lock_irqsave(&iadev->tx_lock, flags); 
2600            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2601               if (ATM_SKB(skb)->vcc == vcc){ 
2602                  if (vcc->pop) vcc->pop(vcc, skb);
2603                  else dev_kfree_skb_any(skb);
2604               }
2605               else 
2606                  skb_queue_tail(&tmp_tx_backlog, skb);
2607            } 
2608            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2609              skb_queue_tail(&iadev->tx_backlog, skb);
2610            IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2611            closetime = 300000 / ia_vcc->pcr;
2612            if (closetime == 0)
2613               closetime = 1;
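           /* closetime is the wait_event_timeout() timeout in jiffies below;
              scaling it inversely with the VC's PCR gives slow VCs more time
              to drain their outstanding descriptors (the 300000 constant is
              presumably empirical). */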
2614            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2615            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2616            spin_lock_irqsave(&iadev->tx_lock, flags);
2617            iadev->close_pending--;
2618            iadev->testTable[vcc->vci]->lastTime = 0;
2619            iadev->testTable[vcc->vci]->fract = 0; 
2620            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2621            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2622               if (vcc->qos.txtp.min_pcr > 0)
2623                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2624            }
2625            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2626               ia_vcc = INPH_IA_VCC(vcc); 
2627               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2628               ia_cbrVc_close (vcc);
2629            }
2630            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2631         }
2632         
2633         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2634            // reset reass table
2635            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2636            vc_table += vcc->vci; 
2637            *vc_table = NO_AAL5_PKT;
2638            // reset vc table
2639            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2640            vc_table += vcc->vci;
2641            *vc_table = (vcc->vci << 6) | 15;
2642            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2643               struct abr_vc_table __iomem *abr_vc_table = 
2644                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2645               abr_vc_table +=  vcc->vci;
2646               abr_vc_table->rdf = 0x0003;
2647               abr_vc_table->air = 0x5eb1;
2648            }                                 
2649            // Drain the packets
2650            rx_dle_intr(vcc->dev); 
2651            iadev->rx_open[vcc->vci] = NULL;
2652         }
2653         kfree(INPH_IA_VCC(vcc));  
2654         ia_vcc = NULL;
2655         vcc->dev_data = NULL;
2656         clear_bit(ATM_VF_ADDR,&vcc->flags);
2657         return;        
2658 }  
2659   
2660 static int ia_open(struct atm_vcc *vcc)
2661 {  
2662         IADEV *iadev;  
2663         struct ia_vcc *ia_vcc;  
2664         int error;  
2665         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2666         {  
2667                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2668                 vcc->dev_data = NULL;
2669         }  
2670         iadev = INPH_IA_DEV(vcc->dev);  
2671         if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2672         {  
2673                 IF_EVENT(printk("iphase open: unspec part\n");)  
2674                 set_bit(ATM_VF_ADDR,&vcc->flags);
2675         }  
2676         if (vcc->qos.aal != ATM_AAL5)  
2677                 return -EINVAL;  
2678         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2679                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2680   
2681         /* Device dependent initialization */  
2682         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2683         if (!ia_vcc) return -ENOMEM;  
2684         vcc->dev_data = ia_vcc;
2685   
2686         if ((error = open_rx(vcc)))  
2687         {  
2688                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2689                 ia_close(vcc);  
2690                 return error;  
2691         }  
2692   
2693         if ((error = open_tx(vcc)))  
2694         {  
2695                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2696                 ia_close(vcc);  
2697                 return error;  
2698         }  
2699   
2700         set_bit(ATM_VF_READY,&vcc->flags);
2701
2702 #if 0
2703         {
2704            static u8 first = 1; 
2705            if (first) {
2706               ia_timer.expires = jiffies + 3*HZ;
2707               add_timer(&ia_timer);
2708               first = 0;
2709            }           
2710         }
2711 #endif
2712         IF_EVENT(printk("ia open returning\n");)  
2713         return 0;  
2714 }  
2715   
2716 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2717 {  
2718         IF_EVENT(printk(">ia_change_qos\n");)  
2719         return 0;  
2720 }  
2721   
2722 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2723 {  
2724    IA_CMDBUF ia_cmds;
2725    IADEV *iadev;
2726    int i, board;
2727    u16 __user *tmps;
2728    IF_EVENT(printk(">ia_ioctl\n");)  
2729    if (cmd != IA_CMD) {
2730       if (!dev->phy->ioctl) return -EINVAL;
2731       return dev->phy->ioctl(dev,cmd,arg);
2732    }
2733    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2734    board = ia_cmds.status;
2735    if ((board < 0) || (board >= iadev_count))
2736          board = 0;    
2737    iadev = ia_dev[board];
2738    switch (ia_cmds.cmd) {
2739    case MEMDUMP:
2740    {
2741         switch (ia_cmds.sub_cmd) {
2742           case MEMDUMP_DEV:     
2743              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2744              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2745                 return -EFAULT;
2746              ia_cmds.status = 0;
2747              break;
2748           case MEMDUMP_SEGREG:
2749              if (!capable(CAP_NET_ADMIN)) return -EPERM;
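             /* Dump the first 0x80 bytes of segmentation register space to
                user space; the registers are 16 bits wide, so each 32-bit
                read is masked down to a u16. */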
2750              tmps = (u16 __user *)ia_cmds.buf;
2751              for(i=0; i<0x80; i+=2, tmps++)
2752                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2753              ia_cmds.status = 0;
2754              ia_cmds.len = 0x80;
2755              break;
2756           case MEMDUMP_REASSREG:
2757              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2758              tmps = (u16 __user *)ia_cmds.buf;
2759              for(i=0; i<0x80; i+=2, tmps++)
2760                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2761              ia_cmds.status = 0;
2762              ia_cmds.len = 0x80;
2763              break;
2764           case MEMDUMP_FFL:
2765           {  
2766              ia_regs_t       *regs_local;
2767              ffredn_t        *ffL;
2768              rfredn_t        *rfL;
2769                      
2770              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2771              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2772              if (!regs_local) return -ENOMEM;
2773              ffL = &regs_local->ffredn;
2774              rfL = &regs_local->rfredn;
2775              /* Copy real rfred registers into the local copy */
2776              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2777                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2778              /* Copy real ffred registers into the local copy */
2779              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2780                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2781
2782              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2783                 kfree(regs_local);
2784                 return -EFAULT;
2785              }
2786              kfree(regs_local);
2787              printk("Board %d registers dumped\n", board);
2788              ia_cmds.status = 0;                  
2789          }      
2790              break;        
2791          case READ_REG:
2792          {  
2793              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2794              desc_dbg(iadev); 
2795              ia_cmds.status = 0; 
2796          }
2797              break;
2798          case 0x6:
2799          {  
2800              ia_cmds.status = 0; 
2801              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2802              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2803          }
2804              break;
2805          case 0x8:
2806          {
2807              struct k_sonet_stats *stats;
2808              stats = &PRIV(_ia_dev[board])->sonet_stats;
2809              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2810              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2811              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2812              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2813              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2814              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2815              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2816              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2817              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2818          }
2819             ia_cmds.status = 0;
2820             break;
2821          case 0x9:
2822             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2823             for (i = 1; i <= iadev->num_rx_desc; i++)
2824                free_desc(_ia_dev[board], i);
2825             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2826                                             iadev->reass_reg+REASS_MASK_REG);
2827             iadev->rxing = 1;
2828             
2829             ia_cmds.status = 0;
2830             break;
2831
2832          case 0xb:
2833             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2834             IaFrontEndIntr(iadev);
2835             break;
2836          case 0xa:
2837          {  
2838              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2839              ia_cmds.status = 0; 
2840              IADebugFlag = ia_cmds.maddr;
2841              printk("New debug option loaded\n");
2842          }
2843              break;
2844          default:
2845              ia_cmds.status = 0;
2846              break;
2847       } 
2848    }
2849       break;
2850    default:
2851       break;
2852
2853    }    
2854    return 0;  
2855 }  
2856   
2857 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2858         void __user *optval, int optlen)  
2859 {  
2860         IF_EVENT(printk(">ia_getsockopt\n");)  
2861         return -EINVAL;  
2862 }  
2863   
2864 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2865         void __user *optval, int optlen)  
2866 {  
2867         IF_EVENT(printk(">ia_setsockopt\n");)  
2868         return -EINVAL;  
2869 }  
2870   
2871 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2872         IADEV *iadev;
2873         struct dle *wr_ptr;
2874         struct tx_buf_desc __iomem *buf_desc_ptr;
2875         int desc;
2876         int comp_code;
2877         int total_len;
2878         struct cpcs_trailer *trailer;
2879         struct ia_vcc *iavcc;
2880
2881         iadev = INPH_IA_DEV(vcc->dev);  
2882         iavcc = INPH_IA_VCC(vcc);
2883         if (!iavcc->txing) {
2884            printk("discard packet on closed VC\n");
2885            if (vcc->pop)
2886                 vcc->pop(vcc, skb);
2887            else
2888                 dev_kfree_skb_any(skb);
2889            return 0;
2890         }
2891
2892         if (skb->len > iadev->tx_buf_sz - 8) {
2893            printk("Transmit size exceeds tx buffer size\n");
2894            if (vcc->pop)
2895                  vcc->pop(vcc, skb);
2896            else
2897                  dev_kfree_skb_any(skb);
2898           return 0;
2899         }
2900         if ((unsigned long)skb->data & 3) {
2901            printk("Misaligned SKB\n");
2902            if (vcc->pop)
2903                  vcc->pop(vcc, skb);
2904            else
2905                  dev_kfree_skb_any(skb);
2906            return 0;
2907         }       
2908         /* Get a descriptor number from our free descriptor queue  
2909            We get the descr number from the TCQ now, since I am using  
2910            the TCQ as a free buffer queue. Initially TCQ will be   
2911            initialized with all the descriptors and is hence, full.  
2912         */
2913         desc = get_desc (iadev, iavcc);
2914         if (desc == 0xffff) 
2915             return 1;
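        /* A TCQ entry packs a completion code in its upper bits and the
           13-bit descriptor number in the lower bits; split them here. */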
2916         comp_code = desc >> 13;  
2917         desc &= 0x1fff;  
2918   
2919         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2920         {  
2921                 IF_ERR(printk(DEV_LABEL ": invalid desc for send: %d\n", desc);) 
2922                 atomic_inc(&vcc->stats->tx);
2923                 if (vcc->pop)   
2924                     vcc->pop(vcc, skb);   
2925                 else  
2926                     dev_kfree_skb_any(skb);
2927                 return 0;   /* return SUCCESS */
2928         }  
2929   
2930         if (comp_code)  
2931         {  
2932             IF_ERR(printk(DEV_LABEL ": send desc:%d completion code %d error\n", 
2933                                                             desc, comp_code);)  
2934         }  
2935        
2936         /* remember the desc and vcc mapping */
2937         iavcc->vc_desc_cnt++;
2938         iadev->desc_tbl[desc-1].iavcc = iavcc;
2939         iadev->desc_tbl[desc-1].txskb = skb;
2940         IA_SKB_STATE(skb) = 0;
2941
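        /* The descriptor above came from the TCQ (used as the free-descriptor
           queue), so consume the entry: advance the read pointer, wrap it at
           the end of the queue, and write it back for the SAR. */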
2942         iadev->ffL.tcq_rd += 2;
2943         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2944                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2945         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2946   
2947         /* Put the descriptor number in the packet ready queue  
2948                 and put the updated write pointer in the DLE field   
2949         */   
2950         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2951
2952         iadev->ffL.prq_wr += 2;
2953         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2954                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2955           
2956         /* Figure out the exact length of the packet and padding required to 
2957            make it  aligned on a 48 byte boundary.  */
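        /* For example, a 100-byte SKB plus the 8-byte AAL5 CPCS trailer is
           108 bytes, which rounds up to 144 bytes (three 48-byte cell
           payloads); the printk below reports 44 as "padding", i.e. everything
           beyond the SKB data. */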
2958         total_len = skb->len + sizeof(struct cpcs_trailer);  
2959         total_len = ((total_len + 47) / 48) * 48;
2960         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2961  
2962         /* Put the packet in a tx buffer */   
2963         trailer = iadev->tx_buf[desc-1].cpcs;
2964         IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2965                   skb, skb->data, skb->len, desc);)
2966         trailer->control = 0; 
2967         /*big endian*/ 
2968         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2969         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2970
2971         /* Display the packet */  
2972         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2973                                                         skb->len, tcnter++);  
2974         xdump(skb->data, skb->len, "TX: ");
2975         printk("\n");)
2976
2977         /* Build the buffer descriptor */  
2978         buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2979         buf_desc_ptr += desc;   /* points to the corresponding entry */  
2980         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
2981         /* Huh ? p.115 of users guide describes this as a read-only register */
2982         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2983         buf_desc_ptr->vc_index = vcc->vci;
2984         buf_desc_ptr->bytes = total_len;  
2985
2986         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
2987            clear_lockup (vcc, iadev);
2988
2989         /* Build the DLE structure */  
2990         wr_ptr = iadev->tx_dle_q.write;  
2991         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
2992         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
2993                 skb->len, PCI_DMA_TODEVICE);
2994         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
2995                                                   buf_desc_ptr->buf_start_lo;  
2996         /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
2997         wr_ptr->bytes = skb->len;  
2998
2999         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3000         if ((wr_ptr->bytes >> 2) == 0xb)
3001            wr_ptr->bytes = 0x30;
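        /* (bytes >> 2) == 0xb matches lengths 0x2c-0x2f (the problematic
           0x2d-0x2f plus 0x2c); such DLEs are padded up to 0x30 bytes. */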
3002
3003         wr_ptr->mode = TX_DLE_PSI; 
3004         wr_ptr->prq_wr_ptr_data = 0;
3005   
3006         /* end is not to be used for the DLE q */  
3007         if (++wr_ptr == iadev->tx_dle_q.end)  
3008                 wr_ptr = iadev->tx_dle_q.start;  
3009         
3010         /* Build trailer dle */
3011         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3012         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3013           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3014
3015         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3016         wr_ptr->mode = DMA_INT_ENABLE; 
3017         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
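        /* The trailer DLE carries the updated PRQ write pointer and has
           DMA_INT_ENABLE set, so the DMA-complete interrupt is raised once the
           last piece of the packet has been transferred. */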
3018         
3019         /* end is not to be used for the DLE q */
3020         if (++wr_ptr == iadev->tx_dle_q.end)  
3021                 wr_ptr = iadev->tx_dle_q.start;
3022
3023         iadev->tx_dle_q.write = wr_ptr;  
3024         ATM_DESC(skb) = vcc->vci;
3025         skb_queue_tail(&iadev->tx_dma_q, skb);
3026
3027         atomic_inc(&vcc->stats->tx);
3028         iadev->tx_pkt_cnt++;
3029         /* Increment transaction counter */  
3030         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3031         
3032 #if 0        
3033         /* add flow control logic */ 
3034         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3035           if (iavcc->vc_desc_cnt > 10) {
3036              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3037             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3038               iavcc->flow_inc = -1;
3039               iavcc->saved_tx_quota = vcc->tx_quota;
3040            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3041              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3042              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3043               iavcc->flow_inc = 0;
3044            }
3045         }
3046 #endif
3047         IF_TX(printk("ia send done\n");)  
3048         return 0;  
3049 }  
3050
3051 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3052 {
3053         IADEV *iadev; 
3054         struct ia_vcc *iavcc;
3055         unsigned long flags;
3056
3057         iadev = INPH_IA_DEV(vcc->dev);
3058         iavcc = INPH_IA_VCC(vcc); 
3059         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3060         {
3061             if (!skb)
3062                 printk(KERN_CRIT "null skb in ia_send\n");
3063             else dev_kfree_skb_any(skb);
3064             return -EINVAL;
3065         }                         
3066         spin_lock_irqsave(&iadev->tx_lock, flags); 
3067         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3068             dev_kfree_skb_any(skb);
3069             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3070             return -EINVAL; 
3071         }
3072         ATM_SKB(skb)->vcc = vcc;
3073  
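        /* Keep packets in FIFO order: if a backlog already exists, queue
           behind it; otherwise try to transmit now and fall back to the
           backlog when ia_pkt_tx() cannot get a free descriptor. */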
3074         if (skb_peek(&iadev->tx_backlog)) {
3075            skb_queue_tail(&iadev->tx_backlog, skb);
3076         }
3077         else {
3078            if (ia_pkt_tx (vcc, skb)) {
3079               skb_queue_tail(&iadev->tx_backlog, skb);
3080            }
3081         }
3082         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3083         return 0;
3084
3085 }
3086
3087 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3088 {
3089   int   left = *pos, n;   
3090   char  *tmpPtr;
3091   IADEV *iadev = INPH_IA_DEV(dev);
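  /* *pos selects which chunk of the /proc output to produce: 0 yields the
     board type line, 1 the statistics block, and anything beyond that ends
     the output. */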
3092   if(!left--) {
3093      if (iadev->phy_type == FE_25MBIT_PHY) {
3094        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3095        return n;
3096      }
3097      if (iadev->phy_type == FE_DS3_PHY)
3098         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3099      else if (iadev->phy_type == FE_E3_PHY)
3100         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3101      else if (iadev->phy_type == FE_UTP_OPTION)
3102          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3103      else
3104         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3105      tmpPtr = page + n;
3106      if (iadev->pci_map_size == 0x40000)
3107         n += sprintf(tmpPtr, "-1KVC-");
3108      else
3109         n += sprintf(tmpPtr, "-4KVC-");  
3110      tmpPtr = page + n; 
3111      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3112         n += sprintf(tmpPtr, "1M  \n");
3113      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3114         n += sprintf(tmpPtr, "512K\n");
3115      else
3116        n += sprintf(tmpPtr, "128K\n");
3117      return n;
3118   }
3119   if (!left) {
3120      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3121                            "  Size of Tx Buffer  :  %u\n"
3122                            "  Number of Rx Buffer:  %u\n"
3123                            "  Size of Rx Buffer  :  %u\n"
3124                            "  Packets Received   :  %u\n"
3125                            "  Packets Transmitted:  %u\n"
3126                            "  Cells Received     :  %u\n"
3127                            "  Cells Transmitted  :  %u\n"
3128                            "  Board Dropped Cells:  %u\n"
3129                            "  Board Dropped Pkts :  %u\n",
3130                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3131                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3132                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3133                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3134                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3135   }
3136   return 0;
3137 }
3138   
3139 static const struct atmdev_ops ops = {  
3140         .open           = ia_open,  
3141         .close          = ia_close,  
3142         .ioctl          = ia_ioctl,  
3143         .getsockopt     = ia_getsockopt,  
3144         .setsockopt     = ia_setsockopt,  
3145         .send           = ia_send,  
3146         .phy_put        = ia_phy_put,  
3147         .phy_get        = ia_phy_get,  
3148         .change_qos     = ia_change_qos,  
3149         .proc_read      = ia_proc_read,
3150         .owner          = THIS_MODULE,
3151 };  
3152           
3153 static int __devinit ia_init_one(struct pci_dev *pdev,
3154                                  const struct pci_device_id *ent)
3155 {  
3156         struct atm_dev *dev;  
3157         IADEV *iadev;  
3158         unsigned long flags;
3159         int ret;
3160
3161         iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3162         if (!iadev) {
3163                 ret = -ENOMEM;
3164                 goto err_out;
3165         }
3166
3167         iadev->pci = pdev;
3168
3169         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3170                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3171         if (pci_enable_device(pdev)) {
3172                 ret = -ENODEV;
3173                 goto err_out_free_iadev;
3174         }
3175         dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3176         if (!dev) {
3177                 ret = -ENOMEM;
3178                 goto err_out_disable_dev;
3179         }
3180         dev->dev_data = iadev;
3181         IF_INIT(printk(DEV_LABEL ": registered at (itf: %d)\n", dev->number);)
3182         IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3183                 iadev->LineRate);)
3184
3185         pci_set_drvdata(pdev, dev);
3186
3187         ia_dev[iadev_count] = iadev;
3188         _ia_dev[iadev_count] = dev;
3189         iadev_count++;
3190         spin_lock_init(&iadev->misc_lock);
3191         /* First fixes first. I don't want to think about this now. */
3192         spin_lock_irqsave(&iadev->misc_lock, flags); 
3193         if (ia_init(dev) || ia_start(dev)) {  
3194                 IF_INIT(printk("IA register failed!\n");)
3195                 iadev_count--;
3196                 ia_dev[iadev_count] = NULL;
3197                 _ia_dev[iadev_count] = NULL;
3198                 spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3199                 ret = -EINVAL;
3200                 goto err_out_deregister_dev;
3201         }
3202         spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3203         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3204
3205         iadev->next_board = ia_boards;  
3206         ia_boards = dev;  
3207
3208         return 0;
3209
3210 err_out_deregister_dev:
3211         atm_dev_deregister(dev);  
3212 err_out_disable_dev:
3213         pci_disable_device(pdev);
3214 err_out_free_iadev:
3215         kfree(iadev);
3216 err_out:
3217         return ret;
3218 }
3219
3220 static void __devexit ia_remove_one(struct pci_dev *pdev)
3221 {
3222         struct atm_dev *dev = pci_get_drvdata(pdev);
3223         IADEV *iadev = INPH_IA_DEV(dev);
3224
3225         /* Disable phy interrupts */
3226         ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3227                                    SUNI_RSOP_CIE);
3228         udelay(1);
3229
3230         if (dev->phy && dev->phy->stop)
3231                 dev->phy->stop(dev);
3232
3233         /* De-register device */  
3234         free_irq(iadev->irq, dev);
3235         iadev_count--;
3236         ia_dev[iadev_count] = NULL;
3237         _ia_dev[iadev_count] = NULL;
3238         IF_EVENT(printk("deregistering iadev at (itf:%d)\n", dev->number);)
3239         atm_dev_deregister(dev);
3240
3241         iounmap(iadev->base);  
3242         pci_disable_device(pdev);
3243
3244         ia_free_rx(iadev);
3245         ia_free_tx(iadev);
3246
3247         kfree(iadev);
3248 }
3249
3250 static struct pci_device_id ia_pci_tbl[] = {
3251         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3252         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3253         { 0,}
3254 };
3255 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3256
3257 static struct pci_driver ia_driver = {
3258         .name =         DEV_LABEL,
3259         .id_table =     ia_pci_tbl,
3260         .probe =        ia_init_one,
3261         .remove =       __devexit_p(ia_remove_one),
3262 };
3263
3264 static int __init ia_module_init(void)
3265 {
3266         int ret;
3267
3268         ret = pci_register_driver(&ia_driver);
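        /* ia_timer is defined earlier in this driver; arm it only when
           registration succeeds (the disabled #if 0 block in ia_open would
           instead arm it on the first VC open). */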
3269         if (ret >= 0) {
3270                 ia_timer.expires = jiffies + 3*HZ;
3271                 add_timer(&ia_timer); 
3272         } else
3273                 printk(KERN_ERR DEV_LABEL ": no adapter found\n");  
3274         return ret;
3275 }
3276
3277 static void __exit ia_module_exit(void)
3278 {
3279         pci_unregister_driver(&ia_driver);
3280
3281         del_timer(&ia_timer);
3282 }
3283
3284 module_init(ia_module_init);
3285 module_exit(ia_module_exit);