1 /* $Id: hfc_pci.c,v 1.48.2.4 2004/02/11 13:21:33 keil Exp $
3 * low level driver for CCD's hfc-pci based cards
5 * Author Werner Cornelius
6 * based on existing driver for CCD hfc ISA cards
7 * Copyright by Werner Cornelius <werner@isdn4linux.de>
8 * by Karsten Keil <keil@isdn4linux.de>
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
13 * For changes and modifications please read
14 * Documentation/isdn/HiSax.cert
18 #include <linux/init.h>
19 #include <linux/config.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
26 extern const char *CardType[];
28 static const char *hfcpci_revision = "$Revision: 1.48.2.4 $";
30 /* table entry in the PCI devices list */
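typedef struct {
	int vendor_id;		/* PCI vendor/device pair, matched in setup_hfcpci() */
	int device_id;
	char *vendor_name;	/* display strings printed when a card is found */
	char *card_name;
} PCI_ENTRY;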
38 #define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */
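					/* (20 * 3.125 ms = 62.5 ms overall) */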
39 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
40 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
42 static const PCI_ENTRY id_list[] =
44 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, "CCD/Billion/Asuscom", "2BD0"},
45 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, "Billion", "B000"},
46 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, "Billion", "B006"},
47 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, "Billion", "B007"},
48 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, "Billion", "B008"},
49 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, "Billion", "B009"},
50 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, "Billion", "B00A"},
51 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"},
52 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"},
53 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"},
54 {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"},
55 {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"},
56 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"},
57 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, "German telekom", "A1T"},
58 {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, "Motorola MC145575", "MC145575"},
59 {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, "Zoltrix", "2BD0"},
60 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E,"Digi International", "Digi DataFire Micro V IOM2 (Europe)"},
61 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"},
62 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"},
63 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"},
64 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
71 /******************************************/
72 /* free hardware resources used by driver */
73 /******************************************/
75 release_io_hfcpci(struct IsdnCardState *cs)
77 printk(KERN_INFO "HiSax: release hfcpci at %p\n",
78 cs->hw.hfcpci.pci_io);
79 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
80 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
81 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
83 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
85 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
86 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0); /* disable memory mapped ports + busmaster */
87 del_timer(&cs->hw.hfcpci.timer);
88 kfree(cs->hw.hfcpci.share_start);
89 cs->hw.hfcpci.share_start = NULL;
90 iounmap((void *)cs->hw.hfcpci.pci_io);
93 /********************************************************************************/
94 /* function called to reset the HFC PCI chip. A complete software reset of chip */
95 /* and fifos is done. */
96 /********************************************************************************/
98 reset_hfcpci(struct IsdnCardState *cs)
100 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
101 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
102 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
104 printk(KERN_INFO "HFC_PCI: resetting card\n");
105 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER); /* enable memory ports + busmaster */
106 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
108 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
110 if (Read_hfc(cs, HFCPCI_STATUS) & 2)
111 printk(KERN_WARNING "HFC-PCI init bit busy\n");
113 cs->hw.hfcpci.fifo_en = 0x30; /* only D fifos enabled */
114 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
116 cs->hw.hfcpci.trm = 0 + HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
117 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
119 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_TE); /* ST-Bit delay for TE-Mode */
120 cs->hw.hfcpci.sctrl_e = HFCPCI_AUTO_AWAKE;
121 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e); /* S/T Auto awake */
122 cs->hw.hfcpci.bswapped = 0; /* no exchange */
123 cs->hw.hfcpci.nt_mode = 0; /* we are in TE mode */
124 cs->hw.hfcpci.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
125 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
127 cs->hw.hfcpci.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
128 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
129 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
131 /* Clear already pending ints */
132 if (Read_hfc(cs, HFCPCI_INT_S1));
134 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2); /* HFC ST 2 */
136 Write_hfc(cs, HFCPCI_STATES, 2); /* HFC ST 2 */
137 cs->hw.hfcpci.mst_m = HFCPCI_MASTER; /* HFC Master Mode */
139 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
140 cs->hw.hfcpci.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
141 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
142 cs->hw.hfcpci.sctrl_r = 0;
143 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
145 /* Init GCI/IOM2 in master mode */
146 /* Slots 0 and 1 are set for B-chan 1 and 2 */
147 /* D- and monitor/CI channel are not enabled */
148 /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */
149 /* STIO2 is used as data input, B1+B2 from IOM->ST */
150 * ST B-channel send disabled -> continuous 1s */
151 /* The IOM slots are always enabled */
152 cs->hw.hfcpci.conn = 0x36; /* set data flow directions */
153 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
154 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */
155 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */
156 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */
157 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */
159 /* Finally enable IRQ output */
160 cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE;
161 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
162 if (Read_hfc(cs, HFCPCI_INT_S1));
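/* Illustrative helper (a sketch, not part of the original driver): reading
 * HFCPCI_INT_S1 clears the latched interrupt bits, which is why the code
 * above and below reads the register and simply discards the result. */
static inline void
hfcpci_flush_int_s1(struct IsdnCardState *cs)
{
	(void) Read_hfc(cs, HFCPCI_INT_S1);	/* the read itself clears the latch */
}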
165 /***************************************************/
166 /* Timer function called when kernel timer expires */
167 /***************************************************/
169 hfcpci_Timer(struct IsdnCardState *cs)
171 cs->hw.hfcpci.timer.expires = jiffies + 75;
173 /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
174 add_timer(&cs->hw.hfcpci.timer); */
179 /*********************************/
180 /* schedule a new D-channel task */
181 /*********************************/
183 sched_event_D_pci(struct IsdnCardState *cs, int event)
185 test_and_set_bit(event, &cs->event);
186 schedule_work(&cs->tqueue);
189 /*********************************/
190 /* schedule a new b_channel task */
191 /*********************************/
193 hfcpci_sched_event(struct BCState *bcs, int event)
195 test_and_set_bit(event, &bcs->event);
196 schedule_work(&bcs->tqueue);
199 /************************************************/
200 /* select a b-channel entry matching and active */
201 /************************************************/
204 Sel_BCS(struct IsdnCardState *cs, int channel)
206 if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
207 return (&cs->bcs[0]);
208 else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
209 return (&cs->bcs[1]);
214 /***************************************/
215 /* clear the desired B-channel rx fifo */
216 /***************************************/
217 static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
222 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
223 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2RX;
225 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
226 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1RX;
229 cs->hw.hfcpci.fifo_en ^= fifo_state;
230 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
231 cs->hw.hfcpci.last_bfifo_cnt[fifo] = 0;
232 bzr->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
233 bzr->za[MAX_B_FRAMES].z2 = bzr->za[MAX_B_FRAMES].z1;
234 bzr->f1 = MAX_B_FRAMES;
235 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
237 cs->hw.hfcpci.fifo_en |= fifo_state;
238 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
241 /***************************************/
242 /* clear the desired B-channel tx fifo */
243 /***************************************/
244 static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
249 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
250 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2TX;
252 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
253 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1TX;
256 cs->hw.hfcpci.fifo_en ^= fifo_state;
257 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
258 bzt->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
259 bzt->za[MAX_B_FRAMES].z2 = bzt->za[MAX_B_FRAMES].z1;
260 bzt->f1 = MAX_B_FRAMES;
261 bzt->f2 = bzt->f1; /* init F pointers to remain constant */
263 cs->hw.hfcpci.fifo_en |= fifo_state;
264 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
267 /*********************************************/
268 /* read a complete B-frame out of the buffer */
269 /*********************************************/
270 static struct sk_buff
272 hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type * bz, u_char * bdata, int count)
274 u_char *ptr, *ptr1, new_f2;
276 struct IsdnCardState *cs = bcs->cs;
277 int total, maxlen, new_z2;
280 if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
281 debugl1(cs, "hfcpci_empty_fifo");
282 zp = &bz->za[bz->f2]; /* point to Z-Regs */
283 new_z2 = zp->z2 + count; /* new position in fifo */
284 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
285 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
286 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
287 if ((count > HSCX_BUFMAX + 3) || (count < 4) ||
288 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
289 if (cs->debug & L1_DEB_WARN)
290 debugl1(cs, "hfcpci_empty_fifo: incoming packet invalid length %d or crc", count);
291 #ifdef ERROR_STATISTIC
294 bz->za[new_f2].z2 = new_z2;
295 bz->f2 = new_f2; /* next buffer */
297 } else if (!(skb = dev_alloc_skb(count - 3)))
298 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
302 ptr = skb_put(skb, count);
304 if (zp->z2 + count <= B_FIFO_SIZE + B_SUB_VAL)
305 maxlen = count; /* complete transfer */
307 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
309 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
310 memcpy(ptr, ptr1, maxlen); /* copy data */
313 if (count) { /* rest remaining */
315 ptr1 = bdata; /* start of buffer */
316 memcpy(ptr, ptr1, count); /* rest */
318 bz->za[new_f2].z2 = new_z2;
319 bz->f2 = new_f2; /* next buffer */
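/* Sketch (not part of the original source): the Z pointer arithmetic used
 * above. The B channel Z counters run in the window B_SUB_VAL .. B_SUB_VAL +
 * B_FIFO_SIZE - 1, so advancing a position wraps by subtracting B_FIFO_SIZE
 * once it passes the end of that window. */
static inline int
hfcpci_b_z_advance(int z, int count)
{
	z += count;
	if (z >= (B_FIFO_SIZE + B_SUB_VAL))
		z -= B_FIFO_SIZE;	/* wrap back into the FIFO window */
	return (z);
}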
325 /*******************************/
326 /* D-channel receive procedure */
327 /*******************************/
330 receive_dmsg(struct IsdnCardState *cs)
340 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_rx;
341 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
342 debugl1(cs, "rec_dmsg blocked");
345 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
346 zp = &df->za[df->f2 & D_FREG_MASK];
347 rcnt = zp->z1 - zp->z2;
351 if (cs->debug & L1_DEB_ISAC)
352 debugl1(cs, "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
353 df->f1, df->f2, zp->z1, zp->z2, rcnt);
355 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
356 (df->data[zp->z1])) {
357 if (cs->debug & L1_DEB_WARN)
358 debugl1(cs, "empty_fifo hfcpci paket inv. len %d or crc %d", rcnt, df->data[zp->z1]);
359 #ifdef ERROR_STATISTIC
362 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
363 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + rcnt) & (D_FIFO_SIZE - 1);
364 } else if ((skb = dev_alloc_skb(rcnt - 3))) {
367 ptr = skb_put(skb, rcnt);
369 if (zp->z2 + rcnt <= D_FIFO_SIZE)
370 maxlen = rcnt; /* complete transfer */
372 maxlen = D_FIFO_SIZE - zp->z2; /* maximum */
374 ptr1 = df->data + zp->z2; /* start of data */
375 memcpy(ptr, ptr1, maxlen); /* copy data */
378 if (rcnt) { /* rest remaining */
380 ptr1 = df->data; /* start of buffer */
381 memcpy(ptr, ptr1, rcnt); /* rest */
383 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
384 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + total) & (D_FIFO_SIZE - 1);
386 skb_queue_tail(&cs->rq, skb);
387 sched_event_D_pci(cs, D_RCVBUFREADY);
389 printk(KERN_WARNING "HFC-PCI: D receive out of memory\n");
391 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
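/* Sketch (illustrative only): how the D channel F counter is advanced above.
 * Only the D_FREG_MASK part is ever used to index za[]; the OR with
 * MAX_D_FRAMES + 1 keeps the stored value in the upper numbering range the
 * chip's F registers apparently use, without changing the array index. */
static inline u_char
hfcpci_next_d_f(u_char f)
{
	return (((f + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1));
}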
395 /*******************************************************************************/
396 /* check for transparent receive data and read max one threshold size if avail */
397 /*******************************************************************************/
399 hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type * bz, u_char * bdata)
401 unsigned short *z1r, *z2r;
402 int new_z2, fcnt, maxlen;
406 z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
409 if (!(fcnt = *z1r - *z2r))
410 return (0); /* no data avail */
413 fcnt += B_FIFO_SIZE; /* bytes actually buffered */
414 if (fcnt > HFCPCI_BTRANS_THRESHOLD)
415 fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */
417 new_z2 = *z2r + fcnt; /* new position in fifo */
418 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
419 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
421 if (!(skb = dev_alloc_skb(fcnt)))
422 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
424 ptr = skb_put(skb, fcnt);
425 if (*z2r + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
426 maxlen = fcnt; /* complete transfer */
428 maxlen = B_FIFO_SIZE + B_SUB_VAL - *z2r; /* maximum */
430 ptr1 = bdata + (*z2r - B_SUB_VAL); /* start of data */
431 memcpy(ptr, ptr1, maxlen); /* copy data */
434 if (fcnt) { /* rest remaining */
436 ptr1 = bdata; /* start of buffer */
437 memcpy(ptr, ptr1, fcnt); /* rest */
439 skb_queue_tail(&bcs->rqueue, skb);
440 hfcpci_sched_event(bcs, B_RCVBUFREADY);
443 *z2r = new_z2; /* new position */
445 } /* hfcpci_empty_fifo_trans */
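/* Sketch (not in the original): the byte count computed at the top of
 * hfcpci_empty_fifo_trans(). z1 is the chip's write position, z2 the
 * driver's read position; a negative difference means z1 already wrapped.
 * The result is capped so at most one threshold block is read per call. */
static inline int
hfcpci_trans_rx_bytes(unsigned short z1, unsigned short z2)
{
	int fcnt = z1 - z2;	/* bytes buffered by the chip */

	if (fcnt < 0)
		fcnt += B_FIFO_SIZE;	/* write position wrapped */
	if (fcnt > HFCPCI_BTRANS_THRESHOLD)
		fcnt = HFCPCI_BTRANS_THRESHOLD;	/* limit to one threshold */
	return (fcnt);
}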
447 /**********************************/
448 /* B-channel main receive routine */
449 /**********************************/
451 main_rec_hfcpci(struct BCState *bcs)
453 struct IsdnCardState *cs = bcs->cs;
455 int receive, count = 5;
462 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
463 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
464 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
467 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
468 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b1;
473 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
474 debugl1(cs, "rec_data %d blocked", bcs->channel);
477 if (bz->f1 != bz->f2) {
478 if (cs->debug & L1_DEB_HSCX)
479 debugl1(cs, "hfcpci rec %d f1(%d) f2(%d)",
480 bcs->channel, bz->f1, bz->f2);
481 zp = &bz->za[bz->f2];
483 rcnt = zp->z1 - zp->z2;
487 if (cs->debug & L1_DEB_HSCX)
488 debugl1(cs, "hfcpci rec %d z1(%x) z2(%x) cnt(%d)",
489 bcs->channel, zp->z1, zp->z2, rcnt);
490 if ((skb = hfcpci_empty_fifo(bcs, bz, bdata, rcnt))) {
491 skb_queue_tail(&bcs->rqueue, skb);
492 hfcpci_sched_event(bcs, B_RCVBUFREADY);
494 rcnt = bz->f1 - bz->f2;
496 rcnt += MAX_B_FRAMES + 1;
497 if (cs->hw.hfcpci.last_bfifo_cnt[real_fifo] > rcnt + 1) {
499 hfcpci_clear_fifo_rx(cs, real_fifo);
501 cs->hw.hfcpci.last_bfifo_cnt[real_fifo] = rcnt;
506 } else if (bcs->mode == L1_MODE_TRANS)
507 receive = hfcpci_empty_fifo_trans(bcs, bz, bdata);
510 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
511 if (count && receive)
516 /**************************/
517 /* D-channel send routine */
518 /**************************/
520 hfcpci_fill_dfifo(struct IsdnCardState *cs)
523 int count, new_z1, maxlen;
525 u_char *src, *dst, new_f1;
529 if (cs->tx_skb->len <= 0)
532 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_tx;
534 if (cs->debug & L1_DEB_ISAC)
535 debugl1(cs, "hfcpci_fill_Dfifo f1(%d) f2(%d) z1(f1)(%x)",
537 df->za[df->f1 & D_FREG_MASK].z1);
538 fcnt = df->f1 - df->f2; /* frame count actually buffered */
540 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
541 if (fcnt > (MAX_D_FRAMES - 1)) {
542 if (cs->debug & L1_DEB_ISAC)
543 debugl1(cs, "hfcpci_fill_Dfifo more as 14 frames");
544 #ifdef ERROR_STATISTIC
549 /* now determine free bytes in FIFO buffer */
550 count = df->za[df->f2 & D_FREG_MASK].z2 - df->za[df->f1 & D_FREG_MASK].z1 - 1;
552 count += D_FIFO_SIZE; /* count now contains available bytes */
554 if (cs->debug & L1_DEB_ISAC)
555 debugl1(cs, "hfcpci_fill_Dfifo count(%ld/%d)",
556 cs->tx_skb->len, count);
557 if (count < cs->tx_skb->len) {
558 if (cs->debug & L1_DEB_ISAC)
559 debugl1(cs, "hfcpci_fill_Dfifo no fifo mem");
562 count = cs->tx_skb->len; /* get frame len */
563 new_z1 = (df->za[df->f1 & D_FREG_MASK].z1 + count) & (D_FIFO_SIZE - 1);
564 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
565 src = cs->tx_skb->data; /* source pointer */
566 dst = df->data + df->za[df->f1 & D_FREG_MASK].z1;
567 maxlen = D_FIFO_SIZE - df->za[df->f1 & D_FREG_MASK].z1; /* end fifo */
569 maxlen = count; /* limit size */
570 memcpy(dst, src, maxlen); /* first copy */
572 count -= maxlen; /* remaining bytes */
574 dst = df->data; /* start of buffer */
575 src += maxlen; /* new position */
576 memcpy(dst, src, count);
578 df->za[new_f1 & D_FREG_MASK].z1 = new_z1; /* for next buffer */
579 df->za[df->f1 & D_FREG_MASK].z1 = new_z1; /* new pos actual buffer */
580 df->f1 = new_f1; /* next frame */
582 dev_kfree_skb_any(cs->tx_skb);
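/* Sketch (illustrative only): the free-space calculation used above. One
 * byte is always kept in reserve so that equal read and write positions
 * unambiguously mean "empty"; a non-positive intermediate result means the
 * difference has wrapped around the FIFO. */
static inline int
hfcpci_d_tx_free(int z1_write, int z2_read)
{
	int count = z2_read - z1_write - 1;	/* free bytes, one kept spare */

	if (count <= 0)
		count += D_FIFO_SIZE;		/* difference wrapped around */
	return (count);
}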
587 /**************************/
588 /* B-channel send routine */
589 /**************************/
591 hfcpci_fill_fifo(struct BCState *bcs)
593 struct IsdnCardState *cs = bcs->cs;
598 u_char new_f1, *src, *dst;
599 unsigned short *z1t, *z2t;
603 if (bcs->tx_skb->len <= 0)
606 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
607 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
608 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b2;
610 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
611 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b1;
614 if (bcs->mode == L1_MODE_TRANS) {
615 z1t = &bz->za[MAX_B_FRAMES].z1;
617 if (cs->debug & L1_DEB_HSCX)
618 debugl1(cs, "hfcpci_fill_fifo_trans %d z1(%x) z2(%x)",
619 bcs->channel, *z1t, *z2t);
622 fcnt += B_FIFO_SIZE; /* fcnt contains available bytes in fifo */
623 fcnt = B_FIFO_SIZE - fcnt; /* remaining bytes to send */
625 while ((fcnt < 2 * HFCPCI_BTRANS_THRESHOLD) && (bcs->tx_skb)) {
626 if (bcs->tx_skb->len < B_FIFO_SIZE - fcnt) {
627 /* data is suitable for fifo */
628 count = bcs->tx_skb->len;
630 new_z1 = *z1t + count; /* new buffer Position */
631 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
632 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
633 src = bcs->tx_skb->data; /* source pointer */
634 dst = bdata + (*z1t - B_SUB_VAL);
635 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - *z1t; /* end of fifo */
637 maxlen = count; /* limit size */
638 memcpy(dst, src, maxlen); /* first copy */
640 count -= maxlen; /* remaining bytes */
642 dst = bdata; /* start of buffer */
643 src += maxlen; /* new position */
644 memcpy(dst, src, count);
646 bcs->tx_cnt -= bcs->tx_skb->len;
647 fcnt += bcs->tx_skb->len;
648 *z1t = new_z1; /* now send data */
649 } else if (cs->debug & L1_DEB_HSCX)
650 debugl1(cs, "hfcpci_fill_fifo_trans %d frame length %d discarded",
651 bcs->channel, bcs->tx_skb->len);
653 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
654 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
656 spin_lock_irqsave(&bcs->aclock, flags);
657 bcs->ackcnt += bcs->tx_skb->len;
658 spin_unlock_irqrestore(&bcs->aclock, flags);
659 schedule_event(bcs, B_ACKPENDING);
662 dev_kfree_skb_any(bcs->tx_skb);
663 bcs->tx_skb = skb_dequeue(&bcs->squeue); /* fetch next data */
665 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
668 if (cs->debug & L1_DEB_HSCX)
669 debugl1(cs, "hfcpci_fill_fifo_hdlc %d f1(%d) f2(%d) z1(f1)(%x)",
670 bcs->channel, bz->f1, bz->f2,
673 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
675 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
676 if (fcnt > (MAX_B_FRAMES - 1)) {
677 if (cs->debug & L1_DEB_HSCX)
678 debugl1(cs, "hfcpci_fill_Bfifo more as 14 frames");
681 /* now determine free bytes in FIFO buffer */
682 count = bz->za[bz->f2].z2 - bz->za[bz->f1].z1 - 1;
684 count += B_FIFO_SIZE; /* count now contains available bytes */
686 if (cs->debug & L1_DEB_HSCX)
687 debugl1(cs, "hfcpci_fill_fifo %d count(%ld/%d),%lx",
688 bcs->channel, bcs->tx_skb->len,
689 count, current->state);
691 if (count < bcs->tx_skb->len) {
692 if (cs->debug & L1_DEB_HSCX)
693 debugl1(cs, "hfcpci_fill_fifo no fifo mem");
696 count = bcs->tx_skb->len; /* get frame len */
697 new_z1 = bz->za[bz->f1].z1 + count; /* new buffer Position */
698 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
699 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
701 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
702 src = bcs->tx_skb->data; /* source pointer */
703 dst = bdata + (bz->za[bz->f1].z1 - B_SUB_VAL);
704 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - bz->za[bz->f1].z1; /* end fifo */
706 maxlen = count; /* limit size */
707 memcpy(dst, src, maxlen); /* first copy */
709 count -= maxlen; /* remaining bytes */
711 dst = bdata; /* start of buffer */
712 src += maxlen; /* new position */
713 memcpy(dst, src, count);
715 bcs->tx_cnt -= bcs->tx_skb->len;
716 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
717 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
719 spin_lock_irqsave(&bcs->aclock, flags);
720 bcs->ackcnt += bcs->tx_skb->len;
721 spin_unlock_irqrestore(&bcs->aclock, flags);
722 schedule_event(bcs, B_ACKPENDING);
725 bz->za[new_f1].z1 = new_z1; /* for next buffer */
726 bz->f1 = new_f1; /* next frame */
728 dev_kfree_skb_any(bcs->tx_skb);
730 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
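/* Sketch (not part of the driver): the frame-count test used by the HDLC
 * branch above. The F counters count modulo MAX_B_FRAMES + 1, so a negative
 * difference only means f1 has wrapped past f2; the caller refuses to keep
 * more than MAX_B_FRAMES - 1 frames queued at once. */
static inline int
hfcpci_b_frames_pending(u_char f1, u_char f2)
{
	int fcnt = f1 - f2;	/* frames queued but not yet sent */

	if (fcnt < 0)
		fcnt += (MAX_B_FRAMES + 1);	/* counter wrapped */
	return (fcnt);
}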
734 /**********************************************/
735 /* D-channel l1 state call for leased NT-mode */
736 /**********************************************/
738 dch_nt_l2l1(struct PStack *st, int pr, void *arg)
740 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
743 case (PH_DATA | REQUEST):
744 case (PH_PULL | REQUEST):
745 case (PH_PULL | INDICATION):
746 st->l1.l1hw(st, pr, arg);
748 case (PH_ACTIVATE | REQUEST):
749 st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
751 case (PH_TESTLOOP | REQUEST):
753 debugl1(cs, "PH_TEST_LOOP B1");
755 debugl1(cs, "PH_TEST_LOOP B2");
756 if (!(3 & (long) arg))
757 debugl1(cs, "PH_TEST_LOOP DISABLED");
758 st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
762 debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr);
769 /***********************/
770 /* set/reset echo mode */
771 /***********************/
773 hfcpci_auxcmd(struct IsdnCardState *cs, isdn_ctrl * ic)
776 int i = *(unsigned int *) ic->parm.num;
778 if ((ic->arg == 98) &&
779 (!(cs->hw.hfcpci.int_m1 & (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC + HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC)))) {
780 spin_lock_irqsave(&cs->lock, flags);
781 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_NT); /* ST-Bit delay for NT-Mode */
782 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 0); /* HFC ST G0 */
784 cs->hw.hfcpci.sctrl |= SCTRL_MODE_NT;
785 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); /* set NT-mode */
787 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 1); /* HFC ST G1 */
789 Write_hfc(cs, HFCPCI_STATES, 1 | HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
790 cs->dc.hfcpci.ph_state = 1;
791 cs->hw.hfcpci.nt_mode = 1;
792 cs->hw.hfcpci.nt_timer = 0;
793 cs->stlist->l2.l2l1 = dch_nt_l2l1;
794 spin_unlock_irqrestore(&cs->lock, flags);
795 debugl1(cs, "NT mode activated");
798 if ((cs->chanlimit > 1) || (cs->hw.hfcpci.bswapped) ||
799 (cs->hw.hfcpci.nt_mode) || (ic->arg != 12))
802 spin_lock_irqsave(&cs->lock, flags);
805 cs->hw.hfcpci.trm |= 0x20; /* enable echo chan */
806 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_B2REC;
807 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2RX;
810 cs->hw.hfcpci.trm &= ~0x20; /* disable echo chan */
811 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_B2REC;
812 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2RX;
814 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
815 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
816 cs->hw.hfcpci.conn |= 0x10; /* B2-IOM -> B2-ST */
817 cs->hw.hfcpci.ctmt &= ~2;
818 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
819 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
820 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
821 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
822 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
823 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
824 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
825 spin_unlock_irqrestore(&cs->lock, flags);
827 } /* hfcpci_auxcmd */
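/* Usage sketch (hypothetical, not taken from the ISDN core): how a caller
 * would presumably drive the echo switch handled above, i.e. arg 12 with the
 * on/off flag in the first bytes of parm.num; the isdn_ctrl layout is only
 * assumed from its use in hfcpci_auxcmd() itself. */
static inline void
hfcpci_set_echo_example(struct IsdnCardState *cs, int on)
{
	isdn_ctrl ic;

	ic.arg = 12;				/* echo channel command */
	*(unsigned int *) ic.parm.num = on;	/* nonzero presumably enables it */
	hfcpci_auxcmd(cs, &ic);
}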
829 /*****************************/
830 /* E-channel receive routine */
831 /*****************************/
833 receive_emsg(struct IsdnCardState *cs)
836 int receive, count = 5;
840 u_char *ptr, *ptr1, new_f2;
841 int total, maxlen, new_z2;
842 u_char e_buffer[256];
844 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
845 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
848 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
849 debugl1(cs, "echo_rec_data blocked");
852 if (bz->f1 != bz->f2) {
853 if (cs->debug & L1_DEB_ISAC)
854 debugl1(cs, "hfcpci e_rec f1(%d) f2(%d)",
856 zp = &bz->za[bz->f2];
858 rcnt = zp->z1 - zp->z2;
862 if (cs->debug & L1_DEB_ISAC)
863 debugl1(cs, "hfcpci e_rec z1(%x) z2(%x) cnt(%d)",
864 zp->z1, zp->z2, rcnt);
865 new_z2 = zp->z2 + rcnt; /* new position in fifo */
866 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
867 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
868 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
869 if ((rcnt > 256 + 3) || (rcnt < 4) ||
870 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
871 if (cs->debug & L1_DEB_WARN)
872 debugl1(cs, "hfcpci_empty_echan: incoming packet invalid length %d or crc", rcnt);
873 bz->za[new_f2].z2 = new_z2;
874 bz->f2 = new_f2; /* next buffer */
880 if (zp->z2 <= B_FIFO_SIZE + B_SUB_VAL)
881 maxlen = rcnt; /* complete transfer */
883 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
885 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
886 memcpy(ptr, ptr1, maxlen); /* copy data */
889 if (rcnt) { /* rest remaining */
891 ptr1 = bdata; /* start of buffer */
892 memcpy(ptr, ptr1, rcnt); /* rest */
894 bz->za[new_f2].z2 = new_z2;
895 bz->f2 = new_f2; /* next buffer */
896 if (cs->debug & DEB_DLOG_HEX) {
898 if ((total - 3) < MAX_DLOG_SPACE / 3 - 10) {
904 ptr += QuickHex(ptr, e_buffer, total - 3);
908 HiSax_putstatus(cs, NULL, cs->dlog);
910 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
914 rcnt = bz->f1 - bz->f2;
916 rcnt += MAX_B_FRAMES + 1;
923 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
924 if (count && receive)
929 /*********************/
930 /* Interrupt handler */
931 /*********************/
933 hfcpci_interrupt(int intno, void *dev_id, struct pt_regs *regs)
936 struct IsdnCardState *cs = dev_id;
942 if (!(cs->hw.hfcpci.int_m2 & 0x08)) {
943 debugl1(cs, "HFC-PCI: int_m2 %x not initialised", cs->hw.hfcpci.int_m2);
944 return IRQ_NONE; /* not initialised */
946 spin_lock_irqsave(&cs->lock, flags);
947 if (HFCPCI_ANYINT & (stat = Read_hfc(cs, HFCPCI_STATUS))) {
948 val = Read_hfc(cs, HFCPCI_INT_S1);
949 if (cs->debug & L1_DEB_ISAC)
950 debugl1(cs, "HFC-PCI: stat(%02x) s1(%02x)", stat, val);
952 spin_unlock_irqrestore(&cs->lock, flags);
955 if (cs->debug & L1_DEB_ISAC)
956 debugl1(cs, "HFC-PCI irq %x %s", val,
957 test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
958 "locked" : "unlocked");
959 val &= cs->hw.hfcpci.int_m1;
960 if (val & 0x40) { /* state machine irq */
961 exval = Read_hfc(cs, HFCPCI_STATES) & 0xf;
962 if (cs->debug & L1_DEB_ISAC)
963 debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcpci.ph_state,
965 cs->dc.hfcpci.ph_state = exval;
966 sched_event_D_pci(cs, D_L1STATECHANGE);
969 if (val & 0x80) { /* timer irq */
970 if (cs->hw.hfcpci.nt_mode) {
971 if ((--cs->hw.hfcpci.nt_timer) < 0)
972 sched_event_D_pci(cs, D_L1STATECHANGE);
975 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
978 if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
979 cs->hw.hfcpci.int_s1 |= val;
980 spin_unlock_irqrestore(&cs->lock, flags);
983 if (cs->hw.hfcpci.int_s1 & 0x18) {
985 val = cs->hw.hfcpci.int_s1;
986 cs->hw.hfcpci.int_s1 = exval;
989 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
991 debugl1(cs, "hfcpci spurious 0x08 IRQ");
993 main_rec_hfcpci(bcs);
998 else if (!(bcs = Sel_BCS(cs, 1))) {
1000 debugl1(cs, "hfcpci spurious 0x10 IRQ");
1002 main_rec_hfcpci(bcs);
1005 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
1007 debugl1(cs, "hfcpci spurious 0x01 IRQ");
1010 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1011 hfcpci_fill_fifo(bcs);
1012 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1014 debugl1(cs, "fill_data %d blocked", bcs->channel);
1016 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1017 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1018 hfcpci_fill_fifo(bcs);
1019 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1021 debugl1(cs, "fill_data %d blocked", bcs->channel);
1023 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1029 if (!(bcs = Sel_BCS(cs, 1))) {
1031 debugl1(cs, "hfcpci spurious 0x02 IRQ");
1034 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1035 hfcpci_fill_fifo(bcs);
1036 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1038 debugl1(cs, "fill_data %d blocked", bcs->channel);
1040 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1041 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1042 hfcpci_fill_fifo(bcs);
1043 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1045 debugl1(cs, "fill_data %d blocked", bcs->channel);
1047 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1052 if (val & 0x20) { /* receive dframe */
1055 if (val & 0x04) { /* dframe transmitted */
1056 if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
1057 del_timer(&cs->dbusytimer);
1058 if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
1059 sched_event_D_pci(cs, D_CLEARBUSY);
1061 if (cs->tx_skb->len) {
1062 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1063 hfcpci_fill_dfifo(cs);
1064 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1066 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1070 dev_kfree_skb_irq(cs->tx_skb);
1075 if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
1077 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1078 hfcpci_fill_dfifo(cs);
1079 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1081 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1084 sched_event_D_pci(cs, D_XMTBUFREADY);
1087 if (cs->hw.hfcpci.int_s1 && count--) {
1088 val = cs->hw.hfcpci.int_s1;
1089 cs->hw.hfcpci.int_s1 = 0;
1090 if (cs->debug & L1_DEB_ISAC)
1091 debugl1(cs, "HFC-PCI irq %x loop %d", val, 15 - count);
1095 spin_unlock_irqrestore(&cs->lock, flags);
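/* Note added for clarity: whenever FLG_LOCK_ATOMIC is held by another path,
 * the handler above only ORs the pending bits into hw.hfcpci.int_s1 and
 * backs off; the trailing loop ("HFC-PCI irq %x loop %d") then replays the
 * saved bits once the flag is free again, bounded by the local count. */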
1099 /********************************************************************/
1100 /* timer callback for D-chan busy resolution. Currently no function */
1101 /********************************************************************/
1103 hfcpci_dbusy_timer(struct IsdnCardState *cs)
1107 /*************************************/
1108 /* Layer 1 D-channel hardware access */
1109 /*************************************/
1111 HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
1114 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
1115 struct sk_buff *skb = arg;
1118 case (PH_DATA | REQUEST):
1119 if (cs->debug & DEB_DLOG_HEX)
1120 LogFrame(cs, skb->data, skb->len);
1121 if (cs->debug & DEB_DLOG_VERBOSE)
1122 dlogframe(cs, skb, 0);
1123 spin_lock_irqsave(&cs->lock, flags);
1125 skb_queue_tail(&cs->sq, skb);
1126 #ifdef L2FRAME_DEBUG /* psa */
1127 if (cs->debug & L1_DEB_LAPD)
1128 Logl2Frame(cs, skb, "PH_DATA Queued", 0);
1133 #ifdef L2FRAME_DEBUG /* psa */
1134 if (cs->debug & L1_DEB_LAPD)
1135 Logl2Frame(cs, skb, "PH_DATA", 0);
1137 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1138 hfcpci_fill_dfifo(cs);
1139 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1141 debugl1(cs, "hfcpci_fill_dfifo blocked");
1144 spin_unlock_irqrestore(&cs->lock, flags);
1146 case (PH_PULL | INDICATION):
1147 spin_lock_irqsave(&cs->lock, flags);
1149 if (cs->debug & L1_DEB_WARN)
1150 debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
1151 skb_queue_tail(&cs->sq, skb);
1152 spin_unlock_irqrestore(&cs->lock, flags);
1155 if (cs->debug & DEB_DLOG_HEX)
1156 LogFrame(cs, skb->data, skb->len);
1157 if (cs->debug & DEB_DLOG_VERBOSE)
1158 dlogframe(cs, skb, 0);
1161 #ifdef L2FRAME_DEBUG /* psa */
1162 if (cs->debug & L1_DEB_LAPD)
1163 Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
1165 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1166 hfcpci_fill_dfifo(cs);
1167 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1169 debugl1(cs, "hfcpci_fill_dfifo blocked");
1170 spin_unlock_irqrestore(&cs->lock, flags);
1172 case (PH_PULL | REQUEST):
1173 #ifdef L2FRAME_DEBUG /* psa */
1174 if (cs->debug & L1_DEB_LAPD)
1175 debugl1(cs, "-> PH_REQUEST_PULL");
1178 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1179 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1181 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1183 case (HW_RESET | REQUEST):
1184 spin_lock_irqsave(&cs->lock, flags);
1185 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3); /* HFC ST 3 */
1187 Write_hfc(cs, HFCPCI_STATES, 3); /* HFC ST 2 */
1188 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1189 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1190 Write_hfc(cs, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
1191 spin_unlock_irqrestore(&cs->lock, flags);
1192 l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
1194 case (HW_ENABLE | REQUEST):
1195 spin_lock_irqsave(&cs->lock, flags);
1196 Write_hfc(cs, HFCPCI_STATES, HFCPCI_DO_ACTION);
1197 spin_unlock_irqrestore(&cs->lock, flags);
1199 case (HW_DEACTIVATE | REQUEST):
1200 spin_lock_irqsave(&cs->lock, flags);
1201 cs->hw.hfcpci.mst_m &= ~HFCPCI_MASTER;
1202 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1203 spin_unlock_irqrestore(&cs->lock, flags);
1205 case (HW_INFO3 | REQUEST):
1206 spin_lock_irqsave(&cs->lock, flags);
1207 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1208 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1209 spin_unlock_irqrestore(&cs->lock, flags);
1211 case (HW_TESTLOOP | REQUEST):
1212 spin_lock_irqsave(&cs->lock, flags);
1213 switch ((int) arg) {
1215 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* tx slot */
1216 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* rx slot */
1217 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~7) | 1;
1218 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1222 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* tx slot */
1223 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* rx slot */
1224 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~0x38) | 0x08;
1225 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1229 spin_unlock_irqrestore(&cs->lock, flags);
1230 if (cs->debug & L1_DEB_WARN)
1231 debugl1(cs, "hfcpci_l1hw loop invalid %4x", (int) arg);
1234 cs->hw.hfcpci.trm |= 0x80; /* enable IOM-loop */
1235 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
1236 spin_unlock_irqrestore(&cs->lock, flags);
1239 if (cs->debug & L1_DEB_WARN)
1240 debugl1(cs, "hfcpci_l1hw unknown pr %4x", pr);
1245 /***********************************************/
1246 /* called during init setting l1 stack pointer */
1247 /***********************************************/
1249 setstack_hfcpci(struct PStack *st, struct IsdnCardState *cs)
1251 st->l1.l1hw = HFCPCI_l1hw;
1254 /**************************************/
1255 /* send B-channel data if not blocked */
1256 /**************************************/
1258 hfcpci_send_data(struct BCState *bcs)
1260 struct IsdnCardState *cs = bcs->cs;
1262 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1263 hfcpci_fill_fifo(bcs);
1264 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1266 debugl1(cs, "send_data %d blocked", bcs->channel);
1269 /***************************************************************/
1270 /* activate/deactivate hardware for selected channels and mode */
1271 /***************************************************************/
1273 mode_hfcpci(struct BCState *bcs, int mode, int bc)
1275 struct IsdnCardState *cs = bcs->cs;
1278 if (cs->debug & L1_DEB_HSCX)
1279 debugl1(cs, "HFCPCI bchannel mode %d bchan %d/%d",
1280 mode, bc, bcs->channel);
1284 if (cs->chanlimit > 1) {
1285 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1286 cs->hw.hfcpci.sctrl_e &= ~0x80;
1289 if (mode != L1_MODE_NULL) {
1290 cs->hw.hfcpci.bswapped = 1; /* B1 and B2 exchanged */
1291 cs->hw.hfcpci.sctrl_e |= 0x80;
1293 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1294 cs->hw.hfcpci.sctrl_e &= ~0x80;
1298 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1299 cs->hw.hfcpci.sctrl_e &= ~0x80;
1303 case (L1_MODE_NULL):
1305 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
1306 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
1308 cs->hw.hfcpci.sctrl &= ~SCTRL_B1_ENA;
1309 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B1_ENA;
1312 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1313 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1315 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1316 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1319 case (L1_MODE_TRANS):
1320 hfcpci_clear_fifo_rx(cs, fifo2);
1321 hfcpci_clear_fifo_tx(cs, fifo2);
1323 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1324 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1326 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1327 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1330 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1331 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1332 cs->hw.hfcpci.ctmt |= 2;
1333 cs->hw.hfcpci.conn &= ~0x18;
1335 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1336 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1337 cs->hw.hfcpci.ctmt |= 1;
1338 cs->hw.hfcpci.conn &= ~0x03;
1341 case (L1_MODE_HDLC):
1342 hfcpci_clear_fifo_rx(cs, fifo2);
1343 hfcpci_clear_fifo_tx(cs, fifo2);
1345 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1346 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1348 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1349 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1352 cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
1353 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1354 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1355 cs->hw.hfcpci.ctmt &= ~2;
1356 cs->hw.hfcpci.conn &= ~0x18;
1358 cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
1359 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1360 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1361 cs->hw.hfcpci.ctmt &= ~1;
1362 cs->hw.hfcpci.conn &= ~0x03;
1365 case (L1_MODE_EXTRN):
1367 cs->hw.hfcpci.conn |= 0x10;
1368 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1369 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1370 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1371 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1373 cs->hw.hfcpci.conn |= 0x02;
1374 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1375 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1376 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1377 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1381 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);
1382 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1383 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
1384 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
1385 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
1386 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
1387 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
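/* Usage sketch (illustrative only, not in the driver): mode_hfcpci() is the
 * single switch for a B channel; L1_MODE_HDLC or L1_MODE_TRANS bring it up
 * and L1_MODE_NULL releases it again, much as hfcpci_l2l1() below does on
 * the PH_ACTIVATE / PH_DEACTIVATE primitives. */
static inline void
hfcpci_b1_mode_example(struct IsdnCardState *cs)
{
	mode_hfcpci(&cs->bcs[0], L1_MODE_HDLC, 0);	/* bring up B1 for HDLC */
	mode_hfcpci(&cs->bcs[0], L1_MODE_NULL, 0);	/* and release it again */
}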
1390 /******************************/
1391 /* Layer2 -> Layer 1 Transfer */
1392 /******************************/
1394 hfcpci_l2l1(struct PStack *st, int pr, void *arg)
1396 struct BCState *bcs = st->l1.bcs;
1398 struct sk_buff *skb = arg;
1401 case (PH_DATA | REQUEST):
1402 spin_lock_irqsave(&bcs->cs->lock, flags);
1404 skb_queue_tail(&bcs->squeue, skb);
1407 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1408 bcs->cs->BC_Send_Data(bcs);
1410 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1412 case (PH_PULL | INDICATION):
1413 spin_lock_irqsave(&bcs->cs->lock, flags);
1415 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1416 printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
1419 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1421 bcs->cs->BC_Send_Data(bcs);
1422 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1424 case (PH_PULL | REQUEST):
1426 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1427 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1429 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1431 case (PH_ACTIVATE | REQUEST):
1432 spin_lock_irqsave(&bcs->cs->lock, flags);
1433 test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
1434 mode_hfcpci(bcs, st->l1.mode, st->l1.bc);
1435 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1436 l1_msg_b(st, pr, arg);
1438 case (PH_DEACTIVATE | REQUEST):
1439 l1_msg_b(st, pr, arg);
1441 case (PH_DEACTIVATE | CONFIRM):
1442 spin_lock_irqsave(&bcs->cs->lock, flags);
1443 test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
1444 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1445 mode_hfcpci(bcs, 0, st->l1.bc);
1446 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1447 st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
1452 /******************************************/
1453 /* deactivate B-channel access and queues */
1454 /******************************************/
1456 close_hfcpci(struct BCState *bcs)
1458 mode_hfcpci(bcs, 0, bcs->channel);
1459 if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
1460 skb_queue_purge(&bcs->rqueue);
1461 skb_queue_purge(&bcs->squeue);
1463 dev_kfree_skb_any(bcs->tx_skb);
1465 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1470 /*************************************/
1471 /* init B-channel queues and control */
1472 /*************************************/
1474 open_hfcpcistate(struct IsdnCardState *cs, struct BCState *bcs)
1476 if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
1477 skb_queue_head_init(&bcs->rqueue);
1478 skb_queue_head_init(&bcs->squeue);
1481 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1487 /*********************************/
1488 /* inits the stack for B-channel */
1489 /*********************************/
1491 setstack_2b(struct PStack *st, struct BCState *bcs)
1493 bcs->channel = st->l1.bc;
1494 if (open_hfcpcistate(st->l1.hardware, bcs))
1497 st->l2.l2l1 = hfcpci_l2l1;
1498 setstack_manager(st);
1504 /***************************/
1505 /* handle L1 state changes */
1506 /***************************/
1508 hfcpci_bh(struct IsdnCardState *cs)
1511 // struct PStack *stptr;
1515 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
1516 if (!cs->hw.hfcpci.nt_mode)
1517 switch (cs->dc.hfcpci.ph_state) {
1519 l1_msg(cs, HW_RESET | INDICATION, NULL);
1522 l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
1525 l1_msg(cs, HW_RSYNC | INDICATION, NULL);
1528 l1_msg(cs, HW_INFO2 | INDICATION, NULL);
1531 l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
1536 spin_lock_irqsave(&cs->lock, flags);
1537 switch (cs->dc.hfcpci.ph_state) {
1539 if (cs->hw.hfcpci.nt_timer < 0) {
1540 cs->hw.hfcpci.nt_timer = 0;
1541 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1542 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1543 /* Clear already pending ints */
1544 if (Read_hfc(cs, HFCPCI_INT_S1));
1545 Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
1547 Write_hfc(cs, HFCPCI_STATES, 4);
1548 cs->dc.hfcpci.ph_state = 4;
1550 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_TIMER;
1551 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1552 cs->hw.hfcpci.ctmt &= ~HFCPCI_AUTO_TIMER;
1553 cs->hw.hfcpci.ctmt |= HFCPCI_TIM3_125;
1554 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1555 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1556 cs->hw.hfcpci.nt_timer = NT_T1_COUNT;
1557 Write_hfc(cs, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); /* allow G2 -> G3 transition */
1563 cs->hw.hfcpci.nt_timer = 0;
1564 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1565 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1570 spin_unlock_irqrestore(&cs->lock, flags);
1573 if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
1574 DChannel_proc_rcv(cs);
1575 if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
1576 DChannel_proc_xmt(cs);
1580 /********************************/
1581 /* called for card init message */
1582 /********************************/
1584 inithfcpci(struct IsdnCardState *cs)
1586 cs->bcs[0].BC_SetStack = setstack_2b;
1587 cs->bcs[1].BC_SetStack = setstack_2b;
1588 cs->bcs[0].BC_Close = close_hfcpci;
1589 cs->bcs[1].BC_Close = close_hfcpci;
1590 cs->dbusytimer.function = (void *) hfcpci_dbusy_timer;
1591 cs->dbusytimer.data = (long) cs;
1592 init_timer(&cs->dbusytimer);
1593 mode_hfcpci(cs->bcs, 0, 0);
1594 mode_hfcpci(cs->bcs + 1, 0, 1);
1599 /*******************************************/
1600 /* handle card messages from control layer */
1601 /*******************************************/
1603 hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
1607 if (cs->debug & L1_DEB_ISAC)
1608 debugl1(cs, "HFCPCI: card_msg %x", mt);
1611 spin_lock_irqsave(&cs->lock, flags);
1613 spin_unlock_irqrestore(&cs->lock, flags);
1616 release_io_hfcpci(cs);
1619 spin_lock_irqsave(&cs->lock, flags);
1622 spin_unlock_irqrestore(&cs->lock, flags);
1623 msleep(80); /* Timeout 80ms */
1624 /* now switch timer interrupt off */
1625 spin_lock_irqsave(&cs->lock, flags);
1626 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1627 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1628 /* reinit mode reg */
1629 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1630 spin_unlock_irqrestore(&cs->lock, flags);
1639 /* this variable is used as card index when more than one card is present */
1640 static struct pci_dev *dev_hfcpci __initdata = NULL;
1642 #endif /* CONFIG_PCI */
1645 setup_hfcpci(struct IsdnCard *card)
1648 struct IsdnCardState *cs = card->cs;
1651 struct pci_dev *tmp_hfcpci = NULL;
1654 #error "not supported on big endian machines yet"
1656 strcpy(tmp, hfcpci_revision);
1657 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1659 cs->hw.hfcpci.int_s1 = 0;
1660 cs->dc.hfcpci.ph_state = 0;
1661 cs->hw.hfcpci.fifo = 255;
1662 if (cs->typ == ISDN_CTYPE_HFC_PCI) {
1664 while (id_list[i].vendor_id) {
1665 tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
1666 id_list[i].device_id,
1670 if (pci_enable_device(tmp_hfcpci))
1672 pci_set_master(tmp_hfcpci);
1673 if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
1682 dev_hfcpci = tmp_hfcpci; /* old device */
1683 cs->hw.hfcpci.dev = dev_hfcpci;
1684 cs->irq = dev_hfcpci->irq;
1686 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1689 cs->hw.hfcpci.pci_io = (char *) dev_hfcpci->resource[ 1].start;
1690 printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
1692 printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
1695 if (!cs->hw.hfcpci.pci_io) {
1696 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1699 /* Allocate memory for FIFOS */
1700 /* Because the HFC-PCI needs a 32K physical alignment, we */
1701 /* allocate double the memory and align the address upwards */
1702 if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
1703 printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
1706 cs->hw.hfcpci.fifos = (void *)
1707 (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
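/* Alignment note (added): kmalloc() gives no 32K alignment guarantee, so
 * twice the needed size is allocated and the result is rounded up to the
 * next 32K boundary; clearing the low 15 bits and adding 0x8000 always
 * yields an aligned address with 32K of room left inside the 64K block. */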
1708 pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
1709 cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
1711 "HFC-PCI: defined at mem %#x fifo %#x(%#x) IRQ %d HZ %d\n",
1712 (u_int) cs->hw.hfcpci.pci_io,
1713 (u_int) cs->hw.hfcpci.fifos,
1714 (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
1716 spin_lock_irqsave(&cs->lock, flags);
1717 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
1718 cs->hw.hfcpci.int_m2 = 0; /* disable all interrupts */
1719 cs->hw.hfcpci.int_m1 = 0;
1720 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1721 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
1722 /* At this point the needed PCI config is done */
1723 /* fifos are still not enabled */
1724 INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs);
1725 cs->setstack_d = setstack_hfcpci;
1726 cs->BC_Send_Data = &hfcpci_send_data;
1727 cs->readisac = NULL;
1728 cs->writeisac = NULL;
1729 cs->readisacfifo = NULL;
1730 cs->writeisacfifo = NULL;
1731 cs->BC_Read_Reg = NULL;
1732 cs->BC_Write_Reg = NULL;
1733 cs->irq_func = &hfcpci_interrupt;
1734 cs->irq_flags |= SA_SHIRQ;
1735 cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
1736 cs->hw.hfcpci.timer.data = (long) cs;
1737 init_timer(&cs->hw.hfcpci.timer);
1738 cs->cardmsg = &hfcpci_card_msg;
1739 cs->auxcmd = &hfcpci_auxcmd;
1740 spin_unlock_irqrestore(&cs->lock, flags);
1743 return (0); /* no valid card type */
1745 printk(KERN_WARNING "HFC-PCI: NO_PCI_BIOS\n");
1747 #endif /* CONFIG_PCI */