3 * hfcpci.c low level driver for CCD's hfc-pci based cards
5 * Author Werner Cornelius (werner@isdn4linux.de)
6 * based on existing driver for CCD hfc ISA cards
7 * type approval valid for HFC-S PCI A based card
9 * Copyright 1999 by Werner Cornelius (werner@isdn-development.de)
10 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <linux/delay.h>
31 #include <linux/mISDNhw.h>
35 static const char *hfcpci_revision = "2.0";
41 MODULE_AUTHOR("Karsten Keil");
42 MODULE_LICENSE("GPL");
43 module_param(debug, uint, 0);
45 static LIST_HEAD(HFClist);
46 DEFINE_RWLOCK(HFClock);
84 unsigned char sctrl_r;
85 unsigned char sctrl_e;
87 unsigned char fifo_en;
88 unsigned char bswapped;
89 unsigned char protocol;
91 unsigned char *pci_io; /* start of PCI IO memory */
93 void *fifos; /* FIFO memory */
94 int last_bfifo_cnt[2];
95 /* marker saving last b-fifo frame count */
96 struct timer_list timer;
99 #define HFC_CFG_MASTER 1
100 #define HFC_CFG_SLAVE 2
101 #define HFC_CFG_PCM 3
102 #define HFC_CFG_2HFC 4
103 #define HFC_CFG_SLAVEHFC 5
104 #define HFC_CFG_NEG_F0 6
105 #define HFC_CFG_SW_DD_DU 7
107 #define FLG_HFC_TIMER_T1 16
108 #define FLG_HFC_TIMER_T3 17
110 #define NT_T1_COUNT 1120 /* number of 3.125ms interrupts (3.5s) */
111 #define NT_T3_COUNT 31 /* number of 3.125ms interrupts (97 ms) */
112 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
113 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
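/*
 * Worked out from the comments above: the auto timer fires every 3.125 ms,
 * so NT_T1_COUNT gives 1120 * 3.125 ms = 3.5 s (timer T1) and NT_T3_COUNT
 * gives 31 * 3.125 ms ~= 97 ms (timer T3).
 */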
117 struct list_head list;
124 struct pci_dev *pdev;
126 spinlock_t lock; /* card lock */
128 struct bchannel bch[2];
131 /* Interface functions */
133 enable_hwirq(struct hfc_pci *hc)
135 hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
136 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
140 disable_hwirq(struct hfc_pci *hc)
142 hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
143 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
147 * free hardware resources used by driver
150 release_io_hfcpci(struct hfc_pci *hc)
152 /* disable memory mapped ports + busmaster */
153 pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
154 del_timer(&hc->hw.timer);
155 pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
156 iounmap((void *)hc->hw.pci_io);
160 * set mode (NT or TE)
163 hfcpci_setmode(struct hfc_pci *hc)
165 if (hc->hw.protocol == ISDN_P_NT_S0) {
166 hc->hw.clkdel = CLKDEL_NT; /* ST-Bit delay for NT-Mode */
167 hc->hw.sctrl |= SCTRL_MODE_NT; /* NT-MODE */
168 hc->hw.states = 1; /* G1 */
170 hc->hw.clkdel = CLKDEL_TE; /* ST-Bit delay for TE-Mode */
171 hc->hw.sctrl &= ~SCTRL_MODE_NT; /* TE-MODE */
172 hc->hw.states = 2; /* F2 */
174 Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
175 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
177 Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
178 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
182 * function called to reset the HFC PCI chip. A complete software reset of chip and fifos is done.
186 reset_hfcpci(struct hfc_pci *hc)
191 printk(KERN_DEBUG "reset_hfcpci: entered\n");
192 val = Read_hfc(hc, HFCPCI_CHIP_ID);
193 printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
194 /* enable memory mapped ports, disable busmaster */
195 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
197 /* enable memory ports + busmaster */
198 pci_write_config_word(hc->pdev, PCI_COMMAND,
199 PCI_ENA_MEMIO + PCI_ENA_MASTER);
200 val = Read_hfc(hc, HFCPCI_STATUS);
201 printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
202 hc->hw.cirm = HFCPCI_RESET; /* Reset On */
203 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
204 set_current_state(TASK_UNINTERRUPTIBLE);
205 mdelay(10); /* Timeout 10ms */
206 hc->hw.cirm = 0; /* Reset Off */
207 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
208 val = Read_hfc(hc, HFCPCI_STATUS);
209 printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
210 while (cnt < 50000) { /* max 50000 us */
213 val = Read_hfc(hc, HFCPCI_STATUS);
217 printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
219 hc->hw.fifo_en = 0x30; /* only D fifos enabled */
221 hc->hw.bswapped = 0; /* no exchange */
222 hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
223 hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect, threshold */
224 hc->hw.sctrl = 0x40; /* set tx_lo mode, error in datasheet! */
226 hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE; /* S/T Auto awake */
228 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
229 hc->hw.mst_m |= HFCPCI_MASTER; /* HFC Master Mode */
230 if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
231 hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
232 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
233 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
234 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
235 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
237 hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
238 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
239 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
241 /* Clear already pending ints */
242 (void) Read_hfc(hc, HFCPCI_INT_S1);
247 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
248 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
251 * Init GCI/IOM2 in master mode
252 * Slots 0 and 1 are set for B-chan 1 and 2
253 * D- and monitor/CI channel are not enabled
254 * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
255 * STIO2 is used as data input, B1+B2 from IOM->ST
256 * ST B-channel send disabled -> continuous 1s
257 * The IOM slots are always enabled
259 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
260 /* set data flow directions: connect B1,B2: HFC to/from PCM */
263 hc->hw.conn = 0x36; /* set data flow directions */
264 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
265 Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
266 Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
267 Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
268 Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
270 Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
271 Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
272 Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
273 Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
276 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
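/*
 * Slot programming sketch (an assumption to be checked against the HFC-PCI
 * data sheet, not stated in this file): B1_SSL/B1_RSL and B2_SSL/B2_RSL
 * select PCM time slots 0 and 1 for send and receive; the extra 0x40 bit
 * written in the HFC_CFG_SW_DD_DU case apparently swaps the DD/DU data
 * lines, matching the "SW_DD_DU" configuration flag.
 */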
277 val = Read_hfc(hc, HFCPCI_INT_S2);
281 * Timer function called when kernel timer expires
284 hfcpci_Timer(struct hfc_pci *hc)
286 hc->hw.timer.expires = jiffies + 75;
289 * WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
290 * add_timer(&hc->hw.timer);
296 * select the B-channel that matches the given channel and is active
298 static struct bchannel *
299 Sel_BCS(struct hfc_pci *hc, int channel)
301 if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
302 (hc->bch[0].nr & channel))
304 else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
305 (hc->bch[1].nr & channel))
312 * clear the desired B-channel rx fifo
315 hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
321 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
322 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
324 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
325 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
328 hc->hw.fifo_en ^= fifo_state;
329 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
330 hc->hw.last_bfifo_cnt[fifo] = 0;
331 bzr->f1 = MAX_B_FRAMES;
332 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
333 bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
334 bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
335 le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
337 hc->hw.fifo_en |= fifo_state;
338 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
342 * clear the desired B-channel tx fifo
344 static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
350 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
351 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
353 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
354 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
357 hc->hw.fifo_en ^= fifo_state;
358 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
359 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
360 printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
361 "z1(%x) z2(%x) state(%x)\n",
362 fifo, bzt->f1, bzt->f2,
363 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
364 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
366 bzt->f2 = MAX_B_FRAMES;
367 bzt->f1 = bzt->f2; /* init F pointers to remain constant */
368 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
369 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(
370 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1) - 1);
372 hc->hw.fifo_en |= fifo_state;
373 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
374 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
376 "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
377 fifo, bzt->f1, bzt->f2,
378 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
379 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
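/*
 * B-FIFO bookkeeping, summarized from the clear/empty/fill routines in this
 * file: f1/f2 are frame counters advanced with "& MAX_B_FRAMES", z1/z2 are
 * byte pointers into the FIFO window offset by B_SUB_VAL.  Advancing a byte
 * pointer always follows the pattern
 *
 *	new_z = le16_to_cpu(z) + count;
 *	if (new_z >= (B_FIFO_SIZE + B_SUB_VAL))
 *		new_z -= B_FIFO_SIZE;
 *
 * i.e. a wrap back into the window.  The clear routines park f1 == f2 and
 * the Z pointers on (nearly) the same byte so the chip sees an empty FIFO.
 */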
383 * read a complete B-frame out of the buffer
386 hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
387 u_char *bdata, int count)
389 u_char *ptr, *ptr1, new_f2;
390 int total, maxlen, new_z2;
393 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
394 printk(KERN_DEBUG "hfcpci_empty_fifo\n");
395 zp = &bz->za[bz->f2]; /* point to Z-Regs */
396 new_z2 = le16_to_cpu(zp->z2) + count; /* new position in fifo */
397 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
398 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
399 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
400 if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
401 (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
402 if (bch->debug & DEBUG_HW)
403 printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
404 "invalid length %d or crc\n", count);
405 #ifdef ERROR_STATISTIC
408 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
409 bz->f2 = new_f2; /* next buffer */
411 bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
413 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
418 ptr = skb_put(bch->rx_skb, count);
420 if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
421 maxlen = count; /* complete transfer */
423 maxlen = B_FIFO_SIZE + B_SUB_VAL -
424 le16_to_cpu(zp->z2); /* maximum */
426 ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
428 memcpy(ptr, ptr1, maxlen); /* copy data */
431 if (count) { /* rest remaining */
433 ptr1 = bdata; /* start of buffer */
434 memcpy(ptr, ptr1, count); /* rest */
436 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
437 bz->f2 = new_f2; /* next buffer */
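/*
 * Note on the length check above: each received HDLC frame in the FIFO ends
 * with two CRC bytes and a STAT byte, which is why only count - 3 bytes go
 * into the skb and why a nonzero byte at offset z1 (the STAT byte) is
 * treated as a CRC/abort error.  This reading of the trailing bytes follows
 * common HFC usage; the code itself only checks the length and that byte.
 */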
443 * D-channel receive procedure
446 receive_dmsg(struct hfc_pci *hc)
448 struct dchannel *dch = &hc->dch;
456 df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
457 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
458 zp = &df->za[df->f2 & D_FREG_MASK];
459 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
463 if (dch->debug & DEBUG_HW_DCHANNEL)
465 "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
471 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
472 (df->data[le16_to_cpu(zp->z1)])) {
473 if (dch->debug & DEBUG_HW)
475 "empty_fifo hfcpci paket inv. len "
478 df->data[le16_to_cpu(zp->z1)]);
479 #ifdef ERROR_STATISTIC
482 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
483 (MAX_D_FRAMES + 1); /* next buffer */
484 df->za[df->f2 & D_FREG_MASK].z2 =
485 cpu_to_le16((zp->z2 + rcnt) & (D_FIFO_SIZE - 1));
487 dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
490 "HFC-PCI: D receive out of memory\n");
495 ptr = skb_put(dch->rx_skb, rcnt);
497 if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
498 maxlen = rcnt; /* complete transfer */
500 maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
503 ptr1 = df->data + le16_to_cpu(zp->z2);
505 memcpy(ptr, ptr1, maxlen); /* copy data */
508 if (rcnt) { /* rest remaining */
510 ptr1 = df->data; /* start of buffer */
511 memcpy(ptr, ptr1, rcnt); /* rest */
513 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
514 (MAX_D_FRAMES + 1); /* next buffer */
515 df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
516 le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
524 * check for transparent receive data and read at most one threshold size, if available
527 hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
529 unsigned short *z1r, *z2r;
530 int new_z2, fcnt, maxlen;
533 z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
536 fcnt = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
538 return 0; /* no data avail */
541 fcnt += B_FIFO_SIZE; /* bytes actually buffered */
542 if (fcnt > HFCPCI_BTRANS_THRESHOLD)
543 fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */
545 new_z2 = le16_to_cpu(*z2r) + fcnt; /* new position in fifo */
546 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
547 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
549 bch->rx_skb = mI_alloc_skb(fcnt, GFP_ATOMIC);
551 ptr = skb_put(bch->rx_skb, fcnt);
552 if (le16_to_cpu(*z2r) + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
553 maxlen = fcnt; /* complete transfer */
555 maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
558 ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
560 memcpy(ptr, ptr1, maxlen); /* copy data */
563 if (fcnt) { /* rest remaining */
565 ptr1 = bdata; /* start of buffer */
566 memcpy(ptr, ptr1, fcnt); /* rest */
570 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
572 *z2r = cpu_to_le16(new_z2); /* new position */
577 * B-channel main receive routine
580 main_rec_hfcpci(struct bchannel *bch)
582 struct hfc_pci *hc = bch->hw;
584 int receive, count = 5;
590 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
591 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
592 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
595 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
596 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
601 if (bz->f1 != bz->f2) {
602 if (bch->debug & DEBUG_HW_BCHANNEL)
603 printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
604 bch->nr, bz->f1, bz->f2);
605 zp = &bz->za[bz->f2];
607 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
611 if (bch->debug & DEBUG_HW_BCHANNEL)
613 "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
614 bch->nr, le16_to_cpu(zp->z1),
615 le16_to_cpu(zp->z2), rcnt);
616 hfcpci_empty_bfifo(bch, bz, bdata, rcnt);
617 rcnt = bz->f1 - bz->f2;
619 rcnt += MAX_B_FRAMES + 1;
620 if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
622 hfcpci_clear_fifo_rx(hc, real_fifo);
624 hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
629 } else if (test_bit(FLG_TRANSPARENT, &bch->Flags))
630 receive = hfcpci_empty_fifo_trans(bch, bz, bdata);
633 if (count && receive)
639 * D-channel send routine
642 hfcpci_fill_dfifo(struct hfc_pci *hc)
644 struct dchannel *dch = &hc->dch;
646 int count, new_z1, maxlen;
648 u_char *src, *dst, new_f1;
650 if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
651 printk(KERN_DEBUG "%s\n", __func__);
655 count = dch->tx_skb->len - dch->tx_idx;
658 df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
660 if (dch->debug & DEBUG_HW_DFIFO)
661 printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
663 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
664 fcnt = df->f1 - df->f2; /* frame count actually buffered */
666 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
667 if (fcnt > (MAX_D_FRAMES - 1)) {
668 if (dch->debug & DEBUG_HW_DCHANNEL)
670 "hfcpci_fill_Dfifo more as 14 frames\n");
671 #ifdef ERROR_STATISTIC
676 /* now determine free bytes in FIFO buffer */
677 maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
678 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
680 maxlen += D_FIFO_SIZE; /* maxlen now contains available bytes */
682 if (dch->debug & DEBUG_HW_DCHANNEL)
683 printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
685 if (count > maxlen) {
686 if (dch->debug & DEBUG_HW_DCHANNEL)
687 printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
690 new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
692 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
693 src = dch->tx_skb->data + dch->tx_idx; /* source pointer */
694 dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
695 maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
698 maxlen = count; /* limit size */
699 memcpy(dst, src, maxlen); /* first copy */
701 count -= maxlen; /* remaining bytes */
703 dst = df->data; /* start of buffer */
704 src += maxlen; /* new position */
705 memcpy(dst, src, count);
707 df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
708 /* for next buffer */
709 df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
710 /* new pos actual buffer */
711 df->f1 = new_f1; /* next frame */
712 dch->tx_idx = dch->tx_skb->len;
716 * B-channel send routine
719 hfcpci_fill_fifo(struct bchannel *bch)
721 struct hfc_pci *hc = bch->hw;
726 u_char new_f1, *src, *dst;
727 unsigned short *z1t, *z2t;
729 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
730 printk(KERN_DEBUG "%s\n", __func__);
731 if ((!bch->tx_skb) || bch->tx_skb->len <= 0)
733 count = bch->tx_skb->len - bch->tx_idx;
734 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
735 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
736 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
738 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
739 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
742 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
743 z1t = &bz->za[MAX_B_FRAMES].z1;
745 if (bch->debug & DEBUG_HW_BCHANNEL)
746 printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
747 "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
748 le16_to_cpu(*z1t), le16_to_cpu(*z2t));
749 fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
752 /* fcnt contains available bytes in fifo */
753 fcnt = B_FIFO_SIZE - fcnt;
754 /* remaining bytes to send (bytes in fifo) */
756 count = bch->tx_skb->len - bch->tx_idx;
757 /* maximum fill shall be HFCPCI_BTRANS_MAX */
758 if (count > HFCPCI_BTRANS_MAX - fcnt)
759 count = HFCPCI_BTRANS_MAX - fcnt;
762 /* data is suitable for fifo */
763 new_z1 = le16_to_cpu(*z1t) + count;
764 /* new buffer Position */
765 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
766 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
767 src = bch->tx_skb->data + bch->tx_idx;
769 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
770 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
772 if (bch->debug & DEBUG_HW_BFIFO)
773 printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
774 "maxl(%d) nz1(%x) dst(%p)\n",
775 fcnt, maxlen, new_z1, dst);
777 bch->tx_idx += count;
779 maxlen = count; /* limit size */
780 memcpy(dst, src, maxlen); /* first copy */
781 count -= maxlen; /* remaining bytes */
783 dst = bdata; /* start of buffer */
784 src += maxlen; /* new position */
785 memcpy(dst, src, count);
787 *z1t = cpu_to_le16(new_z1); /* now send data */
788 if (bch->tx_idx < bch->tx_skb->len)
790 /* send confirm in transparent mode, free the skb in HDLC mode */
791 if (test_bit(FLG_TRANSPARENT, &bch->Flags))
793 dev_kfree_skb(bch->tx_skb);
794 if (get_next_bframe(bch))
798 if (bch->debug & DEBUG_HW_BCHANNEL)
800 "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
801 __func__, bch->nr, bz->f1, bz->f2,
803 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
805 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
806 if (fcnt > (MAX_B_FRAMES - 1)) {
807 if (bch->debug & DEBUG_HW_BCHANNEL)
809 "hfcpci_fill_Bfifo more as 14 frames\n");
812 /* now determine free bytes in FIFO buffer */
813 maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
814 le16_to_cpu(bz->za[bz->f1].z1) - 1;
816 maxlen += B_FIFO_SIZE; /* maxlen now contains available bytes */
818 if (bch->debug & DEBUG_HW_BCHANNEL)
819 printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
820 bch->nr, count, maxlen);
822 if (maxlen < count) {
823 if (bch->debug & DEBUG_HW_BCHANNEL)
824 printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
827 new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
828 /* new buffer Position */
829 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
830 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
832 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
833 src = bch->tx_skb->data + bch->tx_idx; /* source pointer */
834 dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
835 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
838 maxlen = count; /* limit size */
839 memcpy(dst, src, maxlen); /* first copy */
841 count -= maxlen; /* remaining bytes */
843 dst = bdata; /* start of buffer */
844 src += maxlen; /* new position */
845 memcpy(dst, src, count);
847 bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
848 bz->f1 = new_f1; /* next frame */
849 dev_kfree_skb(bch->tx_skb);
850 get_next_bframe(bch);
856 * handle L1 state changes in TE mode
860 ph_state_te(struct dchannel *dch)
863 printk(KERN_DEBUG "%s: TE newstate %x\n",
864 __func__, dch->state);
865 switch (dch->state) {
867 l1_event(dch->l1, HW_RESET_IND);
870 l1_event(dch->l1, HW_DEACT_IND);
874 l1_event(dch->l1, ANYSIGNAL);
877 l1_event(dch->l1, INFO2);
880 l1_event(dch->l1, INFO4_P8);
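/*
 * The (elided) case labels are the S/T F-state values read from
 * HFCPCI_STATES; the events passed to l1_event() follow the ITU-T I.430 TE
 * state machine, roughly: reset, F3 deactivated, lost signal, F6
 * synchronized (INFO2 seen) and F7 activated (INFO4 seen).  The exact
 * state-to-event mapping should be taken from the case labels, not from
 * this summary.
 */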
886 * handle L1 state changes in NT mode
890 handle_nt_timer3(struct dchannel *dch) {
891 struct hfc_pci *hc = dch->hw;
893 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
894 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
895 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
897 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
898 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
899 hc->hw.mst_m |= HFCPCI_MASTER;
900 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
901 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
902 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
906 ph_state_nt(struct dchannel *dch)
908 struct hfc_pci *hc = dch->hw;
911 printk(KERN_DEBUG "%s: NT newstate %x\n",
912 __func__, dch->state);
913 switch (dch->state) {
915 if (hc->hw.nt_timer < 0) {
917 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
918 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
919 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
920 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
921 /* Clear already pending ints */
922 (void) Read_hfc(hc, HFCPCI_INT_S1);
923 Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
925 Write_hfc(hc, HFCPCI_STATES, 4);
927 } else if (hc->hw.nt_timer == 0) {
928 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
929 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
930 hc->hw.nt_timer = NT_T1_COUNT;
931 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
932 hc->hw.ctmt |= HFCPCI_TIM3_125;
933 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
935 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
936 test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
937 /* allow G2 -> G3 transition */
938 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
940 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
945 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
946 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
947 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
948 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
949 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
950 hc->hw.mst_m &= ~HFCPCI_MASTER;
951 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
952 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
953 _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
954 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
958 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
959 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
960 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
961 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
964 if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
965 if (!test_and_clear_bit(FLG_L2_ACTIVATED,
967 handle_nt_timer3(dch);
970 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
971 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
972 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
973 hc->hw.nt_timer = NT_T3_COUNT;
974 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
975 hc->hw.ctmt |= HFCPCI_TIM3_125;
976 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
984 ph_state(struct dchannel *dch)
986 struct hfc_pci *hc = dch->hw;
988 if (hc->hw.protocol == ISDN_P_NT_S0) {
989 if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
991 handle_nt_timer3(dch);
999 * Layer 1 callback function
1002 hfc_l1callback(struct dchannel *dch, u_int cmd)
1004 struct hfc_pci *hc = dch->hw;
1009 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1010 hc->hw.mst_m |= HFCPCI_MASTER;
1011 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1014 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
1017 Write_hfc(hc, HFCPCI_STATES, 3); /* HFC ST 2 */
1018 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1019 hc->hw.mst_m |= HFCPCI_MASTER;
1020 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1021 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1023 l1_event(dch->l1, HW_POWERUP_IND);
1026 hc->hw.mst_m &= ~HFCPCI_MASTER;
1027 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1028 skb_queue_purge(&dch->squeue);
1030 dev_kfree_skb(dch->tx_skb);
1035 dev_kfree_skb(dch->rx_skb);
1038 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1039 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1040 del_timer(&dch->timer);
1042 case HW_POWERUP_REQ:
1043 Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
1045 case PH_ACTIVATE_IND:
1046 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
1047 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1050 case PH_DEACTIVATE_IND:
1051 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1052 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1056 if (dch->debug & DEBUG_HW)
1057 printk(KERN_DEBUG "%s: unknown command %x\n",
1068 tx_birq(struct bchannel *bch)
1070 if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
1071 hfcpci_fill_fifo(bch);
1074 dev_kfree_skb(bch->tx_skb);
1075 if (get_next_bframe(bch))
1076 hfcpci_fill_fifo(bch);
1081 tx_dirq(struct dchannel *dch)
1083 if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
1084 hfcpci_fill_dfifo(dch->hw);
1087 dev_kfree_skb(dch->tx_skb);
1088 if (get_next_dframe(dch))
1089 hfcpci_fill_dfifo(dch->hw);
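/*
 * main interrupt handler: reads HFCPCI_STATUS and HFCPCI_INT_S1, then
 * dispatches to the L1 state machine (0x40), the NT/auto timer (0x80) and
 * the B-/D-channel FIFO receive and transmit service routines below
 */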
1094 hfcpci_int(int intno, void *dev_id)
1096 struct hfc_pci *hc = dev_id;
1098 struct bchannel *bch;
1101 spin_lock(&hc->lock);
1102 if (!(hc->hw.int_m2 & 0x08)) {
1103 spin_unlock(&hc->lock);
1104 return IRQ_NONE; /* not initialised */
1106 stat = Read_hfc(hc, HFCPCI_STATUS);
1107 if (HFCPCI_ANYINT & stat) {
1108 val = Read_hfc(hc, HFCPCI_INT_S1);
1109 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1111 "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
1114 spin_unlock(&hc->lock);
1119 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1120 printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
1121 val &= hc->hw.int_m1;
1122 if (val & 0x40) { /* state machine irq */
1123 exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
1124 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1125 printk(KERN_DEBUG "ph_state chg %d->%d\n",
1126 hc->dch.state, exval);
1127 hc->dch.state = exval;
1128 schedule_event(&hc->dch, FLG_PHCHANGE);
1131 if (val & 0x80) { /* timer irq */
1132 if (hc->hw.protocol == ISDN_P_NT_S0) {
1133 if ((--hc->hw.nt_timer) < 0)
1134 schedule_event(&hc->dch, FLG_PHCHANGE);
1137 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
1140 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1142 main_rec_hfcpci(bch);
1143 else if (hc->dch.debug)
1144 printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
1147 bch = Sel_BCS(hc, 2);
1149 main_rec_hfcpci(bch);
1150 else if (hc->dch.debug)
1151 printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
1154 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1157 else if (hc->dch.debug)
1158 printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
1161 bch = Sel_BCS(hc, 2);
1164 else if (hc->dch.debug)
1165 printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
1169 if (val & 0x04) { /* dframe transmitted */
1170 if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
1171 del_timer(&hc->dch.timer);
1174 spin_unlock(&hc->lock);
1179 * timer callback for D-channel busy resolution. Currently unused.
1182 hfcpci_dbusy_timer(struct hfc_pci *hc)
1187 * activate/deactivate hardware for selected channels and mode
1190 mode_hfcpci(struct bchannel *bch, int bc, int protocol)
1192 struct hfc_pci *hc = bch->hw;
1194 u_char rx_slot = 0, tx_slot = 0, pcm_mode;
1196 if (bch->debug & DEBUG_HW_BCHANNEL)
1198 "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
1199 bch->state, protocol, bch->nr, bc);
1202 pcm_mode = (bc>>24) & 0xff;
1203 if (pcm_mode) { /* PCM SLOT USE */
1204 if (!test_bit(HFC_CFG_PCM, &hc->cfg))
1206 "%s: pcm channel id without HFC_CFG_PCM\n",
1208 rx_slot = (bc>>8) & 0xff;
1209 tx_slot = (bc>>16) & 0xff;
1211 } else if (test_bit(HFC_CFG_PCM, &hc->cfg) &&
1212 (protocol > ISDN_P_NONE))
1213 printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
1215 if (hc->chanlimit > 1) {
1216 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1217 hc->hw.sctrl_e &= ~0x80;
1220 if (protocol != ISDN_P_NONE) {
1221 hc->hw.bswapped = 1; /* B1 and B2 exchanged */
1222 hc->hw.sctrl_e |= 0x80;
1224 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1225 hc->hw.sctrl_e &= ~0x80;
1229 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1230 hc->hw.sctrl_e &= ~0x80;
1234 case (-1): /* used for init */
1238 if (bch->state == ISDN_P_NONE)
1241 hc->hw.sctrl &= ~SCTRL_B2_ENA;
1242 hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
1244 hc->hw.sctrl &= ~SCTRL_B1_ENA;
1245 hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
1248 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
1249 hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS +
1252 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
1253 hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS +
1256 #ifdef REVERSE_BITORDER
1258 hc->hw.cirm &= 0x7f;
1260 hc->hw.cirm &= 0xbf;
1262 bch->state = ISDN_P_NONE;
1264 test_and_clear_bit(FLG_HDLC, &bch->Flags);
1265 test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
1267 case (ISDN_P_B_RAW):
1268 bch->state = protocol;
1270 hfcpci_clear_fifo_rx(hc, (fifo2 & 2)?1:0);
1271 hfcpci_clear_fifo_tx(hc, (fifo2 & 2)?1:0);
1273 hc->hw.sctrl |= SCTRL_B2_ENA;
1274 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1275 #ifdef REVERSE_BITORDER
1276 hc->hw.cirm |= 0x80;
1279 hc->hw.sctrl |= SCTRL_B1_ENA;
1280 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1281 #ifdef REVERSE_BITORDER
1282 hc->hw.cirm |= 0x40;
1286 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1287 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
1290 hc->hw.conn &= ~0x18;
1292 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1293 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
1296 hc->hw.conn &= ~0x03;
1298 test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
1300 case (ISDN_P_B_HDLC):
1301 bch->state = protocol;
1303 hfcpci_clear_fifo_rx(hc, (fifo2 & 2)?1:0);
1304 hfcpci_clear_fifo_tx(hc, (fifo2 & 2)?1:0);
1306 hc->hw.sctrl |= SCTRL_B2_ENA;
1307 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1309 hc->hw.sctrl |= SCTRL_B1_ENA;
1310 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1313 hc->hw.last_bfifo_cnt[1] = 0;
1314 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1315 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
1318 hc->hw.conn &= ~0x18;
1320 hc->hw.last_bfifo_cnt[0] = 0;
1321 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1322 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
1325 hc->hw.conn &= ~0x03;
1327 test_and_set_bit(FLG_HDLC, &bch->Flags);
1330 printk(KERN_DEBUG "prot not known %x\n", protocol);
1331 return -ENOPROTOOPT;
1333 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
1334 if ((protocol == ISDN_P_NONE) ||
1335 (protocol == -1)) { /* init case */
1339 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
1348 hc->hw.conn &= 0xc7;
1349 hc->hw.conn |= 0x08;
1350 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
1352 printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
1354 Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
1355 Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
1357 hc->hw.conn &= 0xf8;
1358 hc->hw.conn |= 0x01;
1359 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
1361 printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
1363 Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
1364 Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
1367 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
1368 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1369 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1370 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
1371 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1372 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1373 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1374 #ifdef REVERSE_BITORDER
1375 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1381 set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
1383 struct hfc_pci *hc = bch->hw;
1385 if (bch->debug & DEBUG_HW_BCHANNEL)
1387 "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
1388 bch->state, protocol, bch->nr, chan);
1389 if (bch->nr != chan) {
1391 "HFCPCI rxtest wrong channel parameter %x/%x\n",
1396 case (ISDN_P_B_RAW):
1397 bch->state = protocol;
1398 hfcpci_clear_fifo_rx(hc, (chan & 2)?1:0);
1400 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1401 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1402 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1404 hc->hw.conn &= ~0x18;
1405 #ifdef REVERSE_BITORDER
1406 hc->hw.cirm |= 0x80;
1409 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1410 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1411 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1413 hc->hw.conn &= ~0x03;
1414 #ifdef REVERSE_BITORDER
1415 hc->hw.cirm |= 0x40;
1419 case (ISDN_P_B_HDLC):
1420 bch->state = protocol;
1421 hfcpci_clear_fifo_rx(hc, (chan & 2)?1:0);
1423 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1424 hc->hw.last_bfifo_cnt[1] = 0;
1425 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1426 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1428 hc->hw.conn &= ~0x18;
1430 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1431 hc->hw.last_bfifo_cnt[0] = 0;
1432 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1433 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1435 hc->hw.conn &= ~0x03;
1439 printk(KERN_DEBUG "prot not known %x\n", protocol);
1440 return -ENOPROTOOPT;
1442 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1443 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1444 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1445 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1446 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1447 #ifdef REVERSE_BITORDER
1448 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1454 deactivate_bchannel(struct bchannel *bch)
1456 struct hfc_pci *hc = bch->hw;
1459 spin_lock_irqsave(&hc->lock, flags);
1460 if (test_and_clear_bit(FLG_TX_NEXT, &bch->Flags)) {
1461 dev_kfree_skb(bch->next_skb);
1462 bch->next_skb = NULL;
1465 dev_kfree_skb(bch->tx_skb);
1470 dev_kfree_skb(bch->rx_skb);
1473 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1474 test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
1475 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
1476 spin_unlock_irqrestore(&hc->lock, flags);
1480 * Layer 1 B-channel hardware access
1483 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1488 case MISDN_CTRL_GETOP:
1492 printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
1499 hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1501 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1502 struct hfc_pci *hc = bch->hw;
1506 if (bch->debug & DEBUG_HW)
1507 printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
1510 spin_lock_irqsave(&hc->lock, flags);
1511 ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
1512 spin_unlock_irqrestore(&hc->lock, flags);
1514 case HW_TESTRX_HDLC:
1515 spin_lock_irqsave(&hc->lock, flags);
1516 ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
1517 spin_unlock_irqrestore(&hc->lock, flags);
1520 spin_lock_irqsave(&hc->lock, flags);
1521 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1522 spin_unlock_irqrestore(&hc->lock, flags);
1526 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1527 if (test_bit(FLG_ACTIVE, &bch->Flags))
1528 deactivate_bchannel(bch);
1529 ch->protocol = ISDN_P_NONE;
1531 module_put(THIS_MODULE);
1534 case CONTROL_CHANNEL:
1535 ret = channel_bctrl(bch, arg);
1538 printk(KERN_WARNING "%s: unknown prim(%x)\n",
1545 * Layer 2 -> Layer 1 D-channel data
1548 hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
1550 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1551 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1552 struct hfc_pci *hc = dch->hw;
1554 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1560 spin_lock_irqsave(&hc->lock, flags);
1561 ret = dchannel_senddata(dch, skb);
1562 if (ret > 0) { /* direct TX */
1563 id = hh->id; /* skb can be freed */
1564 hfcpci_fill_dfifo(dch->hw);
1566 spin_unlock_irqrestore(&hc->lock, flags);
1567 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1569 spin_unlock_irqrestore(&hc->lock, flags);
1571 case PH_ACTIVATE_REQ:
1572 spin_lock_irqsave(&hc->lock, flags);
1573 if (hc->hw.protocol == ISDN_P_NT_S0) {
1575 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1576 hc->hw.mst_m |= HFCPCI_MASTER;
1577 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1578 if (test_bit(FLG_ACTIVE, &dch->Flags)) {
1579 spin_unlock_irqrestore(&hc->lock, flags);
1580 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
1581 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1584 test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
1585 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1586 HFCPCI_DO_ACTION | 1);
1588 ret = l1_event(dch->l1, hh->prim);
1589 spin_unlock_irqrestore(&hc->lock, flags);
1591 case PH_DEACTIVATE_REQ:
1592 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1593 spin_lock_irqsave(&hc->lock, flags);
1594 if (hc->hw.protocol == ISDN_P_NT_S0) {
1595 /* prepare deactivation */
1596 Write_hfc(hc, HFCPCI_STATES, 0x40);
1597 skb_queue_purge(&dch->squeue);
1599 dev_kfree_skb(dch->tx_skb);
1604 dev_kfree_skb(dch->rx_skb);
1607 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1608 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1609 del_timer(&dch->timer);
1611 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
1612 dchannel_sched_event(&hc->dch, D_CLEARBUSY);
1614 hc->hw.mst_m &= ~HFCPCI_MASTER;
1615 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1618 ret = l1_event(dch->l1, hh->prim);
1620 spin_unlock_irqrestore(&hc->lock, flags);
1629 * Layer 2 -> Layer 1 B-channel data
1632 hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1634 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1635 struct hfc_pci *hc = bch->hw;
1637 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1643 spin_lock_irqsave(&hc->lock, flags);
1644 ret = bchannel_senddata(bch, skb);
1645 if (ret > 0) { /* direct TX */
1646 id = hh->id; /* skb can be freed */
1647 hfcpci_fill_fifo(bch);
1649 spin_unlock_irqrestore(&hc->lock, flags);
1650 if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
1651 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1653 spin_unlock_irqrestore(&hc->lock, flags);
1655 case PH_ACTIVATE_REQ:
1656 spin_lock_irqsave(&hc->lock, flags);
1657 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
1658 ret = mode_hfcpci(bch, bch->nr, ch->protocol);
1661 spin_unlock_irqrestore(&hc->lock, flags);
1663 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
1666 case PH_DEACTIVATE_REQ:
1667 deactivate_bchannel(bch);
1668 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
1679 * called for card init message
1683 inithfcpci(struct hfc_pci *hc)
1685 printk(KERN_DEBUG "inithfcpci: entered\n");
1686 hc->dch.timer.function = (void *) hfcpci_dbusy_timer;
1687 hc->dch.timer.data = (long) &hc->dch;
1688 init_timer(&hc->dch.timer);
1690 mode_hfcpci(&hc->bch[0], 1, -1);
1691 mode_hfcpci(&hc->bch[1], 2, -1);
1696 init_card(struct hfc_pci *hc)
1701 printk(KERN_DEBUG "init_card: entered\n");
1704 spin_lock_irqsave(&hc->lock, flags);
1706 spin_unlock_irqrestore(&hc->lock, flags);
1707 if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
1709 "mISDN: couldn't get interrupt %d\n", hc->irq);
1712 spin_lock_irqsave(&hc->lock, flags);
1717 * Finally enable IRQ output
1718 * this is only allowed if an IRQ routine is already
1719 * established for this HFC, so don't do it earlier
1722 spin_unlock_irqrestore(&hc->lock, flags);
1724 current->state = TASK_UNINTERRUPTIBLE;
1725 schedule_timeout((80*HZ)/1000);
1726 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1727 hc->irq, hc->irqcnt);
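/*
 * At this point only the timer interrupt was enabled and the card was left
 * running for roughly 80 ms (schedule_timeout above); hc->irqcnt shows
 * whether the interrupt line actually works.  If no interrupts arrived the
 * setup is retried, as the "4 - cnt" message below indicates.
 */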
1728 /* now switch timer interrupt off */
1729 spin_lock_irqsave(&hc->lock, flags);
1730 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1731 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1732 /* reinit mode reg */
1733 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1736 "HFC PCI: IRQ(%d) getting no interrupts "
1737 "during init %d\n", hc->irq, 4 - cnt);
1739 spin_unlock_irqrestore(&hc->lock, flags);
1746 spin_unlock_irqrestore(&hc->lock, flags);
1752 spin_unlock_irqrestore(&hc->lock, flags);
1753 free_irq(hc->irq, hc);
1758 channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1764 case MISDN_CTRL_GETOP:
1765 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1766 MISDN_CTRL_DISCONNECT;
1768 case MISDN_CTRL_LOOP:
1769 /* cq->channel: 0 disables the loop, 1 loops B1, 2 loops B2 */
1770 if (cq->channel < 0 || cq->channel > 2) {
1774 if (cq->channel & 1) {
1775 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1779 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1781 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1782 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1783 hc->hw.conn = (hc->hw.conn & ~7) | 6;
1784 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1786 if (cq->channel & 2) {
1787 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1791 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1793 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1794 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1795 hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
1796 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1798 if (cq->channel & 3)
1799 hc->hw.trm |= 0x80; /* enable IOM-loop */
1801 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1802 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1803 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1805 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1807 case MISDN_CTRL_CONNECT:
1808 if (cq->channel == cq->p1) {
1812 if (cq->channel < 1 || cq->channel > 2 ||
1813 cq->p1 < 1 || cq->p1 > 2) {
1817 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1821 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1823 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1824 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1825 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1829 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1831 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1832 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1833 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
1834 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1836 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1838 case MISDN_CTRL_DISCONNECT:
1839 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1840 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1841 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1844 printk(KERN_WARNING "%s: unknown Op %x\n",
1853 open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
1854 struct channel_req *rq)
1858 if (debug & DEBUG_HW_OPEN)
1859 printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
1860 hc->dch.dev.id, __builtin_return_address(0));
1861 if (rq->protocol == ISDN_P_NONE)
1863 if (!hc->initdone) {
1864 if (rq->protocol == ISDN_P_TE_S0) {
1865 err = create_l1(&hc->dch, hfc_l1callback);
1869 hc->hw.protocol = rq->protocol;
1870 ch->protocol = rq->protocol;
1871 err = init_card(hc);
1875 if (rq->protocol != ch->protocol) {
1876 if (hc->hw.protocol == ISDN_P_TE_S0)
1877 l1_event(hc->dch.l1, CLOSE_CHANNEL);
1878 hc->hw.protocol = rq->protocol;
1879 ch->protocol = rq->protocol;
1884 if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
1885 ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
1886 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
1887 0, NULL, GFP_KERNEL);
1890 if (!try_module_get(THIS_MODULE))
1891 printk(KERN_WARNING "%s:cannot get module\n", __func__);
1896 open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
1898 struct bchannel *bch;
1900 if (rq->adr.channel > 2)
1902 if (rq->protocol == ISDN_P_NONE)
1904 bch = &hc->bch[rq->adr.channel - 1];
1905 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1906 return -EBUSY; /* a B-channel can only be opened once */
1907 bch->ch.protocol = rq->protocol;
1908 rq->ch = &bch->ch; /* TODO: E-channel */
1909 if (!try_module_get(THIS_MODULE))
1910 printk(KERN_WARNING "%s:cannot get module\n", __func__);
1915 * device control function
1918 hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1920 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1921 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1922 struct hfc_pci *hc = dch->hw;
1923 struct channel_req *rq;
1926 if (dch->debug & DEBUG_HW)
1927 printk(KERN_DEBUG "%s: cmd:%x %p\n",
1928 __func__, cmd, arg);
1932 if (rq->adr.channel == 0)
1933 err = open_dchannel(hc, ch, rq);
1935 err = open_bchannel(hc, rq);
1938 if (debug & DEBUG_HW_OPEN)
1939 printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
1940 __func__, hc->dch.dev.id,
1941 __builtin_return_address(0));
1942 module_put(THIS_MODULE);
1944 case CONTROL_CHANNEL:
1945 err = channel_ctrl(hc, arg);
1948 if (dch->debug & DEBUG_HW)
1949 printk(KERN_DEBUG "%s: unknown command %x\n",
1957 setup_hw(struct hfc_pci *hc)
1961 printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
1964 pci_set_master(hc->pdev);
1966 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1969 hc->hw.pci_io = (char *)(ulong)hc->pdev->resource[1].start;
1971 if (!hc->hw.pci_io) {
1972 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1975 /* Allocate memory for FIFOS */
1976 /* the memory needs to be on a 32k boundary within the first 4G */
1977 pci_set_dma_mask(hc->pdev, 0xFFFF8000);
1978 buffer = pci_alloc_consistent(hc->pdev, 0x8000, &hc->hw.dmahandle);
1979 /* We silently assume the address is okay if nonzero */
1982 "HFC-PCI: Error allocating memory for FIFO!\n");
1985 hc->hw.fifos = buffer;
1986 pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
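/*
 * PCI config offset 0x80 is assumed to be the chip's memory window base
 * address register, which only stores the upper address bits of the 32 KiB
 * FIFO window; together with the 0xFFFF8000 DMA mask set above this keeps
 * the FIFO buffer 32 KiB aligned and below 4 GB, as the comment above the
 * allocation requires.
 */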
1987 hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
1989 "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
1990 (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
1991 (u_long) hc->hw.dmahandle, hc->irq, HZ);
1992 /* enable memory mapped ports, disable busmaster */
1993 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
1997 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1998 /* At this point the needed PCI config is done */
1999 /* fifos are still not enabled */
2000 hc->hw.timer.function = (void *) hfcpci_Timer;
2001 hc->hw.timer.data = (long) hc;
2002 init_timer(&hc->hw.timer);
2003 /* default PCM master */
2004 test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
2009 release_card(struct hfc_pci *hc) {
2012 spin_lock_irqsave(&hc->lock, flags);
2013 hc->hw.int_m2 = 0; /* interrupt output off ! */
2015 mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
2016 mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
2017 if (hc->dch.timer.function != NULL) {
2018 del_timer(&hc->dch.timer);
2019 hc->dch.timer.function = NULL;
2021 spin_unlock_irqrestore(&hc->lock, flags);
2022 if (hc->hw.protocol == ISDN_P_TE_S0)
2023 l1_event(hc->dch.l1, CLOSE_CHANNEL);
2025 free_irq(hc->irq, hc);
2026 release_io_hfcpci(hc); /* must release after free_irq! */
2027 mISDN_unregister_device(&hc->dch.dev);
2028 mISDN_freebchannel(&hc->bch[1]);
2029 mISDN_freebchannel(&hc->bch[0]);
2030 mISDN_freedchannel(&hc->dch);
2031 list_del(&hc->list);
2032 pci_set_drvdata(hc->pdev, NULL);
2037 setup_card(struct hfc_pci *card)
2042 char name[MISDN_MAX_IDLEN];
2044 if (HFC_cnt >= MAX_CARDS)
2045 return -EINVAL; /* maybe better value */
2047 card->dch.debug = debug;
2048 spin_lock_init(&card->lock);
2049 mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
2050 card->dch.hw = card;
2051 card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
2052 card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
2053 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
2054 card->dch.dev.D.send = hfcpci_l2l1D;
2055 card->dch.dev.D.ctrl = hfc_dctrl;
2056 card->dch.dev.nrbchan = 2;
2057 for (i = 0; i < 2; i++) {
2058 card->bch[i].nr = i + 1;
2059 test_and_set_bit(i + 1, &card->dch.dev.channelmap[0]);
2060 card->bch[i].debug = debug;
2061 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM);
2062 card->bch[i].hw = card;
2063 card->bch[i].ch.send = hfcpci_l2l1B;
2064 card->bch[i].ch.ctrl = hfc_bctrl;
2065 card->bch[i].ch.nr = i + 1;
2066 list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
2068 err = setup_hw(card);
2071 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
2072 err = mISDN_register_device(&card->dch.dev, name);
2076 write_lock_irqsave(&HFClock, flags);
2077 list_add_tail(&card->list, &HFClist);
2078 write_unlock_irqrestore(&HFClock, flags);
2079 printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
2082 mISDN_freebchannel(&card->bch[1]);
2083 mISDN_freebchannel(&card->bch[0]);
2084 mISDN_freedchannel(&card->dch);
2089 /* private data in the PCI devices list */
2096 static const struct _hfc_map hfc_map[] =
2098 {HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
2099 {HFC_CCD_B000, 0, "Billion B000"},
2100 {HFC_CCD_B006, 0, "Billion B006"},
2101 {HFC_CCD_B007, 0, "Billion B007"},
2102 {HFC_CCD_B008, 0, "Billion B008"},
2103 {HFC_CCD_B009, 0, "Billion B009"},
2104 {HFC_CCD_B00A, 0, "Billion B00A"},
2105 {HFC_CCD_B00B, 0, "Billion B00B"},
2106 {HFC_CCD_B00C, 0, "Billion B00C"},
2107 {HFC_CCD_B100, 0, "Seyeon B100"},
2108 {HFC_CCD_B700, 0, "Primux II S0 B700"},
2109 {HFC_CCD_B701, 0, "Primux II S0 NT B701"},
2110 {HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
2111 {HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
2112 {HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
2113 {HFC_BERKOM_A1T, 0, "German telekom A1T"},
2114 {HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
2115 {HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
2116 {HFC_DIGI_DF_M_IOM2_E, 0,
2117 "Digi International DataFire Micro V IOM2 (Europe)"},
2118 {HFC_DIGI_DF_M_E, 0,
2119 "Digi International DataFire Micro V (Europe)"},
2120 {HFC_DIGI_DF_M_IOM2_A, 0,
2121 "Digi International DataFire Micro V IOM2 (North America)"},
2122 {HFC_DIGI_DF_M_A, 0,
2123 "Digi International DataFire Micro V (North America)"},
2124 {HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
2128 static struct pci_device_id hfc_ids[] =
2130 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0,
2131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[0]},
2132 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000,
2133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[1]},
2134 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006,
2135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[2]},
2136 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007,
2137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[3]},
2138 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008,
2139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[4]},
2140 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009,
2141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[5]},
2142 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A,
2143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[6]},
2144 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B,
2145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[7]},
2146 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C,
2147 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[8]},
2148 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100,
2149 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[9]},
2150 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700,
2151 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[10]},
2152 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701,
2153 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[11]},
2154 {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1,
2155 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[12]},
2156 {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675,
2157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[13]},
2158 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT,
2159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[14]},
2160 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T,
2161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[15]},
2162 {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575,
2163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[16]},
2164 {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0,
2165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[17]},
2166 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E,
2167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[18]},
2168 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,
2169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[19]},
2170 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,
2171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[20]},
2172 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,
2173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[21]},
2174 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2,
2175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[22]},
2179 static int __devinit
2180 hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2183 struct hfc_pci *card;
2184 struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
2186 card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC);
2188 printk(KERN_ERR "No kmem for HFC card\n");
2192 card->subtype = m->subtype;
2193 err = pci_enable_device(pdev);
2199 printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
2200 m->name, pci_name(pdev));
2202 card->irq = pdev->irq;
2203 pci_set_drvdata(pdev, card);
2204 err = setup_card(card);
2206 pci_set_drvdata(pdev, NULL);
2210 static void __devexit
2211 hfc_remove_pci(struct pci_dev *pdev)
2213 struct hfc_pci *card = pci_get_drvdata(pdev);
2217 write_lock_irqsave(&HFClock, flags);
2219 write_unlock_irqrestore(&HFClock, flags);
2222 printk(KERN_WARNING "%s: drvdata already removed\n",
2227 static struct pci_driver hfc_driver = {
2230 .remove = __devexit_p(hfc_remove_pci),
2231 .id_table = hfc_ids,
2239 err = pci_register_driver(&hfc_driver);
2246 struct hfc_pci *card, *next;
2248 list_for_each_entry_safe(card, next, &HFClist, list) {
2251 pci_unregister_driver(&hfc_driver);
2254 module_init(HFC_init);
2255 module_exit(HFC_cleanup);