3 * hfcpci.c low level driver for CCD's hfc-pci based cards
5 * Author Werner Cornelius (werner@isdn4linux.de)
6 * based on existing driver for CCD hfc ISA cards
7 * type approval valid for HFC-S PCI A based card
9 * Copyright 1999 by Werner Cornelius (werner@isdn-development.de)
10 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 * NOTE: only one debug value must be given for all cards
30 * See hfc_pci.h for debug flags.
33 * NOTE: only one poll value must be given for all cards
34 * Give the number of samples for each fifo process.
35 * By default 128 is used. Decrease to reduce delay, increase to
36 * reduce cpu load. If unsure, don't mess with it!
37 * A value of 128 will use controller's interrupt. Other values will
38 * use kernel timer, because the controller will not allow values lower than 128.
40 * Also note that the value depends on the kernel timer frequency.
41 * If kernel uses a frequency of 1000 Hz, steps of 8 samples are possible.
42 * If the kernel uses 100 Hz, steps of 80 samples are possible.
43 * If the kernel uses 300 Hz, steps of about 26 samples are possible.
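* Example (see HFC_init below, which computes tics = (poll * HZ) / 8000 and
* rounds back via poll = (tics * 8000) / HZ): with HZ=1000 a poll of 64 gives
* tics=8 jiffies and stays 64; with HZ=100 the smallest usable value is
* poll=80 (tics=1), matching the step sizes listed above.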
47 #include <linux/module.h>
48 #include <linux/pci.h>
49 #include <linux/delay.h>
50 #include <linux/mISDNhw.h>
54 static const char *hfcpci_revision = "2.0";
58 static uint poll, tics;
59 struct timer_list hfc_tl;
62 MODULE_AUTHOR("Karsten Keil");
63 MODULE_LICENSE("GPL");
64 module_param(debug, uint, S_IRUGO | S_IWUSR);
65 module_param(poll, uint, S_IRUGO | S_IWUSR);
100 unsigned char int_m1;
101 unsigned char int_m2;
103 unsigned char sctrl_r;
104 unsigned char sctrl_e;
106 unsigned char fifo_en;
107 unsigned char bswapped;
108 unsigned char protocol;
110 unsigned char __iomem *pci_io; /* start of PCI IO memory */
111 dma_addr_t dmahandle;
112 void *fifos; /* FIFO memory */
113 int last_bfifo_cnt[2];
114 /* marker saving last b-fifo frame count */
115 struct timer_list timer;
118 #define HFC_CFG_MASTER 1
119 #define HFC_CFG_SLAVE 2
120 #define HFC_CFG_PCM 3
121 #define HFC_CFG_2HFC 4
122 #define HFC_CFG_SLAVEHFC 5
123 #define HFC_CFG_NEG_F0 6
124 #define HFC_CFG_SW_DD_DU 7
126 #define FLG_HFC_TIMER_T1 16
127 #define FLG_HFC_TIMER_T3 17
129 #define NT_T1_COUNT 1120 /* number of 3.125ms interrupts (3.5s) */
130 #define NT_T3_COUNT 31 /* number of 3.125ms interrupts (97 ms) */
131 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
132 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
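/*
 * The NT timer counts are in units of the 3.125 ms timer interrupt:
 * hw.nt_timer is loaded with NT_T1_COUNT (1120 * 3.125 ms = 3.5 s) or
 * NT_T3_COUNT (31 * 3.125 ms ~ 97 ms) and decremented in the timer IRQ;
 * ph_state_nt() reacts once it drops below zero.
 */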
142 struct pci_dev *pdev;
144 spinlock_t lock; /* card lock */
146 struct bchannel bch[2];
149 /* Interface functions */
151 enable_hwirq(struct hfc_pci *hc)
153 hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
154 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
158 disable_hwirq(struct hfc_pci *hc)
160 hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
161 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
165 * free hardware resources used by driver
168 release_io_hfcpci(struct hfc_pci *hc)
170 /* disable memory mapped ports + busmaster */
171 pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
172 del_timer(&hc->hw.timer);
173 pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
174 iounmap(hc->hw.pci_io);
178 * set mode (NT or TE)
181 hfcpci_setmode(struct hfc_pci *hc)
183 if (hc->hw.protocol == ISDN_P_NT_S0) {
184 hc->hw.clkdel = CLKDEL_NT; /* ST-Bit delay for NT-Mode */
185 hc->hw.sctrl |= SCTRL_MODE_NT; /* NT-MODE */
186 hc->hw.states = 1; /* G1 */
188 hc->hw.clkdel = CLKDEL_TE; /* ST-Bit delay for TE-Mode */
189 hc->hw.sctrl &= ~SCTRL_MODE_NT; /* TE-MODE */
190 hc->hw.states = 2; /* F2 */
192 Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
193 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
195 Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
196 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
200 * function called to reset the HFC PCI chip. A complete software reset of chip and FIFOs is done.
204 reset_hfcpci(struct hfc_pci *hc)
209 printk(KERN_DEBUG "reset_hfcpci: entered\n");
210 val = Read_hfc(hc, HFCPCI_CHIP_ID);
211 printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
212 /* enable memory mapped ports, disable busmaster */
213 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
215 /* enable memory ports + busmaster */
216 pci_write_config_word(hc->pdev, PCI_COMMAND,
217 PCI_ENA_MEMIO + PCI_ENA_MASTER);
218 val = Read_hfc(hc, HFCPCI_STATUS);
219 printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
220 hc->hw.cirm = HFCPCI_RESET; /* Reset On */
221 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
222 set_current_state(TASK_UNINTERRUPTIBLE);
223 mdelay(10); /* Timeout 10ms */
224 hc->hw.cirm = 0; /* Reset Off */
225 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
226 val = Read_hfc(hc, HFCPCI_STATUS);
227 printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
228 while (cnt < 50000) { /* max 50000 us */
231 val = Read_hfc(hc, HFCPCI_STATUS);
235 printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
237 hc->hw.fifo_en = 0x30; /* only D fifos enabled */
239 hc->hw.bswapped = 0; /* no exchange */
240 hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
241 hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect, threshold */
242 hc->hw.sctrl = 0x40; /* set tx_lo mode, error in datasheet! */
244 hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE; /* S/T Auto awake */
246 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
247 hc->hw.mst_m |= HFCPCI_MASTER; /* HFC Master Mode */
248 if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
249 hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
250 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
251 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
252 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
253 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
255 hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
256 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
257 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
259 /* Clear already pending ints */
260 (void) Read_hfc(hc, HFCPCI_INT_S1); /* dummy read */
265 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
266 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
269 * Init GCI/IOM2 in master mode
270 * Slots 0 and 1 are set for B-chan 1 and 2
271 * D- and monitor/CI channel are not enabled
272 * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
273 * STIO2 is used as data input, B1+B2 from IOM->ST
274 * ST B-channel send disabled -> continuous 1s
275 * The IOM slots are always enabled
277 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
278 /* set data flow directions: connect B1,B2: HFC to/from PCM */
281 hc->hw.conn = 0x36; /* set data flow directions */
282 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
283 Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
284 Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
285 Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
286 Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
288 Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
289 Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
290 Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
291 Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
294 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
295 val = Read_hfc(hc, HFCPCI_INT_S2);
299 * Timer function called when kernel timer expires
302 hfcpci_Timer(struct hfc_pci *hc)
304 hc->hw.timer.expires = jiffies + 75;
307 * WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
308 * add_timer(&hc->hw.timer);
314 * select the b-channel entry that matches the given channel and is active
316 static struct bchannel *
317 Sel_BCS(struct hfc_pci *hc, int channel)
319 if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
320 (hc->bch[0].nr & channel))
322 else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
323 (hc->bch[1].nr & channel))
330 * clear the desired B-channel rx fifo
333 hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
339 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
340 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
342 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
343 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
346 hc->hw.fifo_en ^= fifo_state;
347 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
348 hc->hw.last_bfifo_cnt[fifo] = 0;
349 bzr->f1 = MAX_B_FRAMES;
350 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
351 bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
352 bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
353 le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
355 hc->hw.fifo_en |= fifo_state;
356 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
360 * clear the desired B-channel tx fifo
362 static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
368 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
369 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
371 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
372 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
375 hc->hw.fifo_en ^= fifo_state;
376 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
377 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
378 printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
379 "z1(%x) z2(%x) state(%x)\n",
380 fifo, bzt->f1, bzt->f2,
381 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
382 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
384 bzt->f2 = MAX_B_FRAMES;
385 bzt->f1 = bzt->f2; /* init F pointers to remain constant */
386 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
387 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
389 hc->hw.fifo_en |= fifo_state;
390 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
391 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
393 "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
394 fifo, bzt->f1, bzt->f2,
395 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
396 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
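/*
 * Both clear functions follow the same pattern: the FIFO is temporarily
 * disabled in fifo_en, the F counters are set equal (no frames pending)
 * and the Z pointers are reset to the top of the buffer, then the FIFO
 * is re-enabled. For TX, Z1 and Z2 are initialized one byte apart as
 * done above.
 */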
400 * read a complete B-frame out of the buffer
403 hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
404 u_char *bdata, int count)
406 u_char *ptr, *ptr1, new_f2;
407 int total, maxlen, new_z2;
410 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
411 printk(KERN_DEBUG "hfcpci_empty_fifo\n");
412 zp = &bz->za[bz->f2]; /* point to Z-Regs */
413 new_z2 = le16_to_cpu(zp->z2) + count; /* new position in fifo */
414 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
415 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
416 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
417 if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
418 (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
419 if (bch->debug & DEBUG_HW)
420 printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
421 "invalid length %d or crc\n", count);
422 #ifdef ERROR_STATISTIC
425 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
426 bz->f2 = new_f2; /* next buffer */
428 bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
430 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
435 ptr = skb_put(bch->rx_skb, count);
437 if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
438 maxlen = count; /* complete transfer */
440 maxlen = B_FIFO_SIZE + B_SUB_VAL -
441 le16_to_cpu(zp->z2); /* maximum */
443 ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
445 memcpy(ptr, ptr1, maxlen); /* copy data */
448 if (count) { /* rest remaining */
450 ptr1 = bdata; /* start of buffer */
451 memcpy(ptr, ptr1, count); /* rest */
453 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
454 bz->f2 = new_f2; /* next buffer */
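/*
 * Z pointer bookkeeping: the Z counters run in the window
 * [B_SUB_VAL .. B_SUB_VAL + B_FIFO_SIZE - 1], so bdata is indexed with
 * (z - B_SUB_VAL) and a wrap is handled by subtracting B_FIFO_SIZE once
 * new_z2 leaves that window, as done above.
 */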
460 * D-channel receive procedure
463 receive_dmsg(struct hfc_pci *hc)
465 struct dchannel *dch = &hc->dch;
473 df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
474 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
475 zp = &df->za[df->f2 & D_FREG_MASK];
476 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
480 if (dch->debug & DEBUG_HW_DCHANNEL)
482 "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
488 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
489 (df->data[le16_to_cpu(zp->z1)])) {
490 if (dch->debug & DEBUG_HW)
492 "empty_fifo hfcpci paket inv. len "
495 df->data[le16_to_cpu(zp->z1)]);
496 #ifdef ERROR_STATISTIC
499 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
500 (MAX_D_FRAMES + 1); /* next buffer */
501 df->za[df->f2 & D_FREG_MASK].z2 =
502 cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) & (D_FIFO_SIZE - 1));
504 dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
507 "HFC-PCI: D receive out of memory\n");
512 ptr = skb_put(dch->rx_skb, rcnt);
514 if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
515 maxlen = rcnt; /* complete transfer */
517 maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
520 ptr1 = df->data + le16_to_cpu(zp->z2);
522 memcpy(ptr, ptr1, maxlen); /* copy data */
525 if (rcnt) { /* rest remaining */
527 ptr1 = df->data; /* start of buffer */
528 memcpy(ptr, ptr1, rcnt); /* rest */
530 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
531 (MAX_D_FRAMES + 1); /* next buffer */
532 df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
533 le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
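/*
 * Note on the D FIFO bookkeeping above: f2 is advanced modulo
 * (MAX_D_FRAMES + 1) and then ORed with (MAX_D_FRAMES + 1), apparently
 * because the chip keeps the D-channel F counters in the upper half of
 * the register range; the new frame's Z2 is set past the consumed bytes
 * modulo D_FIFO_SIZE.
 */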
541 * check for transparent receive data and read at most one 'poll' sized chunk, if available
544 hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
547 int new_z2, fcnt, maxlen;
550 z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
553 fcnt = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
555 return; /* no data avail */
558 fcnt += B_FIFO_SIZE; /* bytes actually buffered */
559 new_z2 = le16_to_cpu(*z2r) + fcnt; /* new position in fifo */
560 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
561 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
563 if (fcnt > MAX_DATA_SIZE) { /* flush, if oversized */
564 *z2r = cpu_to_le16(new_z2); /* new position */
568 bch->rx_skb = mI_alloc_skb(fcnt, GFP_ATOMIC);
570 ptr = skb_put(bch->rx_skb, fcnt);
571 if (le16_to_cpu(*z2r) + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
572 maxlen = fcnt; /* complete transfer */
574 maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
577 ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
579 memcpy(ptr, ptr1, maxlen); /* copy data */
582 if (fcnt) { /* rest remaining */
584 ptr1 = bdata; /* start of buffer */
585 memcpy(ptr, ptr1, fcnt); /* rest */
589 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
591 *z2r = cpu_to_le16(new_z2); /* new position */
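/*
 * Transparent reception has no frame (F) structure: only the Z pointers
 * are compared, at most the currently buffered bytes are copied (split
 * into two memcpys around the buffer wrap) and Z2 is advanced afterwards,
 * even if the skb allocation failed, so the FIFO cannot stall.
 */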
595 * B-channel main receive routine
598 main_rec_hfcpci(struct bchannel *bch)
600 struct hfc_pci *hc = bch->hw;
602 int receive = 0, count = 5;
607 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
608 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
609 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
612 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
613 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
618 if (bz->f1 != bz->f2) {
619 if (bch->debug & DEBUG_HW_BCHANNEL)
620 printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
621 bch->nr, bz->f1, bz->f2);
622 zp = &bz->za[bz->f2];
624 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
628 if (bch->debug & DEBUG_HW_BCHANNEL)
630 "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
631 bch->nr, le16_to_cpu(zp->z1),
632 le16_to_cpu(zp->z2), rcnt);
633 hfcpci_empty_bfifo(bch, bz, bdata, rcnt);
634 rcnt = bz->f1 - bz->f2;
636 rcnt += MAX_B_FRAMES + 1;
637 if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
639 hfcpci_clear_fifo_rx(hc, real_fifo);
641 hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
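/*
 * Out-of-sync detection: rcnt is now the number of frames still pending
 * (f1 - f2 modulo MAX_B_FRAMES + 1). If the count remembered from the
 * previous pass exceeds it by more than one, frames vanished without
 * being read here, so the RX FIFO is assumed to have lost sync and is
 * cleared.
 */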
646 } else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
647 hfcpci_empty_fifo_trans(bch, bz, bdata);
651 if (count && receive)
657 * D-channel send routine
660 hfcpci_fill_dfifo(struct hfc_pci *hc)
662 struct dchannel *dch = &hc->dch;
664 int count, new_z1, maxlen;
666 u_char *src, *dst, new_f1;
668 if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
669 printk(KERN_DEBUG "%s\n", __func__);
673 count = dch->tx_skb->len - dch->tx_idx;
676 df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
678 if (dch->debug & DEBUG_HW_DFIFO)
679 printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
681 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
682 fcnt = df->f1 - df->f2; /* frame count actually buffered */
684 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
685 if (fcnt > (MAX_D_FRAMES - 1)) {
686 if (dch->debug & DEBUG_HW_DCHANNEL)
688 "hfcpci_fill_Dfifo more as 14 frames\n");
689 #ifdef ERROR_STATISTIC
694 /* now determine free bytes in FIFO buffer */
695 maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
696 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
698 maxlen += D_FIFO_SIZE; /* count now contains available bytes */
700 if (dch->debug & DEBUG_HW_DCHANNEL)
701 printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
703 if (count > maxlen) {
704 if (dch->debug & DEBUG_HW_DCHANNEL)
705 printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
708 new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
710 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
711 src = dch->tx_skb->data + dch->tx_idx; /* source pointer */
712 dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
713 maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
716 maxlen = count; /* limit size */
717 memcpy(dst, src, maxlen); /* first copy */
719 count -= maxlen; /* remaining bytes */
721 dst = df->data; /* start of buffer */
722 src += maxlen; /* new position */
723 memcpy(dst, src, count);
725 df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
726 /* for next buffer */
727 df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
728 /* new pos actual buffer */
729 df->f1 = new_f1; /* next frame */
730 dch->tx_idx = dch->tx_skb->len;
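/*
 * Free space in the D TX FIFO is Z2(F2) - Z1(F1) - 1 (plus D_FIFO_SIZE
 * on wrap); the copy above is split into two memcpys when the data
 * crosses the end of the ring buffer, and Z1/F1 are only advanced after
 * the copy, which is what hands the complete frame to the chip.
 */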
734 * B-channel send routine
737 hfcpci_fill_fifo(struct bchannel *bch)
739 struct hfc_pci *hc = bch->hw;
744 u_char new_f1, *src, *dst;
747 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
748 printk(KERN_DEBUG "%s\n", __func__);
749 if ((!bch->tx_skb) || bch->tx_skb->len <= 0)
751 count = bch->tx_skb->len - bch->tx_idx;
752 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
753 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
754 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
756 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
757 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
760 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
761 z1t = &bz->za[MAX_B_FRAMES].z1;
763 if (bch->debug & DEBUG_HW_BCHANNEL)
764 printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
765 "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
766 le16_to_cpu(*z1t), le16_to_cpu(*z2t));
767 fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
770 /* fcnt contains available bytes in fifo */
771 fcnt = B_FIFO_SIZE - fcnt;
772 /* remaining bytes to send (bytes in fifo) */
774 /* "fill fifo if empty" feature */
775 if (test_bit(FLG_FILLEMPTY, &bch->Flags) && !fcnt) {
776 /* printk(KERN_DEBUG "%s: buffer empty, so we have "
777 "underrun\n", __func__); */
778 /* fill buffer, to prevent future underrun */
779 count = HFCPCI_FILLEMPTY;
780 new_z1 = le16_to_cpu(*z1t) + count;
781 /* new buffer Position */
782 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
783 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
784 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
785 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
787 if (bch->debug & DEBUG_HW_BFIFO)
788 printk(KERN_DEBUG "hfcpci_FFt fillempty "
789 "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
790 fcnt, maxlen, new_z1, dst);
793 maxlen = count; /* limit size */
794 memset(dst, 0x2a, maxlen); /* first copy */
795 count -= maxlen; /* remaining bytes */
797 dst = bdata; /* start of buffer */
798 memset(dst, 0x2a, count);
800 *z1t = cpu_to_le16(new_z1); /* now send data */
804 count = bch->tx_skb->len - bch->tx_idx;
805 /* maximum fill shall be poll*2 */
806 if (count > (poll << 1) - fcnt)
807 count = (poll << 1) - fcnt;
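/*
 * Throttle: never queue more than 2 * poll bytes in the transparent TX
 * FIFO, so playout latency stays around two poll periods; with the
 * default poll of 128 that is 256 bytes, i.e. 32 ms at the B-channel
 * rate of 8000 bytes/s.
 */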
810 /* data is suitable for fifo */
811 new_z1 = le16_to_cpu(*z1t) + count;
812 /* new buffer Position */
813 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
814 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
815 src = bch->tx_skb->data + bch->tx_idx;
817 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
818 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
820 if (bch->debug & DEBUG_HW_BFIFO)
821 printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
822 "maxl(%d) nz1(%x) dst(%p)\n",
823 fcnt, maxlen, new_z1, dst);
825 bch->tx_idx += count;
827 maxlen = count; /* limit size */
828 memcpy(dst, src, maxlen); /* first copy */
829 count -= maxlen; /* remaining bytes */
831 dst = bdata; /* start of buffer */
832 src += maxlen; /* new position */
833 memcpy(dst, src, count);
835 *z1t = cpu_to_le16(new_z1); /* now send data */
836 if (bch->tx_idx < bch->tx_skb->len)
838 /* send confirm, on trans, free on hdlc. */
839 if (test_bit(FLG_TRANSPARENT, &bch->Flags))
841 dev_kfree_skb(bch->tx_skb);
842 if (get_next_bframe(bch))
846 if (bch->debug & DEBUG_HW_BCHANNEL)
848 "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
849 __func__, bch->nr, bz->f1, bz->f2,
851 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
853 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
854 if (fcnt > (MAX_B_FRAMES - 1)) {
855 if (bch->debug & DEBUG_HW_BCHANNEL)
857 "hfcpci_fill_Bfifo more as 14 frames\n");
860 /* now determine free bytes in FIFO buffer */
861 maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
862 le16_to_cpu(bz->za[bz->f1].z1) - 1;
864 maxlen += B_FIFO_SIZE; /* count now contains available bytes */
866 if (bch->debug & DEBUG_HW_BCHANNEL)
867 printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
868 bch->nr, count, maxlen);
870 if (maxlen < count) {
871 if (bch->debug & DEBUG_HW_BCHANNEL)
872 printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
875 new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
876 /* new buffer Position */
877 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
878 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
880 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
881 src = bch->tx_skb->data + bch->tx_idx; /* source pointer */
882 dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
883 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
886 maxlen = count; /* limit size */
887 memcpy(dst, src, maxlen); /* first copy */
889 count -= maxlen; /* remaining bytes */
891 dst = bdata; /* start of buffer */
892 src += maxlen; /* new position */
893 memcpy(dst, src, count);
895 bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
896 bz->f1 = new_f1; /* next frame */
897 dev_kfree_skb(bch->tx_skb);
898 get_next_bframe(bch);
904 * handle L1 state changes TE
908 ph_state_te(struct dchannel *dch)
911 printk(KERN_DEBUG "%s: TE newstate %x\n",
912 __func__, dch->state);
913 switch (dch->state) {
915 l1_event(dch->l1, HW_RESET_IND);
918 l1_event(dch->l1, HW_DEACT_IND);
922 l1_event(dch->l1, ANYSIGNAL);
925 l1_event(dch->l1, INFO2);
928 l1_event(dch->l1, INFO4_P8);
934 * handle L1 state changes NT
938 handle_nt_timer3(struct dchannel *dch) {
939 struct hfc_pci *hc = dch->hw;
941 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
942 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
943 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
945 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
946 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
947 hc->hw.mst_m |= HFCPCI_MASTER;
948 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
949 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
950 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
954 ph_state_nt(struct dchannel *dch)
956 struct hfc_pci *hc = dch->hw;
959 printk(KERN_DEBUG "%s: NT newstate %x\n",
960 __func__, dch->state);
961 switch (dch->state) {
963 if (hc->hw.nt_timer < 0) {
965 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
966 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
967 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
968 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
969 /* Clear already pending ints */
970 (void) Read_hfc(hc, HFCPCI_INT_S1); /* dummy read */
971 Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
973 Write_hfc(hc, HFCPCI_STATES, 4);
975 } else if (hc->hw.nt_timer == 0) {
976 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
977 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
978 hc->hw.nt_timer = NT_T1_COUNT;
979 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
980 hc->hw.ctmt |= HFCPCI_TIM3_125;
981 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
983 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
984 test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
985 /* allow G2 -> G3 transition */
986 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
988 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
993 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
994 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
995 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
996 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
997 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
998 hc->hw.mst_m &= ~HFCPCI_MASTER;
999 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1000 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1001 _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
1002 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1005 hc->hw.nt_timer = 0;
1006 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
1007 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1008 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1009 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1012 if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
1013 if (!test_and_clear_bit(FLG_L2_ACTIVATED,
1015 handle_nt_timer3(dch);
1018 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1019 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
1020 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1021 hc->hw.nt_timer = NT_T3_COUNT;
1022 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
1023 hc->hw.ctmt |= HFCPCI_TIM3_125;
1024 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
1032 ph_state(struct dchannel *dch)
1034 struct hfc_pci *hc = dch->hw;
1036 if (hc->hw.protocol == ISDN_P_NT_S0) {
1037 if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
1038 hc->hw.nt_timer < 0)
1039 handle_nt_timer3(dch);
1047 * Layer 1 callback function
1050 hfc_l1callback(struct dchannel *dch, u_int cmd)
1052 struct hfc_pci *hc = dch->hw;
1057 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1058 hc->hw.mst_m |= HFCPCI_MASTER;
1059 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1062 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
1065 Write_hfc(hc, HFCPCI_STATES, 3); /* HFC ST 2 */
1066 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1067 hc->hw.mst_m |= HFCPCI_MASTER;
1068 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1069 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1071 l1_event(dch->l1, HW_POWERUP_IND);
1074 hc->hw.mst_m &= ~HFCPCI_MASTER;
1075 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1076 skb_queue_purge(&dch->squeue);
1078 dev_kfree_skb(dch->tx_skb);
1083 dev_kfree_skb(dch->rx_skb);
1086 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1087 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1088 del_timer(&dch->timer);
1090 case HW_POWERUP_REQ:
1091 Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
1093 case PH_ACTIVATE_IND:
1094 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
1095 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1098 case PH_DEACTIVATE_IND:
1099 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1100 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1104 if (dch->debug & DEBUG_HW)
1105 printk(KERN_DEBUG "%s: unknown command %x\n",
1116 tx_birq(struct bchannel *bch)
1118 if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
1119 hfcpci_fill_fifo(bch);
1122 dev_kfree_skb(bch->tx_skb);
1123 if (get_next_bframe(bch))
1124 hfcpci_fill_fifo(bch);
1129 tx_dirq(struct dchannel *dch)
1131 if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
1132 hfcpci_fill_dfifo(dch->hw);
1135 dev_kfree_skb(dch->tx_skb);
1136 if (get_next_dframe(dch))
1137 hfcpci_fill_dfifo(dch->hw);
1142 hfcpci_int(int intno, void *dev_id)
1144 struct hfc_pci *hc = dev_id;
1146 struct bchannel *bch;
1149 spin_lock(&hc->lock);
1150 if (!(hc->hw.int_m2 & 0x08)) {
1151 spin_unlock(&hc->lock);
1152 return IRQ_NONE; /* not initialised */
1154 stat = Read_hfc(hc, HFCPCI_STATUS);
1155 if (HFCPCI_ANYINT & stat) {
1156 val = Read_hfc(hc, HFCPCI_INT_S1);
1157 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1159 "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
1162 spin_unlock(&hc->lock);
1167 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1168 printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
1169 val &= hc->hw.int_m1;
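/*
 * INT_S1 bit usage as decoded below: 0x40 state machine change, 0x80
 * timer, 0x08/0x10 B1/B2 receive, 0x01/0x02 B1/B2 transmit, 0x20 D
 * receive, 0x04 D transmit. The B bits refer to the hardware FIFOs;
 * hw.bswapped decides which bchannel object they are routed to.
 */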
1170 if (val & 0x40) { /* state machine irq */
1171 exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
1172 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1173 printk(KERN_DEBUG "ph_state chg %d->%d\n",
1174 hc->dch.state, exval);
1175 hc->dch.state = exval;
1176 schedule_event(&hc->dch, FLG_PHCHANGE);
1179 if (val & 0x80) { /* timer irq */
1180 if (hc->hw.protocol == ISDN_P_NT_S0) {
1181 if ((--hc->hw.nt_timer) < 0)
1182 schedule_event(&hc->dch, FLG_PHCHANGE);
1185 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
1187 if (val & 0x08) { /* B1 rx */
1188 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1190 main_rec_hfcpci(bch);
1191 else if (hc->dch.debug)
1192 printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
1194 if (val & 0x10) { /* B2 rx */
1195 bch = Sel_BCS(hc, 2);
1197 main_rec_hfcpci(bch);
1198 else if (hc->dch.debug)
1199 printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
1201 if (val & 0x01) { /* B1 tx */
1202 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1205 else if (hc->dch.debug)
1206 printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
1208 if (val & 0x02) { /* B2 tx */
1209 bch = Sel_BCS(hc, 2);
1212 else if (hc->dch.debug)
1213 printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
1215 if (val & 0x20) /* D rx */
1217 if (val & 0x04) { /* D tx */
1218 if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
1219 del_timer(&hc->dch.timer);
1222 spin_unlock(&hc->lock);
1227 * timer callback for D-channel busy resolution. Currently it does nothing.
1230 hfcpci_dbusy_timer(struct hfc_pci *hc)
1235 * activate/deactivate hardware for selected channels and mode
1238 mode_hfcpci(struct bchannel *bch, int bc, int protocol)
1240 struct hfc_pci *hc = bch->hw;
1242 u_char rx_slot = 0, tx_slot = 0, pcm_mode;
1244 if (bch->debug & DEBUG_HW_BCHANNEL)
1246 "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
1247 bch->state, protocol, bch->nr, bc);
1250 pcm_mode = (bc>>24) & 0xff;
1251 if (pcm_mode) { /* PCM SLOT USE */
1252 if (!test_bit(HFC_CFG_PCM, &hc->cfg))
1254 "%s: pcm channel id without HFC_CFG_PCM\n",
1256 rx_slot = (bc>>8) & 0xff;
1257 tx_slot = (bc>>16) & 0xff;
1259 } else if (test_bit(HFC_CFG_PCM, &hc->cfg) &&
1260 (protocol > ISDN_P_NONE))
1261 printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
1263 if (hc->chanlimit > 1) {
1264 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1265 hc->hw.sctrl_e &= ~0x80;
1268 if (protocol != ISDN_P_NONE) {
1269 hc->hw.bswapped = 1; /* B1 and B2 exchanged */
1270 hc->hw.sctrl_e |= 0x80;
1272 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1273 hc->hw.sctrl_e &= ~0x80;
1277 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1278 hc->hw.sctrl_e &= ~0x80;
1282 case (-1): /* used for init */
1286 if (bch->state == ISDN_P_NONE)
1289 hc->hw.sctrl &= ~SCTRL_B2_ENA;
1290 hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
1292 hc->hw.sctrl &= ~SCTRL_B1_ENA;
1293 hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
1296 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
1297 hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS +
1300 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
1301 hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS +
1304 #ifdef REVERSE_BITORDER
1306 hc->hw.cirm &= 0x7f;
1308 hc->hw.cirm &= 0xbf;
1310 bch->state = ISDN_P_NONE;
1312 test_and_clear_bit(FLG_HDLC, &bch->Flags);
1313 test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
1315 case (ISDN_P_B_RAW):
1316 bch->state = protocol;
1318 hfcpci_clear_fifo_rx(hc, (fifo2 & 2)?1:0);
1319 hfcpci_clear_fifo_tx(hc, (fifo2 & 2)?1:0);
1321 hc->hw.sctrl |= SCTRL_B2_ENA;
1322 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1323 #ifdef REVERSE_BITORDER
1324 hc->hw.cirm |= 0x80;
1327 hc->hw.sctrl |= SCTRL_B1_ENA;
1328 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1329 #ifdef REVERSE_BITORDER
1330 hc->hw.cirm |= 0x40;
1334 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1336 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
1339 hc->hw.conn &= ~0x18;
1341 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1343 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
1346 hc->hw.conn &= ~0x03;
1348 test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
1350 case (ISDN_P_B_HDLC):
1351 bch->state = protocol;
1353 hfcpci_clear_fifo_rx(hc, (fifo2 & 2)?1:0);
1354 hfcpci_clear_fifo_tx(hc, (fifo2 & 2)?1:0);
1356 hc->hw.sctrl |= SCTRL_B2_ENA;
1357 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1359 hc->hw.sctrl |= SCTRL_B1_ENA;
1360 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1363 hc->hw.last_bfifo_cnt[1] = 0;
1364 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1365 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
1368 hc->hw.conn &= ~0x18;
1370 hc->hw.last_bfifo_cnt[0] = 0;
1371 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1372 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
1375 hc->hw.conn &= ~0x03;
1377 test_and_set_bit(FLG_HDLC, &bch->Flags);
1380 printk(KERN_DEBUG "prot not known %x\n", protocol);
1381 return -ENOPROTOOPT;
1383 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
1384 if ((protocol == ISDN_P_NONE) ||
1385 (protocol == -1)) { /* init case */
1389 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
1398 hc->hw.conn &= 0xc7;
1399 hc->hw.conn |= 0x08;
1400 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
1402 printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
1404 Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
1405 Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
1407 hc->hw.conn &= 0xf8;
1408 hc->hw.conn |= 0x01;
1409 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
1411 printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
1413 Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
1414 Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
1417 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
1418 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1419 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1420 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
1421 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1422 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1423 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1424 #ifdef REVERSE_BITORDER
1425 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1431 set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
1433 struct hfc_pci *hc = bch->hw;
1435 if (bch->debug & DEBUG_HW_BCHANNEL)
1437 "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
1438 bch->state, protocol, bch->nr, chan);
1439 if (bch->nr != chan) {
1441 "HFCPCI rxtest wrong channel parameter %x/%x\n",
1446 case (ISDN_P_B_RAW):
1447 bch->state = protocol;
1448 hfcpci_clear_fifo_rx(hc, (chan & 2)?1:0);
1450 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1451 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1453 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1455 hc->hw.conn &= ~0x18;
1456 #ifdef REVERSE_BITORDER
1457 hc->hw.cirm |= 0x80;
1460 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1461 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1463 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1465 hc->hw.conn &= ~0x03;
1466 #ifdef REVERSE_BITORDER
1467 hc->hw.cirm |= 0x40;
1471 case (ISDN_P_B_HDLC):
1472 bch->state = protocol;
1473 hfcpci_clear_fifo_rx(hc, (chan & 2)?1:0);
1475 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1476 hc->hw.last_bfifo_cnt[1] = 0;
1477 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1478 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1480 hc->hw.conn &= ~0x18;
1482 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1483 hc->hw.last_bfifo_cnt[0] = 0;
1484 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1485 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1487 hc->hw.conn &= ~0x03;
1491 printk(KERN_DEBUG "prot not known %x\n", protocol);
1492 return -ENOPROTOOPT;
1494 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1495 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1496 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1497 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1498 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1499 #ifdef REVERSE_BITORDER
1500 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1506 deactivate_bchannel(struct bchannel *bch)
1508 struct hfc_pci *hc = bch->hw;
1511 spin_lock_irqsave(&hc->lock, flags);
1512 if (test_and_clear_bit(FLG_TX_NEXT, &bch->Flags)) {
1513 dev_kfree_skb(bch->next_skb);
1514 bch->next_skb = NULL;
1517 dev_kfree_skb(bch->tx_skb);
1522 dev_kfree_skb(bch->rx_skb);
1525 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1526 test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
1527 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
1528 spin_unlock_irqrestore(&hc->lock, flags);
1532 * Layer 1 B-channel hardware access
1535 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1540 case MISDN_CTRL_GETOP:
1541 cq->op = MISDN_CTRL_FILL_EMPTY;
1543 case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
1544 test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
1545 if (debug & DEBUG_HW_OPEN)
1546 printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
1547 "off=%d)\n", __func__, bch->nr, !!cq->p1);
1550 printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
1557 hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1559 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1560 struct hfc_pci *hc = bch->hw;
1564 if (bch->debug & DEBUG_HW)
1565 printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
1568 spin_lock_irqsave(&hc->lock, flags);
1569 ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
1570 spin_unlock_irqrestore(&hc->lock, flags);
1572 case HW_TESTRX_HDLC:
1573 spin_lock_irqsave(&hc->lock, flags);
1574 ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
1575 spin_unlock_irqrestore(&hc->lock, flags);
1578 spin_lock_irqsave(&hc->lock, flags);
1579 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1580 spin_unlock_irqrestore(&hc->lock, flags);
1584 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1585 if (test_bit(FLG_ACTIVE, &bch->Flags))
1586 deactivate_bchannel(bch);
1587 ch->protocol = ISDN_P_NONE;
1589 module_put(THIS_MODULE);
1592 case CONTROL_CHANNEL:
1593 ret = channel_bctrl(bch, arg);
1596 printk(KERN_WARNING "%s: unknown prim(%x)\n",
1603 * Layer2 -> Layer 1 Dchannel data
1606 hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
1608 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1609 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1610 struct hfc_pci *hc = dch->hw;
1612 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1618 spin_lock_irqsave(&hc->lock, flags);
1619 ret = dchannel_senddata(dch, skb);
1620 if (ret > 0) { /* direct TX */
1621 id = hh->id; /* skb can be freed */
1622 hfcpci_fill_dfifo(dch->hw);
1624 spin_unlock_irqrestore(&hc->lock, flags);
1625 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1627 spin_unlock_irqrestore(&hc->lock, flags);
1629 case PH_ACTIVATE_REQ:
1630 spin_lock_irqsave(&hc->lock, flags);
1631 if (hc->hw.protocol == ISDN_P_NT_S0) {
1633 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1634 hc->hw.mst_m |= HFCPCI_MASTER;
1635 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1636 if (test_bit(FLG_ACTIVE, &dch->Flags)) {
1637 spin_unlock_irqrestore(&hc->lock, flags);
1638 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
1639 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1642 test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
1643 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1644 HFCPCI_DO_ACTION | 1);
1646 ret = l1_event(dch->l1, hh->prim);
1647 spin_unlock_irqrestore(&hc->lock, flags);
1649 case PH_DEACTIVATE_REQ:
1650 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1651 spin_lock_irqsave(&hc->lock, flags);
1652 if (hc->hw.protocol == ISDN_P_NT_S0) {
1653 /* prepare deactivation */
1654 Write_hfc(hc, HFCPCI_STATES, 0x40);
1655 skb_queue_purge(&dch->squeue);
1657 dev_kfree_skb(dch->tx_skb);
1662 dev_kfree_skb(dch->rx_skb);
1665 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1666 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1667 del_timer(&dch->timer);
1669 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
1670 dchannel_sched_event(&hc->dch, D_CLEARBUSY);
1672 hc->hw.mst_m &= ~HFCPCI_MASTER;
1673 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1676 ret = l1_event(dch->l1, hh->prim);
1678 spin_unlock_irqrestore(&hc->lock, flags);
1687 * Layer2 -> Layer 1 Bchannel data
1690 hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1692 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1693 struct hfc_pci *hc = bch->hw;
1695 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1701 spin_lock_irqsave(&hc->lock, flags);
1702 ret = bchannel_senddata(bch, skb);
1703 if (ret > 0) { /* direct TX */
1704 id = hh->id; /* skb can be freed */
1705 hfcpci_fill_fifo(bch);
1707 spin_unlock_irqrestore(&hc->lock, flags);
1708 if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
1709 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1711 spin_unlock_irqrestore(&hc->lock, flags);
1713 case PH_ACTIVATE_REQ:
1714 spin_lock_irqsave(&hc->lock, flags);
1715 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
1716 ret = mode_hfcpci(bch, bch->nr, ch->protocol);
1719 spin_unlock_irqrestore(&hc->lock, flags);
1721 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
1724 case PH_DEACTIVATE_REQ:
1725 deactivate_bchannel(bch);
1726 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
1737 * called for card init message
1741 inithfcpci(struct hfc_pci *hc)
1743 printk(KERN_DEBUG "inithfcpci: entered\n");
1744 hc->dch.timer.function = (void *) hfcpci_dbusy_timer;
1745 hc->dch.timer.data = (long) &hc->dch;
1746 init_timer(&hc->dch.timer);
1748 mode_hfcpci(&hc->bch[0], 1, -1);
1749 mode_hfcpci(&hc->bch[1], 2, -1);
1754 init_card(struct hfc_pci *hc)
1759 printk(KERN_DEBUG "init_card: entered\n");
1762 spin_lock_irqsave(&hc->lock, flags);
1764 spin_unlock_irqrestore(&hc->lock, flags);
1765 if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
1767 "mISDN: couldn't get interrupt %d\n", hc->irq);
1770 spin_lock_irqsave(&hc->lock, flags);
1775 * Finally enable IRQ output
1776 * this is only allowed if an IRQ routine is already
1777 * established for this HFC, so don't do that earlier
1780 spin_unlock_irqrestore(&hc->lock, flags);
1782 current->state = TASK_UNINTERRUPTIBLE;
1783 schedule_timeout((80*HZ)/1000);
1784 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1785 hc->irq, hc->irqcnt);
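/*
 * IRQ sanity check: reset_hfcpci() leaves the timer interrupt enabled,
 * so after enabling the IRQ output the handler should have bumped
 * hc->irqcnt during the ~80 ms sleep above. If not, the init is retried
 * a few times (cnt) and finally fails, catching a wrong or unrouted IRQ
 * line early.
 */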
1786 /* now switch timer interrupt off */
1787 spin_lock_irqsave(&hc->lock, flags);
1788 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1789 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1790 /* reinit mode reg */
1791 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1794 "HFC PCI: IRQ(%d) getting no interrupts "
1795 "during init %d\n", hc->irq, 4 - cnt);
1797 spin_unlock_irqrestore(&hc->lock, flags);
1804 spin_unlock_irqrestore(&hc->lock, flags);
1810 spin_unlock_irqrestore(&hc->lock, flags);
1811 free_irq(hc->irq, hc);
1816 channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1822 case MISDN_CTRL_GETOP:
1823 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1824 MISDN_CTRL_DISCONNECT;
1826 case MISDN_CTRL_LOOP:
1827 /* channel 0 disabled loop */
1828 if (cq->channel < 0 || cq->channel > 2) {
1832 if (cq->channel & 1) {
1833 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1837 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1839 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1840 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1841 hc->hw.conn = (hc->hw.conn & ~7) | 6;
1842 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1844 if (cq->channel & 2) {
1845 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1849 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1851 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1852 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1853 hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
1854 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1856 if (cq->channel & 3)
1857 hc->hw.trm |= 0x80; /* enable IOM-loop */
1859 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1860 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1861 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1863 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1865 case MISDN_CTRL_CONNECT:
1866 if (cq->channel == cq->p1) {
1870 if (cq->channel < 1 || cq->channel > 2 ||
1871 cq->p1 < 1 || cq->p1 > 2) {
1875 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1879 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1881 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1882 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1883 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1887 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1889 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1890 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1891 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
1892 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1894 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1896 case MISDN_CTRL_DISCONNECT:
1897 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1898 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1899 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1902 printk(KERN_WARNING "%s: unknown Op %x\n",
1911 open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
1912 struct channel_req *rq)
1916 if (debug & DEBUG_HW_OPEN)
1917 printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
1918 hc->dch.dev.id, __builtin_return_address(0));
1919 if (rq->protocol == ISDN_P_NONE)
1921 if (rq->adr.channel == 1) {
1922 /* TODO: E-Channel */
1925 if (!hc->initdone) {
1926 if (rq->protocol == ISDN_P_TE_S0) {
1927 err = create_l1(&hc->dch, hfc_l1callback);
1931 hc->hw.protocol = rq->protocol;
1932 ch->protocol = rq->protocol;
1933 err = init_card(hc);
1937 if (rq->protocol != ch->protocol) {
1938 if (hc->hw.protocol == ISDN_P_TE_S0)
1939 l1_event(hc->dch.l1, CLOSE_CHANNEL);
1940 if (rq->protocol == ISDN_P_TE_S0) {
1941 err = create_l1(&hc->dch, hfc_l1callback);
1945 hc->hw.protocol = rq->protocol;
1946 ch->protocol = rq->protocol;
1951 if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
1952 ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
1953 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
1954 0, NULL, GFP_KERNEL);
1957 if (!try_module_get(THIS_MODULE))
1958 printk(KERN_WARNING "%s: cannot get module\n", __func__);
1963 open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
1965 struct bchannel *bch;
1967 if (rq->adr.channel > 2)
1969 if (rq->protocol == ISDN_P_NONE)
1971 bch = &hc->bch[rq->adr.channel - 1];
1972 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1973 return -EBUSY; /* b-channel can only be opened once */
1974 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
1975 bch->ch.protocol = rq->protocol;
1976 rq->ch = &bch->ch; /* TODO: E-channel */
1977 if (!try_module_get(THIS_MODULE))
1978 printk(KERN_WARNING "%s: cannot get module\n", __func__);
1983 * device control function
1986 hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1988 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1989 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1990 struct hfc_pci *hc = dch->hw;
1991 struct channel_req *rq;
1994 if (dch->debug & DEBUG_HW)
1995 printk(KERN_DEBUG "%s: cmd:%x %p\n",
1996 __func__, cmd, arg);
2000 if ((rq->protocol == ISDN_P_TE_S0) ||
2001 (rq->protocol == ISDN_P_NT_S0))
2002 err = open_dchannel(hc, ch, rq);
2004 err = open_bchannel(hc, rq);
2007 if (debug & DEBUG_HW_OPEN)
2008 printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
2009 __func__, hc->dch.dev.id,
2010 __builtin_return_address(0));
2011 module_put(THIS_MODULE);
2013 case CONTROL_CHANNEL:
2014 err = channel_ctrl(hc, arg);
2017 if (dch->debug & DEBUG_HW)
2018 printk(KERN_DEBUG "%s: unknown command %x\n",
2026 setup_hw(struct hfc_pci *hc)
2030 printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
2033 pci_set_master(hc->pdev);
2035 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
2038 hc->hw.pci_io = (char __iomem *)(unsigned long)hc->pdev->resource[1].start;
2040 if (!hc->hw.pci_io) {
2041 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
2044 /* Allocate memory for FIFOS */
2045 /* the memory needs to be on a 32k boundary within the first 4G */
2046 pci_set_dma_mask(hc->pdev, 0xFFFF8000);
2047 buffer = pci_alloc_consistent(hc->pdev, 0x8000, &hc->hw.dmahandle);
2048 /* We silently assume the address is okay if nonzero */
2051 "HFC-PCI: Error allocating memory for FIFO!\n");
2054 hc->hw.fifos = buffer;
2055 pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
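/*
 * The unusual DMA mask 0xFFFF8000 (together with the 0x8000 allocation)
 * is meant to keep the FIFO block 32 KiB aligned within the first 4 GiB,
 * as the comment above notes: the chip only receives the base address
 * via PCI config register 0x80 and addresses the whole 32 KiB FIFO area
 * relative to it.
 */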
2056 hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
2058 "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
2059 (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
2060 (u_long) hc->hw.dmahandle, hc->irq, HZ);
2061 /* enable memory mapped ports, disable busmaster */
2062 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
2066 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
2067 /* At this point the needed PCI config is done */
2068 /* fifos are still not enabled */
2069 hc->hw.timer.function = (void *) hfcpci_Timer;
2070 hc->hw.timer.data = (long) hc;
2071 init_timer(&hc->hw.timer);
2072 /* default PCM master */
2073 test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
2078 release_card(struct hfc_pci *hc) {
2081 spin_lock_irqsave(&hc->lock, flags);
2082 hc->hw.int_m2 = 0; /* interrupt output off ! */
2084 mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
2085 mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
2086 if (hc->dch.timer.function != NULL) {
2087 del_timer(&hc->dch.timer);
2088 hc->dch.timer.function = NULL;
2090 spin_unlock_irqrestore(&hc->lock, flags);
2091 if (hc->hw.protocol == ISDN_P_TE_S0)
2092 l1_event(hc->dch.l1, CLOSE_CHANNEL);
2094 free_irq(hc->irq, hc);
2095 release_io_hfcpci(hc); /* must release after free_irq! */
2096 mISDN_unregister_device(&hc->dch.dev);
2097 mISDN_freebchannel(&hc->bch[1]);
2098 mISDN_freebchannel(&hc->bch[0]);
2099 mISDN_freedchannel(&hc->dch);
2100 pci_set_drvdata(hc->pdev, NULL);
2105 setup_card(struct hfc_pci *card)
2109 char name[MISDN_MAX_IDLEN];
2111 card->dch.debug = debug;
2112 spin_lock_init(&card->lock);
2113 mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
2114 card->dch.hw = card;
2115 card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
2116 card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
2117 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
2118 card->dch.dev.D.send = hfcpci_l2l1D;
2119 card->dch.dev.D.ctrl = hfc_dctrl;
2120 card->dch.dev.nrbchan = 2;
2121 for (i = 0; i < 2; i++) {
2122 card->bch[i].nr = i + 1;
2123 set_channelmap(i + 1, card->dch.dev.channelmap);
2124 card->bch[i].debug = debug;
2125 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM);
2126 card->bch[i].hw = card;
2127 card->bch[i].ch.send = hfcpci_l2l1B;
2128 card->bch[i].ch.ctrl = hfc_bctrl;
2129 card->bch[i].ch.nr = i + 1;
2130 list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
2132 err = setup_hw(card);
2135 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
2136 err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
2140 printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
2143 mISDN_freebchannel(&card->bch[1]);
2144 mISDN_freebchannel(&card->bch[0]);
2145 mISDN_freedchannel(&card->dch);
2150 /* private data in the PCI devices list */
2157 static const struct _hfc_map hfc_map[] =
2159 {HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
2160 {HFC_CCD_B000, 0, "Billion B000"},
2161 {HFC_CCD_B006, 0, "Billion B006"},
2162 {HFC_CCD_B007, 0, "Billion B007"},
2163 {HFC_CCD_B008, 0, "Billion B008"},
2164 {HFC_CCD_B009, 0, "Billion B009"},
2165 {HFC_CCD_B00A, 0, "Billion B00A"},
2166 {HFC_CCD_B00B, 0, "Billion B00B"},
2167 {HFC_CCD_B00C, 0, "Billion B00C"},
2168 {HFC_CCD_B100, 0, "Seyeon B100"},
2169 {HFC_CCD_B700, 0, "Primux II S0 B700"},
2170 {HFC_CCD_B701, 0, "Primux II S0 NT B701"},
2171 {HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
2172 {HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
2173 {HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
2174 {HFC_BERKOM_A1T, 0, "German telekom A1T"},
2175 {HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
2176 {HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
2177 {HFC_DIGI_DF_M_IOM2_E, 0,
2178 "Digi International DataFire Micro V IOM2 (Europe)"},
2179 {HFC_DIGI_DF_M_E, 0,
2180 "Digi International DataFire Micro V (Europe)"},
2181 {HFC_DIGI_DF_M_IOM2_A, 0,
2182 "Digi International DataFire Micro V IOM2 (North America)"},
2183 {HFC_DIGI_DF_M_A, 0,
2184 "Digi International DataFire Micro V (North America)"},
2185 {HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
2189 static struct pci_device_id hfc_ids[] =
2191 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0,
2192 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[0]},
2193 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000,
2194 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[1]},
2195 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006,
2196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[2]},
2197 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007,
2198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[3]},
2199 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008,
2200 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[4]},
2201 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009,
2202 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[5]},
2203 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A,
2204 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[6]},
2205 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B,
2206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[7]},
2207 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C,
2208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[8]},
2209 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100,
2210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[9]},
2211 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700,
2212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[10]},
2213 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701,
2214 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[11]},
2215 {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1,
2216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[12]},
2217 {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675,
2218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[13]},
2219 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT,
2220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[14]},
2221 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T,
2222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[15]},
2223 {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575,
2224 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[16]},
2225 {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0,
2226 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[17]},
2227 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E,
2228 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[18]},
2229 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,
2230 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[19]},
2231 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,
2232 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[20]},
2233 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,
2234 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[21]},
2235 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2,
2236 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[22]},
2240 static int __devinit
2241 hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2244 struct hfc_pci *card;
2245 struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
2247 card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC);
2249 printk(KERN_ERR "No kmem for HFC card\n");
2253 card->subtype = m->subtype;
2254 err = pci_enable_device(pdev);
2260 printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
2261 m->name, pci_name(pdev));
2263 card->irq = pdev->irq;
2264 pci_set_drvdata(pdev, card);
2265 err = setup_card(card);
2267 pci_set_drvdata(pdev, NULL);
2271 static void __devexit
2272 hfc_remove_pci(struct pci_dev *pdev)
2274 struct hfc_pci *card = pci_get_drvdata(pdev);
2280 printk(KERN_WARNING "%s: drvdata already removed\n",
2285 static struct pci_driver hfc_driver = {
2288 .remove = __devexit_p(hfc_remove_pci),
2289 .id_table = hfc_ids,
2293 _hfcpci_softirq(struct device *dev, void *arg)
2295 struct hfc_pci *hc = dev_get_drvdata(dev);
2296 struct bchannel *bch;
2300 if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
2301 spin_lock(&hc->lock);
2302 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
2303 if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
2304 main_rec_hfcpci(bch);
2307 bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
2308 if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
2309 main_rec_hfcpci(bch);
2312 spin_unlock(&hc->lock);
2318 hfcpci_softirq(void *arg)
2320 (void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
2323 /* if next event would be in the past ... */
2324 if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
2325 hfc_jiffies = jiffies + 1;
2327 hfc_jiffies += tics;
2328 hfc_tl.expires = hfc_jiffies;
2338 poll = HFCPCI_BTRANS_THRESHOLD;
2340 if (poll != HFCPCI_BTRANS_THRESHOLD) {
2341 tics = (poll * HZ) / 8000;
2344 poll = (tics * 8000) / HZ;
2345 if (poll > 256 || poll < 8) {
2346 printk(KERN_ERR "%s: Wrong poll value %d not in range "
2347 "of 8..256.\n", __func__, poll);
2352 if (poll != HFCPCI_BTRANS_THRESHOLD) {
2353 printk(KERN_INFO "%s: Using alternative poll value of %d\n",
2355 hfc_tl.function = (void *)hfcpci_softirq;
2357 init_timer(&hfc_tl);
2358 hfc_tl.expires = jiffies + tics;
2359 hfc_jiffies = hfc_tl.expires;
2362 tics = 0; /* indicate the use of controller's timer */
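/*
 * With the default poll of 128 (HFCPCI_BTRANS_THRESHOLD) no kernel timer
 * is armed at all: tics stays 0 and transparent FIFO processing is paced
 * by the controller's own interrupt, as described in the header comment.
 */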
2364 err = pci_register_driver(&hfc_driver);
2366 if (timer_pending(&hfc_tl))
2376 if (timer_pending(&hfc_tl))
2379 pci_unregister_driver(&hfc_driver);
2382 module_init(HFC_init);
2383 module_exit(HFC_cleanup);
2385 MODULE_DEVICE_TABLE(pci, hfc_ids);