2 * Driver for the Conexant CX23885 PCIe bridge
4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include <linux/init.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kmod.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/delay.h>
31 #include <asm/div64.h>
36 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
37 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
38 MODULE_LICENSE("GPL");
40 static unsigned int debug;
41 module_param(debug, int, 0644);
42 MODULE_PARM_DESC(debug, "enable debug messages");
44 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
45 module_param_array(card, int, NULL, 0444);
46 MODULE_PARM_DESC(card, "card type");
48 #define dprintk(level, fmt, arg...)\
49 do { if (debug >= level)\
50 printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
53 static unsigned int cx23885_devcount;
55 static DEFINE_MUTEX(devlist);
56 LIST_HEAD(cx23885_devlist);
58 #define NO_SYNC_LINE (-1U)
60 /* FIXME, these allocations will change when
61 * analog arrives. To be reviewed.
63 * 1 line = 16 bytes of CDT
65 * cdt size = 16 * linesize
70 * 0x00000000 0x00008fff FIFO clusters
71 * 0x00010000 0x000104af Channel Management Data Structures
72 * 0x000104b0 0x000104ff Free
73 * 0x00010500 0x000108bf 15 channels * iqsize
74 * 0x000108c0 0x000108ff Free
75 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
76 * 15 channels * (iqsize + (maxlines * linesize))
77 * 0x00010ea0 0x00010xxx Free
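 *
 * For example (hypothetical numbers): a channel whose FIFO spans 0x1000
 * bytes with bpl = 192 holds 0x1000 / 192 = 21 lines, so its CDT
 * occupies 21 * 16 = 336 bytes.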
80 static struct sram_channel cx23885_sram_channels[] = {
83 .cmds_start = 0x10000,
84 .ctrl_start = 0x10380,
88 .ptr1_reg = DMA1_PTR1,
89 .ptr2_reg = DMA1_PTR2,
90 .cnt1_reg = DMA1_CNT1,
91 .cnt2_reg = DMA1_CNT2,
100 .ptr1_reg = DMA2_PTR1,
101 .ptr2_reg = DMA2_PTR2,
102 .cnt1_reg = DMA2_CNT1,
103 .cnt2_reg = DMA2_CNT2,
107 .cmds_start = 0x100A0,
108 .ctrl_start = 0x10400,
110 .fifo_start = 0x5000,
112 .ptr1_reg = DMA3_PTR1,
113 .ptr2_reg = DMA3_PTR2,
114 .cnt1_reg = DMA3_CNT1,
115 .cnt2_reg = DMA3_CNT2,
124 .ptr1_reg = DMA4_PTR1,
125 .ptr2_reg = DMA4_PTR2,
126 .cnt1_reg = DMA4_CNT1,
127 .cnt2_reg = DMA4_CNT2,
136 .ptr1_reg = DMA5_PTR1,
137 .ptr2_reg = DMA5_PTR2,
138 .cnt1_reg = DMA5_CNT1,
139 .cnt2_reg = DMA5_CNT2,
143 .cmds_start = 0x10140,
144 .ctrl_start = 0x10440,
146 .fifo_start = 0x6000,
148 .ptr1_reg = DMA5_PTR1,
149 .ptr2_reg = DMA5_PTR2,
150 .cnt1_reg = DMA5_CNT1,
151 .cnt2_reg = DMA5_CNT2,
160 .ptr1_reg = DMA6_PTR1,
161 .ptr2_reg = DMA6_PTR2,
162 .cnt1_reg = DMA6_CNT1,
163 .cnt2_reg = DMA6_CNT2,
172 .ptr1_reg = DMA7_PTR1,
173 .ptr2_reg = DMA7_PTR2,
174 .cnt1_reg = DMA7_CNT1,
175 .cnt2_reg = DMA7_CNT2,
184 .ptr1_reg = DMA8_PTR1,
185 .ptr2_reg = DMA8_PTR2,
186 .cnt1_reg = DMA8_CNT1,
187 .cnt2_reg = DMA8_CNT2,
191 static struct sram_channel cx23887_sram_channels[] = {
194 .cmds_start = 0x10000,
195 .ctrl_start = 0x105b0,
199 .ptr1_reg = DMA1_PTR1,
200 .ptr2_reg = DMA1_PTR2,
201 .cnt1_reg = DMA1_CNT1,
202 .cnt2_reg = DMA1_CNT2,
211 .ptr1_reg = DMA2_PTR1,
212 .ptr2_reg = DMA2_PTR2,
213 .cnt1_reg = DMA2_CNT1,
214 .cnt2_reg = DMA2_CNT2,
218 .cmds_start = 0x100A0,
219 .ctrl_start = 0x10630,
221 .fifo_start = 0x5000,
223 .ptr1_reg = DMA3_PTR1,
224 .ptr2_reg = DMA3_PTR2,
225 .cnt1_reg = DMA3_CNT1,
226 .cnt2_reg = DMA3_CNT2,
235 .ptr1_reg = DMA4_PTR1,
236 .ptr2_reg = DMA4_PTR2,
237 .cnt1_reg = DMA4_CNT1,
238 .cnt2_reg = DMA4_CNT2,
247 .ptr1_reg = DMA5_PTR1,
248 .ptr2_reg = DMA5_PTR2,
249 .cnt1_reg = DMA5_CNT1,
250 .cnt2_reg = DMA5_CNT2,
254 .cmds_start = 0x10140,
255 .ctrl_start = 0x10670,
257 .fifo_start = 0x6000,
259 .ptr1_reg = DMA5_PTR1,
260 .ptr2_reg = DMA5_PTR2,
261 .cnt1_reg = DMA5_CNT1,
262 .cnt2_reg = DMA5_CNT2,
271 .ptr1_reg = DMA6_PTR1,
272 .ptr2_reg = DMA6_PTR2,
273 .cnt1_reg = DMA6_CNT1,
274 .cnt2_reg = DMA6_CNT2,
283 .ptr1_reg = DMA7_PTR1,
284 .ptr2_reg = DMA7_PTR2,
285 .cnt1_reg = DMA7_CNT1,
286 .cnt2_reg = DMA7_CNT2,
295 .ptr1_reg = DMA8_PTR1,
296 .ptr2_reg = DMA8_PTR2,
297 .cnt1_reg = DMA8_CNT1,
298 .cnt2_reg = DMA8_CNT2,
302 static int cx23885_risc_decode(u32 risc)
304 static char *instr[16] = {
305 [RISC_SYNC >> 28] = "sync",
306 [RISC_WRITE >> 28] = "write",
307 [RISC_WRITEC >> 28] = "writec",
308 [RISC_READ >> 28] = "read",
309 [RISC_READC >> 28] = "readc",
310 [RISC_JUMP >> 28] = "jump",
311 [RISC_SKIP >> 28] = "skip",
312 [RISC_WRITERM >> 28] = "writerm",
313 [RISC_WRITECM >> 28] = "writecm",
314 [RISC_WRITECR >> 28] = "writecr",
316 static int incr[16] = {
317 [RISC_WRITE >> 28] = 3,
318 [RISC_JUMP >> 28] = 3,
319 [RISC_SKIP >> 28] = 1,
320 [RISC_SYNC >> 28] = 1,
321 [RISC_WRITERM >> 28] = 3,
322 [RISC_WRITECM >> 28] = 3,
323 [RISC_WRITECR >> 28] = 4,
325 static char *bits[] = {
326 "12", "13", "14", "resync",
327 "cnt0", "cnt1", "18", "19",
328 "20", "21", "22", "23",
329 "irq1", "irq2", "eol", "sol",
333 printk("0x%08x [ %s", risc,
334 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
335 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
336 if (risc & (1 << (i + 12)))
337 printk(" %s", bits[i]);
338 printk(" count=%d ]\n", risc & 0xfff);
339 return incr[risc >> 28] ? incr[risc >> 28] : 1;
342 void cx23885_wakeup(struct cx23885_tsport *port,
343 struct cx23885_dmaqueue *q, u32 count)
345 struct cx23885_dev *dev = port->dev;
346 struct cx23885_buffer *buf;
349 for (bc = 0;; bc++) {
350 if (list_empty(&q->active))
352 buf = list_entry(q->active.next,
353 struct cx23885_buffer, vb.queue);
355 /* count comes from the hw and is 16 bits wide --
356 * this trick handles wrap-arounds correctly for
357 * up to 32767 buffers in flight... */
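/* Illustrative: if the hardware counter has wrapped to count = 0x0003
 * while buf->count = 0xfffe, then (s16)(0x0003 - 0xfffe) = 5 >= 0 and
 * the buffer is treated as complete. */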
358 if ((s16) (count - buf->count) < 0)
361 do_gettimeofday(&buf->vb.ts);
362 dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
364 buf->vb.state = VIDEOBUF_DONE;
365 list_del(&buf->vb.queue);
366 wake_up(&buf->vb.done);
368 if (list_empty(&q->active))
369 del_timer(&q->timeout);
371 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
373 printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
377 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
378 struct sram_channel *ch,
379 unsigned int bpl, u32 risc)
381 unsigned int i, lines;
384 if (ch->cmds_start == 0) {
385 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
387 cx_write(ch->ptr1_reg, 0);
388 cx_write(ch->ptr2_reg, 0);
389 cx_write(ch->cnt2_reg, 0);
390 cx_write(ch->cnt1_reg, 0);
393 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
397 bpl = (bpl + 7) & ~7; /* alignment */
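/* e.g. a requested bpl of 188 becomes (188 + 7) & ~7 = 192, keeping
 * each FIFO line a multiple of 8 bytes. */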
399 lines = ch->fifo_size / bpl;
404 cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
409 for (i = 0; i < lines; i++) {
410 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
411 ch->fifo_start + bpl*i);
412 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
413 cx_write(cdt + 16*i + 4, 0);
414 cx_write(cdt + 16*i + 8, 0);
415 cx_write(cdt + 16*i + 12, 0);
420 cx_write(ch->cmds_start + 0, 8);
422 cx_write(ch->cmds_start + 0, risc);
423 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
424 cx_write(ch->cmds_start + 8, cdt);
425 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
426 cx_write(ch->cmds_start + 16, ch->ctrl_start);
428 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
430 cx_write(ch->cmds_start + 20, 64 >> 2);
431 for (i = 24; i < 80; i += 4)
432 cx_write(ch->cmds_start + i, 0);
435 cx_write(ch->ptr1_reg, ch->fifo_start);
436 cx_write(ch->ptr2_reg, cdt);
437 cx_write(ch->cnt2_reg, (lines*16) >> 3);
438 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
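/* The >> 3 shifts appear to express byte counts in 64-bit (8-byte)
 * units; e.g. with bpl = 192, cnt1 is programmed with 192 / 8 - 1 = 23. */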
440 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
449 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
450 struct sram_channel *ch)
452 static char *name[] = {
469 unsigned int i, j, n;
471 printk(KERN_WARNING "%s: %s - dma channel status dump\n",
472 dev->name, ch->name);
473 for (i = 0; i < ARRAY_SIZE(name); i++)
474 printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
476 cx_read(ch->cmds_start + 4*i));
478 for (i = 0; i < 4; i++) {
479 risc = cx_read(ch->cmds_start + 4 * (i + 14));
480 printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
481 cx23885_risc_decode(risc);
483 for (i = 0; i < (64 >> 2); i += n) {
484 risc = cx_read(ch->ctrl_start + 4 * i);
485 /* No consideration for bits 63-32 */
487 printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
488 ch->ctrl_start + 4 * i, i);
489 n = cx23885_risc_decode(risc);
490 for (j = 1; j < n; j++) {
491 risc = cx_read(ch->ctrl_start + 4 * (i + j));
492 printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
493 dev->name, i+j, risc, j);
497 printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
498 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
499 printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
500 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
501 printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
502 dev->name, cx_read(ch->ptr1_reg));
503 printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
504 dev->name, cx_read(ch->ptr2_reg));
505 printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
506 dev->name, cx_read(ch->cnt1_reg));
507 printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
508 dev->name, cx_read(ch->cnt2_reg));
511 static void cx23885_risc_disasm(struct cx23885_tsport *port,
512 struct btcx_riscmem *risc)
514 struct cx23885_dev *dev = port->dev;
515 unsigned int i, j, n;
517 printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
518 dev->name, risc->cpu, (unsigned long)risc->dma);
519 for (i = 0; i < (risc->size >> 2); i += n) {
520 printk(KERN_INFO "%s: %04d: ", dev->name, i);
521 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
522 for (j = 1; j < n; j++)
523 printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
524 dev->name, i + j, risc->cpu[i + j], j);
525 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
530 static void cx23885_shutdown(struct cx23885_dev *dev)
532 /* disable RISC controller */
533 cx_write(DEV_CNTRL2, 0);
535 /* Disable all IR activity */
536 cx_write(IR_CNTRL_REG, 0);
538 /* Disable Video A/B activity */
539 cx_write(VID_A_DMA_CTL, 0);
540 cx_write(VID_B_DMA_CTL, 0);
541 cx_write(VID_C_DMA_CTL, 0);
543 /* Disable Audio activity */
544 cx_write(AUD_INT_DMA_CTL, 0);
545 cx_write(AUD_EXT_DMA_CTL, 0);
547 /* Disable Serial port */
548 cx_write(UART_CTL, 0);
550 /* Disable Interrupts */
551 cx_write(PCI_INT_MSK, 0);
552 cx_write(VID_A_INT_MSK, 0);
553 cx_write(VID_B_INT_MSK, 0);
554 cx_write(VID_C_INT_MSK, 0);
555 cx_write(AUDIO_INT_INT_MSK, 0);
556 cx_write(AUDIO_EXT_INT_MSK, 0);
560 static void cx23885_reset(struct cx23885_dev *dev)
562 dprintk(1, "%s()\n", __func__);
564 cx23885_shutdown(dev);
566 cx_write(PCI_INT_STAT, 0xffffffff);
567 cx_write(VID_A_INT_STAT, 0xffffffff);
568 cx_write(VID_B_INT_STAT, 0xffffffff);
569 cx_write(VID_C_INT_STAT, 0xffffffff);
570 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
571 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
572 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
573 cx_write(PAD_CTRL, 0x00500300);
577 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
579 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
580 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
582 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
583 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
584 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
586 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
587 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
588 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
590 cx23885_gpio_setup(dev);
594 static int cx23885_pci_quirks(struct cx23885_dev *dev)
596 dprintk(1, "%s()\n", __func__);
598 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
599 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
600 * occur on the cx23887 bridge.
602 if (dev->bridge == CX23885_BRIDGE_885)
603 cx_clear(RDR_TLCTL0, 1 << 4);
608 static int get_resources(struct cx23885_dev *dev)
610 if (request_mem_region(pci_resource_start(dev->pci, 0),
611 pci_resource_len(dev->pci, 0),
615 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
616 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
621 static void cx23885_timeout(unsigned long data);
622 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
623 u32 reg, u32 mask, u32 value);
625 static int cx23885_init_tsport(struct cx23885_dev *dev,
626 struct cx23885_tsport *port, int portno)
628 dprintk(1, "%s(portno=%d)\n", __func__, portno);
630 /* Transport bus init dma queue - Common settings */
631 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
632 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
633 port->vld_misc_val = 0x0;
634 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
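/* 0x47 is the MPEG-2 TS sync byte; 188 is the TS packet length in bytes. */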
636 spin_lock_init(&port->slock);
640 INIT_LIST_HEAD(&port->mpegq.active);
641 INIT_LIST_HEAD(&port->mpegq.queued);
642 port->mpegq.timeout.function = cx23885_timeout;
643 port->mpegq.timeout.data = (unsigned long)port;
644 init_timer(&port->mpegq.timeout);
646 mutex_init(&port->frontends.lock);
647 INIT_LIST_HEAD(&port->frontends.felist);
648 port->frontends.active_fe_id = 0;
650 /* This should be hardcoded to allow a single frontend
651 * attachment to this tsport, keeping the -dvb.c
652 * code clean and safe.
654 if (!port->num_frontends)
655 port->num_frontends = 1;
659 port->reg_gpcnt = VID_B_GPCNT;
660 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
661 port->reg_dma_ctl = VID_B_DMA_CTL;
662 port->reg_lngth = VID_B_LNGTH;
663 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
664 port->reg_gen_ctrl = VID_B_GEN_CTL;
665 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
666 port->reg_sop_status = VID_B_SOP_STATUS;
667 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
668 port->reg_vld_misc = VID_B_VLD_MISC;
669 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
670 port->reg_src_sel = VID_B_SRC_SEL;
671 port->reg_ts_int_msk = VID_B_INT_MSK;
672 port->reg_ts_int_stat = VID_B_INT_STAT;
673 port->sram_chno = SRAM_CH03; /* VID_B */
674 port->pci_irqmask = 0x02; /* VID_B bit1 */
677 port->reg_gpcnt = VID_C_GPCNT;
678 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
679 port->reg_dma_ctl = VID_C_DMA_CTL;
680 port->reg_lngth = VID_C_LNGTH;
681 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
682 port->reg_gen_ctrl = VID_C_GEN_CTL;
683 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
684 port->reg_sop_status = VID_C_SOP_STATUS;
685 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
686 port->reg_vld_misc = VID_C_VLD_MISC;
687 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
688 port->reg_src_sel = 0;
689 port->reg_ts_int_msk = VID_C_INT_MSK;
690 port->reg_ts_int_stat = VID_C_INT_STAT;
691 port->sram_chno = SRAM_CH06; /* VID_C */
692 port->pci_irqmask = 0x04; /* VID_C bit2 */
698 cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
699 port->reg_dma_ctl, port->dma_ctl_val, 0x00);
704 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
706 switch (cx_read(RDR_CFG2) & 0xff) {
709 dev->hwrevision = 0xa0;
713 dev->hwrevision = 0xa1;
717 dev->hwrevision = 0xb0;
721 dev->hwrevision = 0xc0;
725 dev->hwrevision = 0xc0;
728 dev->hwrevision = 0xb1;
731 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
732 __func__, dev->hwrevision);
735 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
736 __func__, dev->hwrevision);
738 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
739 __func__, dev->hwrevision);
742 static int cx23885_dev_setup(struct cx23885_dev *dev)
746 mutex_init(&dev->lock);
748 atomic_inc(&dev->refcount);
750 dev->nr = cx23885_devcount++;
751 sprintf(dev->name, "cx23885[%d]", dev->nr);
753 mutex_lock(&devlist);
754 list_add_tail(&dev->devlist, &cx23885_devlist);
755 mutex_unlock(&devlist);
757 /* Configure the internal memory */
758 if (dev->pci->device == 0x8880) {
759 dev->bridge = CX23885_BRIDGE_887;
760 /* Apply a sensible clock frequency for the PCIe bridge */
761 dev->clk_freq = 25000000;
762 dev->sram_channels = cx23887_sram_channels;
764 if (dev->pci->device == 0x8852) {
765 dev->bridge = CX23885_BRIDGE_885;
766 /* Apply a sensible clock frequency for the PCIe bridge */
767 dev->clk_freq = 28000000;
768 dev->sram_channels = cx23885_sram_channels;
772 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
773 __func__, dev->bridge);
777 if (card[dev->nr] < cx23885_bcount)
778 dev->board = card[dev->nr];
779 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
780 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
781 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
782 dev->board = cx23885_subids[i].card;
783 if (UNSET == dev->board) {
784 dev->board = CX23885_BOARD_UNKNOWN;
785 cx23885_card_list(dev);
788 /* If the user specified a clk freq override, apply it */
789 if (cx23885_boards[dev->board].clk_freq > 0)
790 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
792 dev->pci_bus = dev->pci->bus->number;
793 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
794 dev->pci_irqmask = 0x001f00;
795 if (cx23885_boards[dev->board].cimax > 0)
796 dev->pci_irqmask |= 0x01800000; /* for CiMaxes */
798 /* External Master 1 Bus */
799 dev->i2c_bus[0].nr = 0;
800 dev->i2c_bus[0].dev = dev;
801 dev->i2c_bus[0].reg_stat = I2C1_STAT;
802 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
803 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
804 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
805 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
806 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
808 /* External Master 2 Bus */
809 dev->i2c_bus[1].nr = 1;
810 dev->i2c_bus[1].dev = dev;
811 dev->i2c_bus[1].reg_stat = I2C2_STAT;
812 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
813 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
814 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
815 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
816 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
818 /* Internal Master 3 Bus */
819 dev->i2c_bus[2].nr = 2;
820 dev->i2c_bus[2].dev = dev;
821 dev->i2c_bus[2].reg_stat = I2C3_STAT;
822 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
823 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
824 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
825 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
826 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
828 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
829 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
830 cx23885_init_tsport(dev, &dev->ts1, 1);
832 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
833 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
834 cx23885_init_tsport(dev, &dev->ts2, 2);
836 if (get_resources(dev) < 0) {
837 printk(KERN_ERR "CORE %s No more PCIe resources for "
838 "subsystem: %04x:%04x\n",
839 dev->name, dev->pci->subsystem_vendor,
840 dev->pci->subsystem_device);
847 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
848 pci_resource_len(dev->pci, 0));
850 dev->bmmio = (u8 __iomem *)dev->lmmio;
852 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
853 dev->name, dev->pci->subsystem_vendor,
854 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
855 dev->board, card[dev->nr] == dev->board ?
856 "insmod option" : "autodetected");
858 cx23885_pci_quirks(dev);
860 /* Assume some sensible defaults */
861 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
862 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
863 dev->radio_type = cx23885_boards[dev->board].radio_type;
864 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
866 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x\n",
867 __func__, dev->tuner_type, dev->tuner_addr);
868 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
869 __func__, dev->radio_type, dev->radio_addr);
874 cx23885_i2c_register(&dev->i2c_bus[0]);
875 cx23885_i2c_register(&dev->i2c_bus[1]);
876 cx23885_i2c_register(&dev->i2c_bus[2]);
877 cx23885_card_setup(dev);
878 call_all(dev, tuner, s_standby);
879 cx23885_ir_init(dev);
881 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
882 if (cx23885_video_register(dev) < 0) {
883 printk(KERN_ERR "%s() Failed to register analog "
884 "video adapters on VID_A\n", __func__);
888 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
889 if (cx23885_dvb_register(&dev->ts1) < 0) {
890 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
894 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
895 if (cx23885_417_register(dev) < 0) {
897 "%s() Failed to register 417 on VID_B\n",
902 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
903 if (cx23885_dvb_register(&dev->ts2) < 0) {
905 "%s() Failed to register dvb on VID_C\n",
909 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
910 if (cx23885_417_register(dev) < 0) {
912 "%s() Failed to register 417 on VID_C\n",
917 cx23885_dev_checkrevision(dev);
922 static void cx23885_dev_unregister(struct cx23885_dev *dev)
924 release_mem_region(pci_resource_start(dev->pci, 0),
925 pci_resource_len(dev->pci, 0));
927 if (!atomic_dec_and_test(&dev->refcount))
930 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
931 cx23885_video_unregister(dev);
933 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
934 cx23885_dvb_unregister(&dev->ts1);
936 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
937 cx23885_417_unregister(dev);
939 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
940 cx23885_dvb_unregister(&dev->ts2);
942 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
943 cx23885_417_unregister(dev);
945 cx23885_i2c_unregister(&dev->i2c_bus[2]);
946 cx23885_i2c_unregister(&dev->i2c_bus[1]);
947 cx23885_i2c_unregister(&dev->i2c_bus[0]);
952 static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
953 unsigned int offset, u32 sync_line,
954 unsigned int bpl, unsigned int padding,
957 struct scatterlist *sg;
958 unsigned int line, todo;
960 /* sync instruction */
961 if (sync_line != NO_SYNC_LINE)
962 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
966 for (line = 0; line < lines; line++) {
967 while (offset && offset >= sg_dma_len(sg)) {
968 offset -= sg_dma_len(sg);
971 if (bpl <= sg_dma_len(sg)-offset) {
972 /* fits into current chunk */
973 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
974 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
975 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
978 /* scanline needs to be split */
980 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
981 (sg_dma_len(sg)-offset));
982 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
983 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
984 todo -= (sg_dma_len(sg)-offset);
987 while (todo > sg_dma_len(sg)) {
988 *(rp++) = cpu_to_le32(RISC_WRITE|
990 *(rp++) = cpu_to_le32(sg_dma_address(sg));
991 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
992 todo -= sg_dma_len(sg);
995 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
996 *(rp++) = cpu_to_le32(sg_dma_address(sg));
997 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1006 int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
1007 struct scatterlist *sglist, unsigned int top_offset,
1008 unsigned int bottom_offset, unsigned int bpl,
1009 unsigned int padding, unsigned int lines)
1011 u32 instructions, fields;
1016 if (UNSET != top_offset)
1018 if (UNSET != bottom_offset)
1021 /* estimate risc mem: worst case is one write per page border +
1022 one write per scan line + syncs + jump (all 2 dwords). Padding
1023 can cause next bpl to start close to a page border. First DMA
1024 region may be smaller than PAGE_SIZE */
1025 /* write and jump need an extra dword */
1026 instructions = fields * (1 + ((bpl + padding) * lines)
1027 / PAGE_SIZE + lines);
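/* Rough illustration with hypothetical sizes: one field, bpl = 1440,
 * padding = 0, lines = 576, PAGE_SIZE = 4096 gives
 * 1 + (1440 * 576) / 4096 + 576 = 779 instructions, each budgeted at
 * 12 bytes (3 dwords) of RISC memory. */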
1029 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1033 /* write risc instructions */
1035 if (UNSET != top_offset)
1036 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1037 bpl, padding, lines);
1038 if (UNSET != bottom_offset)
1039 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1040 bpl, padding, lines);
1042 /* save pointer to jmp instruction address */
1044 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1048 static int cx23885_risc_databuffer(struct pci_dev *pci,
1049 struct btcx_riscmem *risc,
1050 struct scatterlist *sglist,
1058 /* estimate risc mem: worst case is one write per page border +
1059 one write per scan line + syncs + jump (all 2 dwords). Here
1060 there is no padding and no sync. First DMA region may be smaller
1062 /* Jump and write need an extra dword */
1063 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
1066 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1070 /* write risc instructions */
1072 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);
1074 /* save pointer to jmp instruction address */
1076 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1080 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
1081 u32 reg, u32 mask, u32 value)
1086 rc = btcx_riscmem_alloc(pci, risc, 4*16);
1090 /* write risc instructions */
1092 *(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
1093 *(rp++) = cpu_to_le32(reg);
1094 *(rp++) = cpu_to_le32(value);
1095 *(rp++) = cpu_to_le32(mask);
1096 *(rp++) = cpu_to_le32(RISC_JUMP);
1097 *(rp++) = cpu_to_le32(risc->dma);
1098 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
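/* The stopper is a tiny self-contained program: a masked write of
 * 'value' (under 'mask') to 'reg' that raises IRQ2, followed by a jump
 * back to its own start. Buffer programs chain their final jump here
 * (see cx23885_buf_queue), so the port's DMA control bits are cleared
 * once the last queued buffer completes. */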
1102 void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
1104 struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
1106 BUG_ON(in_interrupt());
1107 videobuf_waiton(&buf->vb, 0, 0);
1108 videobuf_dma_unmap(q, dma);
1109 videobuf_dma_free(dma);
1110 btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
1111 buf->vb.state = VIDEOBUF_NEEDS_INIT;
1114 static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1116 struct cx23885_dev *dev = port->dev;
1118 dprintk(1, "%s() Register Dump\n", __func__);
1119 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
1120 cx_read(DEV_CNTRL2));
1121 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
1122 cx_read(PCI_INT_MSK));
1123 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
1124 cx_read(AUDIO_INT_INT_MSK));
1125 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
1126 cx_read(AUD_INT_DMA_CTL));
1127 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
1128 cx_read(AUDIO_EXT_INT_MSK));
1129 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
1130 cx_read(AUD_EXT_DMA_CTL));
1131 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
1133 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
1134 cx_read(ALT_PIN_OUT_SEL));
1135 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
1137 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
1138 port->reg_gpcnt, cx_read(port->reg_gpcnt));
1139 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
1140 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
1141 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
1142 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
1143 if (port->reg_src_sel)
1144 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1145 port->reg_src_sel, cx_read(port->reg_src_sel));
1146 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
1147 port->reg_lngth, cx_read(port->reg_lngth));
1148 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
1149 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
1150 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
1151 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
1152 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
1153 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
1154 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
1155 port->reg_sop_status, cx_read(port->reg_sop_status));
1156 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
1157 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
1158 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
1159 port->reg_vld_misc, cx_read(port->reg_vld_misc));
1160 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
1161 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
1162 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
1163 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
1166 static int cx23885_start_dma(struct cx23885_tsport *port,
1167 struct cx23885_dmaqueue *q,
1168 struct cx23885_buffer *buf)
1170 struct cx23885_dev *dev = port->dev;
1173 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1174 buf->vb.width, buf->vb.height, buf->vb.field);
1176 /* Stop the fifo and risc engine for this port */
1177 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1179 /* setup fifo + format */
1180 cx23885_sram_channel_setup(dev,
1181 &dev->sram_channels[port->sram_chno],
1182 port->ts_packet_size, buf->risc.dma);
1184 cx23885_sram_channel_dump(dev,
1185 &dev->sram_channels[port->sram_chno]);
1186 cx23885_risc_disasm(port, &buf->risc);
1189 /* write TS length to chip */
1190 cx_write(port->reg_lngth, buf->vb.width);
1192 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1193 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1194 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1196 cx23885_boards[dev->board].portb,
1197 cx23885_boards[dev->board].portc);
1201 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1202 cx23885_av_clk(dev, 0);
1206 /* If the port supports SRC SELECT, configure it */
1207 if (port->reg_src_sel)
1208 cx_write(port->reg_src_sel, port->src_sel_val);
1210 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1211 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1212 cx_write(port->reg_vld_misc, port->vld_misc_val);
1213 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1216 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1217 /* reset counter to zero */
1218 cx_write(port->reg_gpcnt_ctl, 3);
1221 /* Set VIDB pins to input */
1222 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1223 reg = cx_read(PAD_CTRL);
1224 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1225 cx_write(PAD_CTRL, reg);
1228 /* Set VIDC pins to input */
1229 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1230 reg = cx_read(PAD_CTRL);
1231 reg &= ~0x4; /* Clear TS2_SOP_OE */
1232 cx_write(PAD_CTRL, reg);
1235 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1237 reg = cx_read(PAD_CTRL);
1238 reg = reg & ~0x1; /* Clear TS1_OE */
1240 /* FIXME, bit 2 writing here is questionable */
1241 /* set TS1_SOP_OE and TS1_OE_HI */
1243 cx_write(PAD_CTRL, reg);
1245 /* FIXME and these two registers should be documented. */
1246 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1247 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1250 switch (dev->bridge) {
1251 case CX23885_BRIDGE_885:
1252 case CX23885_BRIDGE_887:
1254 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1255 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1256 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1257 cx_set(PCI_INT_MSK, dev->pci_irqmask | port->pci_irqmask);
1263 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1265 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1266 cx23885_av_clk(dev, 1);
1269 cx23885_tsport_reg_dump(port);
1274 static int cx23885_stop_dma(struct cx23885_tsport *port)
1276 struct cx23885_dev *dev = port->dev;
1279 dprintk(1, "%s()\n", __func__);
1281 /* Stop interrupts and DMA */
1282 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1283 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1285 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1287 reg = cx_read(PAD_CTRL);
1292 /* clear TS1_SOP_OE and TS1_OE_HI */
1294 cx_write(PAD_CTRL, reg);
1295 cx_write(port->reg_src_sel, 0);
1296 cx_write(port->reg_gen_ctrl, 8);
1300 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1301 cx23885_av_clk(dev, 0);
1306 int cx23885_restart_queue(struct cx23885_tsport *port,
1307 struct cx23885_dmaqueue *q)
1309 struct cx23885_dev *dev = port->dev;
1310 struct cx23885_buffer *buf;
1312 dprintk(5, "%s()\n", __func__);
1313 if (list_empty(&q->active)) {
1314 struct cx23885_buffer *prev;
1317 dprintk(5, "%s() queue is empty\n", __func__);
1320 if (list_empty(&q->queued))
1322 buf = list_entry(q->queued.next, struct cx23885_buffer,
1325 list_del(&buf->vb.queue);
1326 list_add_tail(&buf->vb.queue, &q->active);
1327 cx23885_start_dma(port, q, buf);
1328 buf->vb.state = VIDEOBUF_ACTIVE;
1329 buf->count = q->count++;
1330 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
1331 dprintk(5, "[%p/%d] restart_queue - f/active\n",
1334 } else if (prev->vb.width == buf->vb.width &&
1335 prev->vb.height == buf->vb.height &&
1336 prev->fmt == buf->fmt) {
1337 list_del(&buf->vb.queue);
1338 list_add_tail(&buf->vb.queue, &q->active);
1339 buf->vb.state = VIDEOBUF_ACTIVE;
1340 buf->count = q->count++;
1341 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1342 /* 64 bit bits 63-32 */
1343 prev->risc.jmp[2] = cpu_to_le32(0);
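/* Patching the previous buffer's jump address splices this buffer into
 * the RISC program that is already running, so DMA flows straight into
 * it without a restart. */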
1344 dprintk(5, "[%p/%d] restart_queue - m/active\n",
1354 buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
1355 dprintk(2, "restart_queue [%p/%d]: restart dma\n",
1357 cx23885_start_dma(port, q, buf);
1358 list_for_each_entry(buf, &q->active, vb.queue)
1359 buf->count = q->count++;
1360 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
1364 /* ------------------------------------------------------------------ */
1366 int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
1367 struct cx23885_buffer *buf, enum v4l2_field field)
1369 struct cx23885_dev *dev = port->dev;
1370 int size = port->ts_packet_size * port->ts_packet_count;
1373 dprintk(1, "%s: %p\n", __func__, buf);
1374 if (0 != buf->vb.baddr && buf->vb.bsize < size)
1377 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
1378 buf->vb.width = port->ts_packet_size;
1379 buf->vb.height = port->ts_packet_count;
1380 buf->vb.size = size;
1381 buf->vb.field = field /*V4L2_FIELD_TOP*/;
1383 rc = videobuf_iolock(q, &buf->vb, NULL);
1386 cx23885_risc_databuffer(dev->pci, &buf->risc,
1387 videobuf_to_dma(&buf->vb)->sglist,
1388 buf->vb.width, buf->vb.height);
1390 buf->vb.state = VIDEOBUF_PREPARED;
1394 cx23885_free_buffer(q, buf);
1398 void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1400 struct cx23885_buffer *prev;
1401 struct cx23885_dev *dev = port->dev;
1402 struct cx23885_dmaqueue *cx88q = &port->mpegq;
1404 /* add jump to stopper */
1405 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
1406 buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
1407 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1409 if (list_empty(&cx88q->active)) {
1410 dprintk(1, "queue is empty - first active\n");
1411 list_add_tail(&buf->vb.queue, &cx88q->active);
1412 cx23885_start_dma(port, cx88q, buf);
1413 buf->vb.state = VIDEOBUF_ACTIVE;
1414 buf->count = cx88q->count++;
1415 mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
1416 dprintk(1, "[%p/%d] %s - first active\n",
1417 buf, buf->vb.i, __func__);
1419 dprintk(1, "queue is not empty - append to active\n");
1420 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
1422 list_add_tail(&buf->vb.queue, &cx88q->active);
1423 buf->vb.state = VIDEOBUF_ACTIVE;
1424 buf->count = cx88q->count++;
1425 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1426 prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
1427 dprintk(1, "[%p/%d] %s - append to active\n",
1428 buf, buf->vb.i, __func__);
1432 /* ----------------------------------------------------------- */
1434 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
1437 struct cx23885_dev *dev = port->dev;
1438 struct cx23885_dmaqueue *q = &port->mpegq;
1439 struct cx23885_buffer *buf;
1440 unsigned long flags;
1442 spin_lock_irqsave(&port->slock, flags);
1443 while (!list_empty(&q->active)) {
1444 buf = list_entry(q->active.next, struct cx23885_buffer,
1446 list_del(&buf->vb.queue);
1447 buf->vb.state = VIDEOBUF_ERROR;
1448 wake_up(&buf->vb.done);
1449 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1450 buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
1453 dprintk(1, "restarting queue\n");
1454 cx23885_restart_queue(port, q);
1456 spin_unlock_irqrestore(&port->slock, flags);
1459 void cx23885_cancel_buffers(struct cx23885_tsport *port)
1461 struct cx23885_dev *dev = port->dev;
1462 struct cx23885_dmaqueue *q = &port->mpegq;
1464 dprintk(1, "%s()\n", __func__);
1465 del_timer_sync(&q->timeout);
1466 cx23885_stop_dma(port);
1467 do_cancel_buffers(port, "cancel", 0);
1470 static void cx23885_timeout(unsigned long data)
1472 struct cx23885_tsport *port = (struct cx23885_tsport *)data;
1473 struct cx23885_dev *dev = port->dev;
1475 dprintk(1, "%s()\n", __func__);
1478 cx23885_sram_channel_dump(dev,
1479 &dev->sram_channels[port->sram_chno]);
1481 cx23885_stop_dma(port);
1482 do_cancel_buffers(port, "timeout", 1);
1485 int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1487 /* FIXME: port1 assumption here. */
1488 struct cx23885_tsport *port = &dev->ts1;
1495 count = cx_read(port->reg_gpcnt);
1496 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1497 status, cx_read(port->reg_ts_int_msk), count);
1499 if ((status & VID_B_MSK_BAD_PKT) ||
1500 (status & VID_B_MSK_OPC_ERR) ||
1501 (status & VID_B_MSK_VBI_OPC_ERR) ||
1502 (status & VID_B_MSK_SYNC) ||
1503 (status & VID_B_MSK_VBI_SYNC) ||
1504 (status & VID_B_MSK_OF) ||
1505 (status & VID_B_MSK_VBI_OF)) {
1506 printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
1507 "= 0x%x\n", dev->name, status);
1508 if (status & VID_B_MSK_BAD_PKT)
1509 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1510 if (status & VID_B_MSK_OPC_ERR)
1511 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1512 if (status & VID_B_MSK_VBI_OPC_ERR)
1513 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1514 if (status & VID_B_MSK_SYNC)
1515 dprintk(1, " VID_B_MSK_SYNC\n");
1516 if (status & VID_B_MSK_VBI_SYNC)
1517 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1518 if (status & VID_B_MSK_OF)
1519 dprintk(1, " VID_B_MSK_OF\n");
1520 if (status & VID_B_MSK_VBI_OF)
1521 dprintk(1, " VID_B_MSK_VBI_OF\n");
1523 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1524 cx23885_sram_channel_dump(dev,
1525 &dev->sram_channels[port->sram_chno]);
1526 cx23885_417_check_encoder(dev);
1527 } else if (status & VID_B_MSK_RISCI1) {
1528 dprintk(7, " VID_B_MSK_RISCI1\n");
1529 spin_lock(&port->slock);
1530 cx23885_wakeup(port, &port->mpegq, count);
1531 spin_unlock(&port->slock);
1532 } else if (status & VID_B_MSK_RISCI2) {
1533 dprintk(7, " VID_B_MSK_RISCI2\n");
1534 spin_lock(&port->slock);
1535 cx23885_restart_queue(port, &port->mpegq);
1536 spin_unlock(&port->slock);
1539 cx_write(port->reg_ts_int_stat, status);
1546 static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1548 struct cx23885_dev *dev = port->dev;
1552 if ((status & VID_BC_MSK_OPC_ERR) ||
1553 (status & VID_BC_MSK_BAD_PKT) ||
1554 (status & VID_BC_MSK_SYNC) ||
1555 (status & VID_BC_MSK_OF)) {
1557 if (status & VID_BC_MSK_OPC_ERR)
1558 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1559 VID_BC_MSK_OPC_ERR);
1561 if (status & VID_BC_MSK_BAD_PKT)
1562 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1563 VID_BC_MSK_BAD_PKT);
1565 if (status & VID_BC_MSK_SYNC)
1566 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1569 if (status & VID_BC_MSK_OF)
1570 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1573 printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
1575 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1576 cx23885_sram_channel_dump(dev,
1577 &dev->sram_channels[port->sram_chno]);
1579 } else if (status & VID_BC_MSK_RISCI1) {
1581 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1583 spin_lock(&port->slock);
1584 count = cx_read(port->reg_gpcnt);
1585 cx23885_wakeup(port, &port->mpegq, count);
1586 spin_unlock(&port->slock);
1588 } else if (status & VID_BC_MSK_RISCI2) {
1590 dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);
1592 spin_lock(&port->slock);
1593 cx23885_restart_queue(port, &port->mpegq);
1594 spin_unlock(&port->slock);
1598 cx_write(port->reg_ts_int_stat, status);
1605 static irqreturn_t cx23885_irq(int irq, void *dev_id)
1607 struct cx23885_dev *dev = dev_id;
1608 struct cx23885_tsport *ts1 = &dev->ts1;
1609 struct cx23885_tsport *ts2 = &dev->ts2;
1610 u32 pci_status, pci_mask;
1611 u32 vida_status, vida_mask;
1612 u32 ts1_status, ts1_mask;
1613 u32 ts2_status, ts2_mask;
1614 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1616 pci_status = cx_read(PCI_INT_STAT);
1617 pci_mask = cx_read(PCI_INT_MSK);
1618 vida_status = cx_read(VID_A_INT_STAT);
1619 vida_mask = cx_read(VID_A_INT_MSK);
1620 ts1_status = cx_read(VID_B_INT_STAT);
1621 ts1_mask = cx_read(VID_B_INT_MSK);
1622 ts2_status = cx_read(VID_C_INT_STAT);
1623 ts2_mask = cx_read(VID_C_INT_MSK);
1625 if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
1628 vida_count = cx_read(VID_A_GPCNT);
1629 ts1_count = cx_read(ts1->reg_gpcnt);
1630 ts2_count = cx_read(ts2->reg_gpcnt);
1631 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1632 pci_status, pci_mask);
1633 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1634 vida_status, vida_mask, vida_count);
1635 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1636 ts1_status, ts1_mask, ts1_count);
1637 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1638 ts2_status, ts2_mask, ts2_count);
1640 if ((pci_status & PCI_MSK_RISC_RD) ||
1641 (pci_status & PCI_MSK_RISC_WR) ||
1642 (pci_status & PCI_MSK_AL_RD) ||
1643 (pci_status & PCI_MSK_AL_WR) ||
1644 (pci_status & PCI_MSK_APB_DMA) ||
1645 (pci_status & PCI_MSK_VID_C) ||
1646 (pci_status & PCI_MSK_VID_B) ||
1647 (pci_status & PCI_MSK_VID_A) ||
1648 (pci_status & PCI_MSK_AUD_INT) ||
1649 (pci_status & PCI_MSK_AUD_EXT) ||
1650 (pci_status & PCI_MSK_GPIO0) ||
1651 (pci_status & PCI_MSK_GPIO1)) {
1653 if (pci_status & PCI_MSK_RISC_RD)
1654 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1657 if (pci_status & PCI_MSK_RISC_WR)
1658 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1661 if (pci_status & PCI_MSK_AL_RD)
1662 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1665 if (pci_status & PCI_MSK_AL_WR)
1666 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1669 if (pci_status & PCI_MSK_APB_DMA)
1670 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1673 if (pci_status & PCI_MSK_VID_C)
1674 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1677 if (pci_status & PCI_MSK_VID_B)
1678 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1681 if (pci_status & PCI_MSK_VID_A)
1682 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1685 if (pci_status & PCI_MSK_AUD_INT)
1686 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1689 if (pci_status & PCI_MSK_AUD_EXT)
1690 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1693 if (pci_status & PCI_MSK_GPIO0)
1694 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1697 if (pci_status & PCI_MSK_GPIO1)
1698 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1702 if (cx23885_boards[dev->board].cimax > 0 &&
1703 ((pci_status & PCI_MSK_GPIO0) ||
1704 (pci_status & PCI_MSK_GPIO1))) {
1706 if (cx23885_boards[dev->board].cimax > 0)
1707 handled += netup_ci_slot_status(dev, pci_status);
1712 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1713 handled += cx23885_irq_ts(ts1, ts1_status);
1715 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1716 handled += cx23885_irq_417(dev, ts1_status);
1720 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1721 handled += cx23885_irq_ts(ts2, ts2_status);
1723 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1724 handled += cx23885_irq_417(dev, ts2_status);
1728 handled += cx23885_video_irq(dev, vida_status);
1731 cx_write(PCI_INT_STAT, pci_status);
1733 return IRQ_RETVAL(handled);
1736 static inline int encoder_on_portb(struct cx23885_dev *dev)
1738 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1741 static inline int encoder_on_portc(struct cx23885_dev *dev)
1743 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1746 /* Mask represents 32 different GPIOs. GPIOs are split into multiple
1747 * registers depending on the board configuration (and whether the
1748 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
1749 * be pushed into the correct hardware register, regardless of the
1750 * physical location. Certain registers are shared so we sanity check
1751 * and report errors if we think we're tampering with a GPIO that might
1752 * be assigned to the encoder (and used for the host bus).
1754 * GPIO 2 thru 0 - On the cx23885 bridge
1755 * GPIO 18 thru 3 - On the cx23417 host bus interface
1756 * GPIO 23 thru 19 - On the cx25840 a/v core
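 *
 * For example, setting bit 5 of the mask lands in MC417_RWD bit 2 via
 * (mask & 0x0007fff8) >> 3, while bits 0-2 go straight to GP0_IO.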
1758 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1761 cx_set(GP0_IO, mask & 0x7);
1763 if (mask & 0x0007fff8) {
1764 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1766 "%s: Setting GPIO on encoder ports\n",
1768 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1772 if (mask & 0x00f80000)
1773 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1776 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1778 if (mask & 0x00000007)
1779 cx_clear(GP0_IO, mask & 0x7);
1781 if (mask & 0x0007fff8) {
1782 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1784 "%s: Clearing GPIO moving on encoder ports\n",
1786 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1790 if (mask & 0x00f80000)
1791 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1794 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1796 if ((mask & 0x00000007) && asoutput)
1797 cx_set(GP0_IO, (mask & 0x7) << 16);
1798 else if ((mask & 0x00000007) && !asoutput)
1799 cx_clear(GP0_IO, (mask & 0x7) << 16);
1801 if (mask & 0x0007fff8) {
1802 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1804 "%s: Enabling GPIO on encoder ports\n",
1808 /* MC417_OEN is active low for output, write 1 for an input */
1809 if ((mask & 0x0007fff8) && asoutput)
1810 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1812 else if ((mask & 0x0007fff8) && !asoutput)
1813 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1818 static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
1819 const struct pci_device_id *pci_id)
1821 struct cx23885_dev *dev;
1824 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1828 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1834 if (pci_enable_device(pci_dev)) {
1839 if (cx23885_dev_setup(dev) < 0) {
1844 /* print pci info */
1845 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
1846 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
1847 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
1848 "latency: %d, mmio: 0x%llx\n", dev->name,
1849 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
1851 (unsigned long long)pci_resource_start(pci_dev, 0));
1853 pci_set_master(pci_dev);
1854 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
1855 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1860 err = request_irq(pci_dev->irq, cx23885_irq,
1861 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
1863 printk(KERN_ERR "%s: can't get IRQ %d\n",
1864 dev->name, pci_dev->irq);
1868 switch (dev->board) {
1869 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
1870 cx_set(PCI_INT_MSK, 0x01800000); /* for NetUP */
1877 cx23885_dev_unregister(dev);
1879 v4l2_device_unregister(&dev->v4l2_dev);
1885 static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
1887 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
1888 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
1890 cx23885_shutdown(dev);
1892 pci_disable_device(pci_dev);
1894 /* unregister stuff */
1895 free_irq(pci_dev->irq, dev);
1897 mutex_lock(&devlist);
1898 list_del(&dev->devlist);
1899 mutex_unlock(&devlist);
1901 cx23885_dev_unregister(dev);
1902 v4l2_device_unregister(v4l2_dev);
1906 static struct pci_device_id cx23885_pci_tbl[] = {
1911 .subvendor = PCI_ANY_ID,
1912 .subdevice = PCI_ANY_ID,
1917 .subvendor = PCI_ANY_ID,
1918 .subdevice = PCI_ANY_ID,
1920 /* --- end of list --- */
1923 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
1925 static struct pci_driver cx23885_pci_driver = {
1927 .id_table = cx23885_pci_tbl,
1928 .probe = cx23885_initdev,
1929 .remove = __devexit_p(cx23885_finidev),
1935 static int cx23885_init(void)
1937 printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
1938 (CX23885_VERSION_CODE >> 16) & 0xff,
1939 (CX23885_VERSION_CODE >> 8) & 0xff,
1940 CX23885_VERSION_CODE & 0xff);
1942 printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
1943 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
1945 return pci_register_driver(&cx23885_pci_driver);
1948 static void cx23885_fini(void)
1950 pci_unregister_driver(&cx23885_pci_driver);
1953 module_init(cx23885_init);
1954 module_exit(cx23885_fini);
1956 /* ----------------------------------------------------------- */