/* interrupt handling
    Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004 Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "ivtv-driver.h"
#include "ivtv-firmware.h"
#include "ivtv-fileops.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-ioctl.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"	/* ivtv_vbi_work_handler(), ivtv_process_vbi_data() */
#include "ivtv-yuv.h"	/* ivtv_yuv_work_handler() */
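/* Marker written into card memory just before a transfer is started (see
 * stream_enc_dma_append()). dma_post() looks for this value in the first
 * received buffer to locate the real start of the data, then clears the
 * word in card memory and restores the backed-up contents. */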
#define DMA_MAGIC_COOKIE 0x000001fe
static void ivtv_dma_dec_start(struct ivtv_stream *s);
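/* Maps the stream index reported by the encoder firmware in the mailbox
 * (0 = MPG, 1 = YUV, 2 = PCM, 3 = VBI) to the driver's stream types; used
 * by ivtv_irq_enc_start_cap() and ivtv_irq_enc_dma_complete() below. */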
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	struct list_head *p;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
	    s->v4l2dev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
	list_for_each(p, &s->q_dma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
		u32 size = s->PIOarray[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->PIOarray[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->PIOarray[i].src, size);
		}
		if (s->PIOarray[i].size & 0x80000000)
			break;
		i++;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
void ivtv_irq_work_handler(struct work_struct *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
}
/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream. */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	struct list_head *p;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->SG_length;
	int rc;

	/* sanity checks */
	if (s->v4l2dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}
	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->dma_pts = 0;
		break;
	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->dma_pts = ((u64) data[5] << 32) | data[6];
		break;
	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->dma_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;
	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->dma_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;
	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}
	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->SG_length == 0) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->dma_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->dma_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->dma_offset = offset;
	}
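	/* At this point dma_backup holds the word that the cookie just
	 * overwrote; once the transfer has completed, dma_post() clears the
	 * cookie in card memory and patches the first word of the received
	 * buffer with this saved value. */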
	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);
	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;
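	/* A positive return from ivtv_queue_move() means buffers had to be
	 * taken ("stolen") from the full queue, i.e. data the application has
	 * not read yet was dropped; remembering it in buffers_stolen keeps the
	 * warning above from being repeated for every transfer. */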
	/* got the buffers, now fill in SGarray (DMA) */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

		if (skip_bufs-- > 0)
			continue;
		s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
		s->SGarray[idx].src = cpu_to_le32(offset);
		s->SGarray[idx].size = cpu_to_le32(s->buf_size);
		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) { /* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->SG_length = idx;
	return 0;
}
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset, *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
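	/* Walk the buffers that were just transferred. The first buffer is
	 * expected to start with the DMA_MAGIC_COOKIE written by
	 * stream_enc_dma_append(); it is searched for below to find where the
	 * real data begins. */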
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (u32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0) {
			offset = s->dma_last_offset;
			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) {
				for (offset = 0; offset < 64; offset++) {
					if (u32buf[offset] == DMA_MAGIC_COOKIE)
						break;
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			/* Clear the magic cookie in card memory */
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
			    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			/* Skip the leading bytes up to the start of the data and
			   restore the word that the cookie overwrote */
			buf->bytesused -= offset;
			memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;

		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			set_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags);
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each(p, &s->q_dma.list) {
			buf = list_entry(p, struct ivtv_buffer, list);

			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
		return;
	}
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
}
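/* Build the scatter-gather list for a transfer from host memory to decoder
 * memory and start it, or mark it pending if another DMA is already in
 * flight. 'lock' tells whether dma_reg_lock still has to be taken here;
 * ivtv_irq_dec_data_req() passes 0 because the interrupt handler already
 * holds that lock. */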
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	struct list_head *p;
	u32 y_size = itv->params.height * itv->params.width;
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
			offset = uv_offset;
			y_done = 1;
		}
		s->SGarray[idx].src = cpu_to_le32(buf->dma_handle);
		s->SGarray[idx].dst = cpu_to_le32(offset);
		s->SGarray[idx].size = cpu_to_le32(buf->bytesused);

		offset += buf->bytesused;
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->SG_length = idx;

	/* Mark last buffer size for Interrupt flag */
	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
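	/* Bit 31 in the size field flags the final scatter-gather entry; going
	 * by the comment above this is what makes the firmware raise the
	 * completion interrupt, and the PIO fallback in ivtv_pio_work_handler()
	 * checks the same bit to know when to stop copying. */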
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	if (ivtv_use_dma(s))
		s->SGarray[s->SG_length - 1].size =
			cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of a
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length &&
	    s->SG_length + s_vbi->SG_length <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->SGarray[s_vbi->SG_length - 1].size =
				cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s_vbi->SG_length - 1].size) + 256);
		for (i = 0; i < s_vbi->SG_length; i++) {
			s->SGarray[s->SG_length++] = s_vbi->SGarray[i];
		}
		itv->vbi.dma_offset = s_vbi->dma_offset;
		s_vbi->SG_length = 0;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
	}

	/* Mark last buffer size for Interrupt flag */
	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);

	if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
		set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
	else
		clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);

	if (ivtv_use_pio(s)) {
		for (i = 0; i < s->SG_length; i++) {
			s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src);
			s->PIOarray[i].size = le32_to_cpu(s->SGarray[i].size);
		}
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		/* Sync Hardware SG List of buffers */
		ivtv_stream_sync_for_device(s);
		write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
		write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
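		/* Setting bit 1 of IVTV_REG_DMAXFER appears to kick the encoder
		 * DMA engine; ivtv_dma_dec_start() below does the same with
		 * bit 0 for the decoder. */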
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
		itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
		add_timer(&itv->dma_timer);
	}
}
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	/* put SG Handle into register 0x0c */
	write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
	add_timer(&itv->dma_timer);
}
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
	del_timer(&itv->dma_timer);
	if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
		IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	}
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
			hw_stream_type = 2;
		}
		else {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
			hw_stream_type = 0;
		}
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		ivtv_stream_sync_for_cpu(s);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
	if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
		data[1] = 3;	/* the transfer was for the VBI stream (index 3 in ivtv_stream_map) */
	else if (data[1] > 2)
		return;
	s = &itv->streams[ivtv_stream_map[data[1]]];
	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x\n", data[0]);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[1]);
	}
	s->SG_length = 0;
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	ivtv_stream_sync_for_cpu(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		s->dma_offset = itv->vbi.dma_offset;
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	s->SG_length = 0;
	clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		s->dma_offset = itv->vbi.dma_offset;
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry the failed DMA for the stream that was active */
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three
	   requests queued. */
	if (s->SG_length > 2) {
		struct list_head *p;

		list_for_each(p, &s->q_predma.list) {
			struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
		s->SG_length = 0;
	}
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	if (!stream_enc_dma_append(s, data) &&
	    !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
	    !stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
		itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
			itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}
static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(0x28c0) & 1;
	int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ itv->yuv_info.lace_sync_field) == 0 && ((itv->lastVsyncFrame & 1) ^ itv->yuv_info.lace_sync_field)) ||
	    (frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
			write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
			write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
			write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
			write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
			next_dma_frame = (next_dma_frame + 1) & 0x3;
			atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
		}
	}
	if (frame != (itv->lastVsyncFrame & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->lastVsyncFrame += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		}
		wake_up(&itv->vsync_waitq);
		if (s)
			wake_up(&s->waitq);

		/* Send VBI to saa7127 */
		if (frame) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
			if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
				last_dma_frame = (last_dma_frame - 1) & 3;

			if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
				itv->yuv_info.update_frame = last_dma_frame;
				itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
				itv->yuv_info.yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}
	}
}
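/* The interrupts in this mask are the ones after which a pending stream
 * DMA or PIO transfer may have to be started; see the end of
 * ivtv_irq_handler() where it is checked. */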
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo, stat;
	int i;
	int vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);
	combo = ~itv->irqmask & stat;
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);
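	/* combo now holds only the interrupt bits that are not masked out;
	 * writing them back to IVTV_REG_IRQSTATUS appears to acknowledge
	 * (clear) them in the hardware before they are dispatched below. */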
	/* The vsync interrupt is unusual and clears itself. If we
	 * took too long, we may have missed it. Do some checks
	 */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		/* vsync is enabled, see if we're in a new field */
		if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
			/* New field, looks like we missed it */
			IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
			vsync_force = 1;
		}
	}

	if (!combo && !vsync_force) {
		/* No Vsync expected, wasn't for us */
		spin_unlock(&itv->dma_reg_lock);
		return IRQ_NONE;
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->cap_w);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx++) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
		}
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx++) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}
	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time. */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
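/* Timer callback armed in ivtv_dma_enc_start()/ivtv_dma_dec_start(): if a
 * DMA transfer has not completed within the 100 ms timeout, give up on it,
 * clear the DMA state and wake up any waiters. */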
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}