V4L/DVB (6712): ivtv: ivtv yuv stream handling change
drivers/media/video/ivtv/ivtv-irq.c
/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

static const int ivtv_stream_map[] = {
        IVTV_ENC_STREAM_TYPE_MPG,
        IVTV_ENC_STREAM_TYPE_YUV,
        IVTV_ENC_STREAM_TYPE_PCM,
        IVTV_ENC_STREAM_TYPE_VBI,
};

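/* Deferred PIO copy: walk the pending scatter-gather list and copy each
   segment out of card memory with memcpy_fromio(), then raise the
   PIO-complete user interrupt by writing to register 0x44 (which appears
   to act as an interrupt trigger register; the driver only ever writes
   IVTV_IRQ_ENC_PIO_COMPLETE to it). */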
static void ivtv_pio_work_handler(struct ivtv *itv)
{
        struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
        struct ivtv_buffer *buf;
        int i = 0;

        IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
                        s->v4l2dev == NULL || !ivtv_use_pio(s)) {
                itv->cur_pio_stream = -1;
                /* trigger PIO complete user interrupt */
                write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
                return;
        }
        IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
        list_for_each_entry(buf, &s->q_dma.list, list) {
                u32 size = s->sg_processing[i].size & 0x3ffff;

                /* Copy the data from the card to the buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                        memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
                }
                else {
                        memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
                }
                i++;
                if (i == s->sg_processing_size)
                        break;
        }
        write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

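/* Deferred work scheduled from the interrupt handler. Each
   IVTV_F_I_WORK_HANDLER_* flag requests one kind of work; the bits are
   tested and cleared atomically so new requests can be set while the
   current ones are being handled. */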
void ivtv_irq_work_handler(struct work_struct *work)
{
        struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

        DEFINE_WAIT(wait);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
                ivtv_pio_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
                ivtv_vbi_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
                ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        u32 bytes_needed = 0;
        u32 offset, size;
        u32 UVoffset = 0, UVsize = 0;
        int skip_bufs = s->q_predma.buffers;
        int idx = s->sg_pending_size;
        int rc;

        /* sanity checks */
        if (s->v4l2dev == NULL) {
                IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
                return -1;
        }
        if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
                IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
                return -1;
        }

        /* determine offset, size and PTS for the various streams */
        switch (s->type) {
                case IVTV_ENC_STREAM_TYPE_MPG:
                        offset = data[1];
                        size = data[2];
                        s->pending_pts = 0;
                        break;

                case IVTV_ENC_STREAM_TYPE_YUV:
                        offset = data[1];
                        size = data[2];
                        UVoffset = data[3];
                        UVsize = data[4];
                        s->pending_pts = ((u64) data[5] << 32) | data[6];
                        break;

                case IVTV_ENC_STREAM_TYPE_PCM:
                        offset = data[1] + 12;
                        size = data[2] - 12;
                        s->pending_pts = read_dec(offset - 8) |
                                ((u64)(read_dec(offset - 12)) << 32);
                        if (itv->has_cx23415)
                                offset += IVTV_DECODER_OFFSET;
                        break;

                case IVTV_ENC_STREAM_TYPE_VBI:
                        size = itv->vbi.enc_size * itv->vbi.fpi;
                        offset = read_enc(itv->vbi.enc_start - 4) + 12;
                        if (offset == 12) {
                                IVTV_DEBUG_INFO("VBI offset == 0\n");
                                return -1;
                        }
                        s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
                        break;

                case IVTV_DEC_STREAM_TYPE_VBI:
                        size = read_dec(itv->vbi.dec_start + 4) + 8;
                        offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
                        s->pending_pts = 0;
                        offset += IVTV_DECODER_OFFSET;
                        break;
                default:
                        /* shouldn't happen */
                        return -1;
        }

        /* if this is the start of the DMA then fill in the magic cookie */
        if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
                if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                        s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
                        write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
                }
                else {
                        s->pending_backup = read_enc(offset);
                        write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
                }
                s->pending_offset = offset;
        }

        bytes_needed = size;
        if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
                /* The size for the Y samples needs to be rounded upwards to a
                   multiple of the buf_size. The UV samples then start in the
                   next buffer. */
                bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
                bytes_needed += UVsize;
        }

        IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
                ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

        rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
        if (rc < 0) { /* Insufficient buffers */
                IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
                                bytes_needed, s->name);
                return -1;
        }
        if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
                IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
                IVTV_WARN("Cause: the application is not reading fast enough.\n");
        }
        s->buffers_stolen = rc;

        /* got the buffers, now fill in sg_pending */
        buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
        memset(buf->buf, 0, 128);
        list_for_each_entry(buf, &s->q_predma.list, list) {
                if (skip_bufs-- > 0)
                        continue;
                s->sg_pending[idx].dst = buf->dma_handle;
                s->sg_pending[idx].src = offset;
                s->sg_pending[idx].size = s->buf_size;
                buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
                buf->dma_xfer_cnt = s->dma_xfer_cnt;

                s->q_predma.bytesused += buf->bytesused;
                size -= buf->bytesused;
                offset += s->buf_size;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);

                if (size == 0) {        /* YUV */
                        /* process the UV section */
                        offset = UVoffset;
                        size = UVsize;
                }
                idx++;
        }
        s->sg_pending_size = idx;
        return 0;
}

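/* Post-transfer processing. The first buffer of a DMA transfer should
   start with DMA_MAGIC_COOKIE (written in stream_enc_dma_append()); if it
   does not, the first 256 bytes are scanned for the cookie, which appears
   to compensate for the firmware occasionally starting the transfer at a
   slightly different offset. The word the cookie overwrote is restored
   from dma_backup, and MPG/VBI buffers are flagged for byteswapping
   outside interrupt context. */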
static void dma_post(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf = NULL;
        struct list_head *p;
        u32 offset;
        u32 *u32buf;
        int x = 0;

        IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
                        s->name, s->dma_offset);
        list_for_each(p, &s->q_dma.list) {
                buf = list_entry(p, struct ivtv_buffer, list);
                u32buf = (u32 *)buf->buf;

                /* Sync Buffer */
                ivtv_buf_sync_for_cpu(s, buf);

                if (x == 0 && ivtv_use_dma(s)) {
                        offset = s->dma_last_offset;
                        if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) {
                                for (offset = 0; offset < 64; offset++) {
                                        if (u32buf[offset] == DMA_MAGIC_COOKIE) {
                                                break;
                                        }
                                }
                                offset *= 4;
                                if (offset == 256) {
                                        IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
                                        offset = s->dma_last_offset;
                                }
                                if (s->dma_last_offset != offset)
                                        IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
                                s->dma_last_offset = offset;
                        }
                        if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                                                s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                                write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
                        }
                        else {
                                write_enc_sync(0, s->dma_offset);
                        }
                        if (offset) {
                                buf->bytesused -= offset;
                                memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
                        }
                        *u32buf = cpu_to_le32(s->dma_backup);
                }
                x++;
                /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
                if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
                    s->type == IVTV_ENC_STREAM_TYPE_VBI)
                        buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
        }
        if (buf)
                buf->bytesused += s->dma_last_offset;
        if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                list_for_each_entry(buf, &s->q_dma.list, list) {
                        /* Parse and Groom VBI Data */
                        s->q_dma.bytesused -= buf->bytesused;
                        ivtv_process_vbi_data(itv, buf, 0, s->type);
                        s->q_dma.bytesused += buf->bytesused;
                }
                if (s->id == -1) {
                        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
                        return;
                }
        }
        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
        if (s->id != -1)
                wake_up(&s->waitq);
}

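/* Build the scatter-gather list for a DMA transfer to the decoder. For
   YUV output the luma plane is written at 'offset' and the chroma plane
   at offset + IVTV_YUV_BUFFER_UV_OFFSET; a buffer straddling the Y/UV
   boundary is split into two SG elements. */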
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        u32 y_size = itv->params.height * itv->params.width;
        u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
        int y_done = 0;
        int bytes_written = 0;
        unsigned long flags = 0;
        int idx = 0;

        IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
        list_for_each_entry(buf, &s->q_predma.list, list) {
                /* YUV UV Offset from Y Buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
                                (bytes_written + buf->bytesused) >= y_size) {
                        s->sg_pending[idx].src = buf->dma_handle;
                        s->sg_pending[idx].dst = offset;
                        s->sg_pending[idx].size = y_size - bytes_written;
                        offset = uv_offset;
                        if (s->sg_pending[idx].size != buf->bytesused) {
                                idx++;
                                s->sg_pending[idx].src =
                                  buf->dma_handle + s->sg_pending[idx - 1].size;
                                s->sg_pending[idx].dst = offset;
                                s->sg_pending[idx].size =
                                   buf->bytesused - s->sg_pending[idx - 1].size;
                                offset += s->sg_pending[idx].size;
                        }
                        y_done = 1;
                } else {
                        s->sg_pending[idx].src = buf->dma_handle;
                        s->sg_pending[idx].dst = offset;
                        s->sg_pending[idx].size = buf->bytesused;
                        offset += buf->bytesused;
                }
                bytes_written += buf->bytesused;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);
                idx++;
        }
        s->sg_pending_size = idx;

        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        if (lock)
                spin_lock_irqsave(&itv->dma_reg_lock, flags);
        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                ivtv_dma_dec_start(s);
        }
        else {
                set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
        if (lock)
                spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

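/* Program a single scatter-gather element and kick off the hardware
   transfer. Setting bit 31 of the size word (the 0x80000000 below) is
   presumably a control flag the firmware expects on each element; the
   driver sets it unconditionally. */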
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
        int i;

        IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

        if (ivtv_use_dma(s))
                s->sg_pending[s->sg_pending_size - 1].size += 256;

        /* If this is an MPEG stream, and VBI data is also pending, then append the
           VBI DMA to the MPEG DMA and transfer both sets of data at once.

           VBI DMA is a second class citizen compared to MPEG and mixing them together
           will confuse the firmware (the end of a VBI DMA is seen as the end of a
           MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
           sure we only use the MPEG DMA to transfer the VBI DMA if both are in
           use. This way no conflicts occur. */
        clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
                        s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
                ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
                if (ivtv_use_dma(s_vbi))
                        s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
                for (i = 0; i < s_vbi->sg_pending_size; i++) {
                        s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
                }
                s_vbi->dma_offset = s_vbi->pending_offset;
                s_vbi->sg_pending_size = 0;
                s_vbi->dma_xfer_cnt++;
                set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
                IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
        }

        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;
        s->dma_offset = s->pending_offset;
        s->dma_backup = s->pending_backup;
        s->dma_pts = s->pending_pts;

        if (ivtv_use_pio(s)) {
                set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                set_bit(IVTV_F_I_PIO, &itv->i_flags);
                itv->cur_pio_stream = s->type;
        }
        else {
                itv->dma_retries = 0;
                ivtv_dma_enc_start_xfer(s);
                set_bit(IVTV_F_I_DMA, &itv->i_flags);
                itv->cur_dma_stream = s->type;
                itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
                add_timer(&itv->dma_timer);
        }
}

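/* Start the decoder DMA: move the predma queue to the DMA queue, snapshot
   the pending SG list into sg_processing and start the first transfer.
   A 100 ms timer catches transfers that never complete. */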
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;

        IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
        itv->dma_retries = 0;
        ivtv_dma_dec_start_xfer(s);
        set_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = s->type;
        itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
        add_timer(&itv->dma_timer);
}

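/* Handle the DMA-read interrupt from the decoder. On an error status the
   whole frame is retried from the first SG segment, up to three times,
   before being dropped. */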
static void ivtv_irq_dma_read(struct ivtv *itv)
{
        struct ivtv_stream *s = NULL;
        struct ivtv_buffer *buf;
        int hw_stream_type = 0;

        IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }

        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                s = &itv->streams[itv->cur_dma_stream];
                ivtv_stream_sync_for_cpu(s);

                if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
                        IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
                                        read_reg(IVTV_REG_DMASTATUS),
                                        s->sg_processed, s->sg_processing_size, itv->dma_retries);
                        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                        if (itv->dma_retries == 3) {
                                /* Too many retries, give up on this frame */
                                itv->dma_retries = 0;
                                s->sg_processed = s->sg_processing_size;
                        }
                        else {
                                /* Retry, starting with the first xfer segment.
                                   Just retrying the current segment is not sufficient. */
                                s->sg_processed = 0;
                                itv->dma_retries++;
                        }
                }
                if (s->sg_processed < s->sg_processing_size) {
                        /* DMA next buffer */
                        ivtv_dma_dec_start_xfer(s);
                        return;
                }
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
                        hw_stream_type = 2;
                IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

                /* For some reason we must kick the firmware, as in PIO mode.
                   This presumably tells the firmware we are done and reports
                   the size of the transfer so it can calculate what we need
                   next. We could do this part ourselves, but we would have to
                   fully calculate the transfer info ourselves and not use
                   interrupts. */
                ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
                                hw_stream_type);

                /* Free last DMA call */
                while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
                        ivtv_buf_sync_for_cpu(s, buf);
                        ivtv_enqueue(s, buf, &s->q_free);
                }
                wake_up(&s->waitq);
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

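/* Encoder DMA completion: fetch the status from the mailbox, retry the
   frame on error (same policy as the decoder read above), and once all
   SG segments are done hand the buffers to dma_post(). If VBI data rode
   along with an MPEG transfer, post that stream as well. */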
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
        if (itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }
        s = &itv->streams[itv->cur_dma_stream];
        ivtv_stream_sync_for_cpu(s);

        if (data[0] & 0x18) {
                IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
                        s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
                write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                if (itv->dma_retries == 3) {
                        /* Too many retries, give up on this frame */
                        itv->dma_retries = 0;
                        s->sg_processed = s->sg_processing_size;
                }
                else {
                        /* Retry, starting with the first xfer segment.
                           Just retrying the current segment is not sufficient. */
                        s->sg_processed = 0;
                        itv->dma_retries++;
                }
        }
        if (s->sg_processed < s->sg_processing_size) {
                /* DMA next buffer */
                ivtv_dma_enc_start_xfer(s);
                return;
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        dma_post(s);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        s->sg_processing_size = 0;
        s->sg_processed = 0;
        wake_up(&itv->dma_waitq);
}

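/* A PIO transfer has completed. Post the buffers and ask the firmware to
   schedule the next transfer; the last ivtv_vapi() argument selects the
   hardware stream (0 = MPG, 1 = YUV, 2 = PCM), matching the branches
   below. */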
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
        struct ivtv_stream *s;

        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
                itv->cur_pio_stream = -1;
                return;
        }
        s = &itv->streams[itv->cur_pio_stream];
        IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        itv->cur_pio_stream = -1;
        dma_post(s);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
        else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
        else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        wake_up(&itv->dma_waitq);
}

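/* DMA error interrupt: log the status, clear the error bits and retry
   whatever transfer was in flight (stream DMA or user DMA). */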
static void ivtv_irq_dma_err(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];

        del_timer(&itv->dma_timer);
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
                                read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
            itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
                struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

                /* retry */
                if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                        ivtv_dma_dec_start(s);
                else
                        ivtv_dma_enc_start(s);
                return;
        }
        if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                ivtv_udma_start(itv);
                return;
        }
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* Get DMA destination and size arguments from card */
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
        IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

        if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
                IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
                                data[0], data[1], data[2]);
                return;
        }
        s = &itv->streams[ivtv_stream_map[data[0]]];
        if (!stream_enc_dma_append(s, data)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
        struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
        s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

        /* If more than two VBI buffers are pending, then
           clear the old ones and start with this new one.
           This can happen during transition stages when MPEG capturing is
           started, but the first interrupts haven't arrived yet. During
           that period VBI requests can accumulate without being able to
           DMA the data. Since at most four VBI DMA buffers are available,
           we just drop the old requests when there are already three
           requests queued. */
        if (s->sg_pending_size > 2) {
                struct ivtv_buffer *buf;
                list_for_each_entry(buf, &s->q_predma.list, list)
                        ivtv_buf_sync_for_cpu(s, buf);
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
                s->sg_pending_size = 0;
        }
        /* if we can append the data, and the MPEG stream isn't capturing,
           then start a DMA request for just the VBI data. */
        if (!stream_enc_dma_append(s, data) &&
                        !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

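/* As in ivtv_irq_enc_vbi_cap() above, 'data' is passed to
   stream_enc_dma_append() without being filled in first; that is safe
   because the VBI cases read their offset and size from card registers
   rather than from data[]. */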
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

        IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
        if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
                        !stream_enc_dma_append(s, data)) {
                set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* YUV or MPG */
        ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

        if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
                itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
                itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
        }
        else {
                itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
                itv->dma_data_req_offset = data[1];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
        }
        IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
                       itv->dma_data_req_offset, itv->dma_data_req_size);
        if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
                set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
        }
        else {
                clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
                ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
                ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
        }
}

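/* Registers 0x82c/0x830 and 0x834/0x838 written below appear to hold the
   display base addresses for the Y and UV planes (in units of 16 bytes,
   hence the >> 4); they are rewritten on each handled vsync to flip to
   the next YUV frame. */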
static void ivtv_irq_vsync(struct ivtv *itv)
{
        /* The vsync interrupt is unusual in that it won't clear until
         * the end of the first line for the current field, at which
         * point it clears itself. This can result in repeated vsync
         * interrupts, or a missed vsync. Read some of the registers
         * to determine the line being displayed and ensure we handle
         * one vsync per frame.
         */
        unsigned int frame = read_reg(0x28c0) & 1;
        int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

        if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

        if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
                ((itv->last_vsync_field & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
                        (frame != (itv->last_vsync_field & 1) && !itv->yuv_info.frame_interlaced)) {
                int next_dma_frame = last_dma_frame;

                if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] &&
                      itv->yuv_info.fields_lapsed < 1)) {
                        if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
                                next_dma_frame = (next_dma_frame + 1) & 0x3;
                                atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
                                itv->yuv_info.fields_lapsed = -1;
                        }
                }
        }
        if (frame != (itv->last_vsync_field & 1)) {
                struct ivtv_stream *s = ivtv_get_output_stream(itv);

                itv->last_vsync_field += 1;
                if (frame == 0) {
                        clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
                        clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                else {
                        set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
                        set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
                        wake_up(&itv->event_waitq);
                }
                wake_up(&itv->vsync_waitq);
                if (s)
                        wake_up(&s->waitq);

                /* Send VBI to saa7127 */
                if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
                        test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
                        test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
                        test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
                        set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
                        set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                }

                /* Check if we need to update the yuv registers */
                if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) &&
                                last_dma_frame != -1) {
                        if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
                                last_dma_frame = (last_dma_frame - 1) & 3;

                        if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
                                itv->yuv_info.update_frame = last_dma_frame;
                                itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
                                itv->yuv_info.yuv_forced_update = 0;
                                set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
                                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                        }
                }

                itv->yuv_info.fields_lapsed++;
        }
}

#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | \
                      IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | \
                      IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)

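/* Main interrupt handler. All DMA-related interrupt bits are grouped in
   IVTV_IRQ_DMA above: whenever one of them fires and no transfer is
   currently active, the round-robin loops near the end of the handler
   start the next pending DMA or PIO request. */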
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
        struct ivtv *itv = (struct ivtv *)dev_id;
        u32 combo;
        u32 stat;
        int i;
        u8 vsync_force = 0;

        spin_lock(&itv->dma_reg_lock);
        /* get contents of irq status register */
        stat = read_reg(IVTV_REG_IRQSTATUS);

        combo = ~itv->irqmask & stat;

        /* Clear out IRQ */
        if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

        if (0 == combo) {
                /* The vsync interrupt is unusual and clears itself. If we
                 * took too long, we may have missed it. Do some checks
                 */
                if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                        /* vsync is enabled, see if we're in a new field */
                        if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
                                /* New field, looks like we missed it */
                                IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
                                vsync_force = 1;
                        }
                }

                if (!vsync_force) {
                        /* No Vsync expected, wasn't for us */
                        spin_unlock(&itv->dma_reg_lock);
                        return IRQ_NONE;
                }
        }

        /* Exclude interrupts noted below from the output, otherwise the log is flooded with
           these messages */
        if (combo & ~0xff6d0400)
                IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

        if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
                IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
        }

        if (combo & IVTV_IRQ_DMA_READ) {
                ivtv_irq_dma_read(itv);
        }

        if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
                ivtv_irq_enc_dma_complete(itv);
        }

        if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
                ivtv_irq_enc_pio_complete(itv);
        }

        if (combo & IVTV_IRQ_DMA_ERR) {
                ivtv_irq_dma_err(itv);
        }

        if (combo & IVTV_IRQ_ENC_START_CAP) {
                ivtv_irq_enc_start_cap(itv);
        }

        if (combo & IVTV_IRQ_ENC_VBI_CAP) {
                ivtv_irq_enc_vbi_cap(itv);
        }

        if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
                ivtv_irq_dec_vbi_reinsert(itv);
        }

        if (combo & IVTV_IRQ_ENC_EOS) {
                IVTV_DEBUG_IRQ("ENC EOS\n");
                set_bit(IVTV_F_I_EOS, &itv->i_flags);
                wake_up(&itv->eos_waitq);
        }

        if (combo & IVTV_IRQ_DEC_DATA_REQ) {
                ivtv_irq_dec_data_req(itv);
        }

        /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
        if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                ivtv_irq_vsync(itv);
        }

        if (combo & IVTV_IRQ_ENC_VIM_RST) {
                IVTV_DEBUG_IRQ("VIM RST\n");
                /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
        }

        if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
                IVTV_DEBUG_INFO("Stereo mode changed\n");
        }

        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
                                continue;
                        if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_dec_start(s);
                        else
                                ivtv_dma_enc_start(s);
                        break;
                }
                if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
                        ivtv_udma_start(itv);
                }
        }

        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
                                continue;
                        if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_enc_start(s);
                        break;
                }
        }

        if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
                queue_work(itv->irq_work_queues, &itv->irq_work_queue);
        }

        spin_unlock(&itv->dma_reg_lock);

        /* If we've just handled a 'forced' vsync, it's safest to say it
         * wasn't ours. Another device may have triggered it at just
         * the right time.
         */
        return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

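/* Timer callback, armed for 100 ms whenever a DMA transfer is started:
   if IVTV_F_I_DMA is still set the transfer never completed, so log it,
   clear the error bits and give up on the transfer. */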
void ivtv_unfinished_dma(unsigned long arg)
{
        struct ivtv *itv = (struct ivtv *)arg;

        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
                return;
        IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}