V4L/DVB (10956): cx231xx: First series of manual CodingStyle fixes
drivers/media/video/cx231xx/cx231xx-vbi.c
/*
   cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices

   Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
        Based on cx88 driver

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
#include <media/msp3400.h>
#include <media/tuner.h>

#include "cx231xx.h"
#include "cx231xx-vbi.h"

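/*
 * Print a human readable description of a URB or packet error status
 */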
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
        char *errmsg = "Unknown";

        switch (status) {
        case -ENOENT:
                errmsg = "unlinked synchronously";
                break;
        case -ECONNRESET:
                errmsg = "unlinked asynchronously";
                break;
        case -ENOSR:
                errmsg = "Buffer error (overrun)";
                break;
        case -EPIPE:
                errmsg = "Stalled (device not responding)";
                break;
        case -EOVERFLOW:
                errmsg = "Babble (bad cable?)";
                break;
        case -EPROTO:
                errmsg = "Bit-stuff error (bad cable?)";
                break;
        case -EILSEQ:
                errmsg = "CRC/Timeout (could be anything)";
                break;
        case -ETIME:
                errmsg = "Device does not respond";
                break;
        }
        if (packet < 0) {
                cx231xx_err(DRIVER_NAME "URB status %d [%s].\n", status,
                            errmsg);
        } else {
                cx231xx_err(DRIVER_NAME "URB packet %d, status %d [%s].\n",
                            packet, status, errmsg);
        }
}

/*
 * Controls the isoc copy of each urb packet
 */
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
        struct cx231xx_buffer *buf;
        struct cx231xx_dmaqueue *dma_q = urb->context;
        int rc = 1;
        unsigned char *p_buffer;
        u32 bytes_parsed = 0, buffer_size = 0;
        u8 sav_eav = 0;

        if (!dev)
                return 0;

        if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
                return 0;

        if (urb->status < 0) {
                print_err_status(dev, -1, urb->status);
                if (urb->status == -ENOENT)
                        return 0;
        }

        buf = dev->vbi_mode.isoc_ctl.buf;

        /* get buffer pointer and length */
        p_buffer = urb->transfer_buffer;
        buffer_size = urb->actual_length;

        if (buffer_size > 0) {
                bytes_parsed = 0;

                if (dma_q->is_partial_line) {
                        /* Handle the case where we were working on a partial
                           line */
                        sav_eav = dma_q->last_sav;
                } else {
                        /* Check for a SAV/EAV overlapping the
                           buffer boundary */
                        sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer,
                                                          dma_q->partial_buf,
                                                          &bytes_parsed);
                }

                sav_eav &= 0xF0;
                /* Get the first line if we have some portion of an SAV/EAV
                   from the last buffer or a partial line */
                if (sav_eav) {
                        bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
                                sav_eav,                       /* SAV/EAV */
                                p_buffer + bytes_parsed,       /* p_buffer */
                                buffer_size - bytes_parsed);   /* buffer size */
                }

                /* Now parse data that is completely in this buffer */
                dma_q->is_partial_line = 0;

                while (bytes_parsed < buffer_size) {
                        u32 bytes_used = 0;

                        sav_eav = cx231xx_find_next_SAV_EAV(
                                p_buffer + bytes_parsed,      /* p_buffer */
                                buffer_size - bytes_parsed,   /* buffer size */
                                &bytes_used); /* bytes used to get SAV/EAV */

                        bytes_parsed += bytes_used;

                        sav_eav &= 0xF0;
                        if (sav_eav && (bytes_parsed < buffer_size)) {
                                bytes_parsed += cx231xx_get_vbi_line(dev,
                                        dma_q, sav_eav,          /* SAV/EAV */
                                        p_buffer + bytes_parsed, /* p_buffer */
                                        buffer_size - bytes_parsed); /* size */
                        }
                }

                /* Save the last four bytes of the buffer so we can check
                   the buffer boundary condition next time */
                memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
                bytes_parsed = 0;
        }

        return rc;
}

/* ------------------------------------------------------------------
        Vbi buf operations
   ------------------------------------------------------------------*/

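/*
 * Calculate the buffer size (two bytes per pixel times the VBI line
 * count for the current standard) and buffer count for videobuf
 */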
static int
vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
                 unsigned int *size)
{
        struct cx231xx_fh *fh = vq->priv_data;
        struct cx231xx *dev = fh->dev;
        u32 height = 0;

        height = ((dev->norm & V4L2_STD_625_50) ?
                  PAL_VBI_LINES : NTSC_VBI_LINES);

        *size = (dev->width * height * 2);
        if (0 == *count)
                *count = CX231XX_DEF_VBI_BUF;

        if (*count < CX231XX_MIN_BUF)
                *count = CX231XX_MIN_BUF;

        /* call VBI setup if required */
        /* cx231xx_i2c_call_clients(&dev->i2c_bus[1], VIDIOC_S_FREQUENCY, &f);
         */

        return 0;
}

/* This is called *without* dev->slock held; please keep it that way */
static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
{
        struct cx231xx_fh *fh = vq->priv_data;
        struct cx231xx *dev = fh->dev;
        unsigned long flags = 0;

        if (in_interrupt())
                BUG();

        /* We used to wait for the buffer to finish here, but this didn't work
           because, as we were keeping the state as VIDEOBUF_QUEUED,
           videobuf_queue_cancel marked it as finished for us.
           (Also, it could wedge forever if the hardware was misconfigured.)

           This should be safe; by the time we get here, the buffer isn't
           queued anymore. If we ever start marking the buffers as
           VIDEOBUF_ACTIVE, it won't be, though.
         */
        spin_lock_irqsave(&dev->vbi_mode.slock, flags);
        if (dev->vbi_mode.isoc_ctl.buf == buf)
                dev->vbi_mode.isoc_ctl.buf = NULL;
        spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);

        videobuf_vmalloc_free(&buf->vb);
        buf->vb.state = VIDEOBUF_NEEDS_INIT;
}

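/*
 * Prepare a buffer for capture: validate its size, map it, and start
 * the VBI URB transfers on first use
 */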
static int
vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
                   enum v4l2_field field)
{
        struct cx231xx_fh *fh = vq->priv_data;
        struct cx231xx_buffer *buf =
            container_of(vb, struct cx231xx_buffer, vb);
        struct cx231xx *dev = fh->dev;
        int rc = 0, urb_init = 0;
        u32 height = 0;

        height = ((dev->norm & V4L2_STD_625_50) ?
                  PAL_VBI_LINES : NTSC_VBI_LINES);
        buf->vb.size = ((dev->width << 1) * height);

        if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
                return -EINVAL;

        buf->vb.width = dev->width;
        buf->vb.height = height;
        buf->vb.field = field;
        buf->vb.field = V4L2_FIELD_SEQ_TB;

        if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
                rc = videobuf_iolock(vq, &buf->vb, NULL);
                if (rc < 0)
                        goto fail;
        }

        if (!dev->vbi_mode.isoc_ctl.num_bufs)
                urb_init = 1;

        if (urb_init) {
                rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
                                           CX231XX_NUM_VBI_BUFS,
                                           dev->vbi_mode.alt_max_pkt_size[0],
                                           cx231xx_isoc_vbi_copy);
                if (rc < 0)
                        goto fail;
        }

        buf->vb.state = VIDEOBUF_PREPARED;
        return 0;

fail:
        free_buffer(vq, buf);
        return rc;
}

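/*
 * Queue a prepared buffer on the active list so the URB completion
 * handler can fill it
 */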
static void
vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
        struct cx231xx_buffer *buf =
            container_of(vb, struct cx231xx_buffer, vb);
        struct cx231xx_fh *fh = vq->priv_data;
        struct cx231xx *dev = fh->dev;
        struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;

        buf->vb.state = VIDEOBUF_QUEUED;
        list_add_tail(&buf->vb.queue, &vidq->active);
}

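/* Release a buffer that videobuf no longer needs */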
static void vbi_buffer_release(struct videobuf_queue *vq,
                               struct videobuf_buffer *vb)
{
        struct cx231xx_buffer *buf =
            container_of(vb, struct cx231xx_buffer, vb);

        free_buffer(vq, buf);
}

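/* videobuf queue operations used for VBI capture */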
struct videobuf_queue_ops cx231xx_vbi_qops = {
        .buf_setup   = vbi_buffer_setup,
        .buf_prepare = vbi_buffer_prepare,
        .buf_queue   = vbi_buffer_queue,
        .buf_release = vbi_buffer_release,
};

/* ------------------------------------------------------------------
        URB control
   ------------------------------------------------------------------*/

/*
 * IRQ callback, called by URB callback
 */
static void cx231xx_irq_vbi_callback(struct urb *urb)
{
        struct cx231xx_dmaqueue *dma_q = urb->context;
        struct cx231xx_video_mode *vmode =
            container_of(dma_q, struct cx231xx_video_mode, vidq);
        struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
        int rc;

        switch (urb->status) {
        case 0:                 /* success */
        case -ETIMEDOUT:        /* NAK */
                break;
        case -ECONNRESET:       /* kill */
        case -ENOENT:
        case -ESHUTDOWN:
                return;
        default:                /* error */
                cx231xx_err(DRIVER_NAME "urb completion error %d.\n",
                            urb->status);
                break;
        }

        /* Copy data from URB */
        spin_lock(&dev->vbi_mode.slock);
        rc = dev->vbi_mode.isoc_ctl.isoc_copy(dev, urb);
        spin_unlock(&dev->vbi_mode.slock);

        /* Reset status */
        urb->status = 0;

        urb->status = usb_submit_urb(urb, GFP_ATOMIC);
        if (urb->status) {
                cx231xx_err(DRIVER_NAME "urb resubmit failed (error=%i)\n",
                            urb->status);
        }
}

/*
 * Stop and Deallocate URBs
 */
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
{
        struct urb *urb;
        int i;

        cx231xx_info(DRIVER_NAME "cx231xx: called cx231xx_uninit_vbi_isoc\n");

        dev->vbi_mode.isoc_ctl.nfields = -1;
        for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
                urb = dev->vbi_mode.isoc_ctl.urb[i];
                if (urb) {
                        if (!irqs_disabled())
                                usb_kill_urb(urb);
                        else
                                usb_unlink_urb(urb);

                        if (dev->vbi_mode.isoc_ctl.transfer_buffer[i]) {
                                kfree(dev->vbi_mode.isoc_ctl.
                                      transfer_buffer[i]);
                                dev->vbi_mode.isoc_ctl.transfer_buffer[i] =
                                    NULL;
                        }
                        usb_free_urb(urb);
                        dev->vbi_mode.isoc_ctl.urb[i] = NULL;
                }
                dev->vbi_mode.isoc_ctl.transfer_buffer[i] = NULL;
        }

        kfree(dev->vbi_mode.isoc_ctl.urb);
        kfree(dev->vbi_mode.isoc_ctl.transfer_buffer);

        dev->vbi_mode.isoc_ctl.urb = NULL;
        dev->vbi_mode.isoc_ctl.transfer_buffer = NULL;
        dev->vbi_mode.isoc_ctl.num_bufs = 0;

        cx231xx_capture_start(dev, 0, Vbi);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);

/*
 * Allocate URBs and start IRQ
 */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
                          int num_bufs, int max_pkt_size,
                          int (*isoc_copy) (struct cx231xx *dev,
                                            struct urb *urb))
{
        struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
        int i;
        int sb_size, pipe;
        struct urb *urb;
        int rc;

        cx231xx_info(DRIVER_NAME "cx231xx: called cx231xx_init_vbi_isoc\n");

        /* De-allocates all pending stuff */
        cx231xx_uninit_vbi_isoc(dev);

        /* clear if any halt */
        usb_clear_halt(dev->udev,
                       usb_rcvbulkpipe(dev->udev,
                                       dev->vbi_mode.end_point_addr));

        dev->vbi_mode.isoc_ctl.isoc_copy = isoc_copy;
        dev->vbi_mode.isoc_ctl.num_bufs = num_bufs;
        dma_q->pos = 0;
        dma_q->is_partial_line = 0;
        dma_q->last_sav = 0;
        dma_q->current_field = -1;
        dma_q->bytes_left_in_line = dev->width << 1;
        dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
                                  PAL_VBI_LINES : NTSC_VBI_LINES);
        dma_q->lines_completed = 0;
        for (i = 0; i < 8; i++)
                dma_q->partial_buf[i] = 0;

        dev->vbi_mode.isoc_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
                                             GFP_KERNEL);
        if (!dev->vbi_mode.isoc_ctl.urb) {
                cx231xx_errdev("cannot alloc memory for usb buffers\n");
                return -ENOMEM;
        }

        dev->vbi_mode.isoc_ctl.transfer_buffer =
            kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
        if (!dev->vbi_mode.isoc_ctl.transfer_buffer) {
                cx231xx_errdev("cannot allocate memory for usb transfer\n");
                kfree(dev->vbi_mode.isoc_ctl.urb);
                return -ENOMEM;
        }

        dev->vbi_mode.isoc_ctl.max_pkt_size = max_pkt_size;
        dev->vbi_mode.isoc_ctl.buf = NULL;

        sb_size = max_packets * dev->vbi_mode.isoc_ctl.max_pkt_size;

        /* allocate urbs and transfer buffers */
        for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
                urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!urb) {
                        cx231xx_err(DRIVER_NAME
                                    ": cannot alloc isoc_ctl.urb %i\n", i);
                        cx231xx_uninit_vbi_isoc(dev);
                        return -ENOMEM;
                }
                dev->vbi_mode.isoc_ctl.urb[i] = urb;
                urb->transfer_flags = 0;

                dev->vbi_mode.isoc_ctl.transfer_buffer[i] =
                    kzalloc(sb_size, GFP_KERNEL);
                if (!dev->vbi_mode.isoc_ctl.transfer_buffer[i]) {
                        cx231xx_err(DRIVER_NAME
                                    ": unable to allocate %i bytes for transfer"
                                    " buffer %i%s\n", sb_size, i,
                                    in_interrupt() ? " while in int" : "");
                        cx231xx_uninit_vbi_isoc(dev);
                        return -ENOMEM;
                }

                pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
                usb_fill_bulk_urb(urb, dev->udev, pipe,
                                  dev->vbi_mode.isoc_ctl.transfer_buffer[i],
                                  sb_size, cx231xx_irq_vbi_callback, dma_q);
        }

        init_waitqueue_head(&dma_q->wq);

        /* submit urbs and enable IRQ */
        for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
                rc = usb_submit_urb(dev->vbi_mode.isoc_ctl.urb[i], GFP_ATOMIC);
                if (rc) {
                        cx231xx_err(DRIVER_NAME
                                    ": submit of urb %i failed (error=%i)\n", i,
                                    rc);
                        cx231xx_uninit_vbi_isoc(dev);
                        return rc;
                }
        }

        cx231xx_capture_start(dev, 1, Vbi);

        return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);

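/*
 * Check whether a SAV code marks the start of a VBI line for field 1
 * or field 2 and, if so, copy the line data into the current buffer
 */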
u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
                         u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
        u32 bytes_copied = 0;
        int current_field = -1;

        switch (sav_eav) {
        case SAV_VBI_FIELD1:
                current_field = 1;
                break;
        case SAV_VBI_FIELD2:
                current_field = 2;
                break;
        default:
                break;
        }

        if (current_field < 0)
                return bytes_copied;

        dma_q->last_sav = sav_eav;

        bytes_copied =
            cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
                                  current_field);

        return bytes_copied;
}

/*
 * Announce that a buffer was filled and request the next one
 */
static inline void vbi_buffer_filled(struct cx231xx *dev,
                                     struct cx231xx_dmaqueue *dma_q,
                                     struct cx231xx_buffer *buf)
{
        /* Advise that buffer was filled */
        /* cx231xx_info(DRIVER_NAME "[%p/%d] wakeup\n", buf, buf->vb.i); */

        buf->vb.state = VIDEOBUF_DONE;
        buf->vb.field_count++;
        do_gettimeofday(&buf->vb.ts);

        dev->vbi_mode.isoc_ctl.buf = NULL;

        list_del(&buf->vb.queue);
        wake_up(&buf->vb.done);
}

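/*
 * Copy as much of a VBI line as is present in this chunk of data,
 * keeping track of partial lines that span URB boundaries
 */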
u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
                          u8 *p_line, u32 length, int field_number)
{
        u32 bytes_to_copy;
        struct cx231xx_buffer *buf;
        u32 _line_size = dev->width * 2;

        if (dma_q->current_field != field_number)
                cx231xx_reset_vbi_buffer(dev, dma_q);

        /* get the buffer pointer */
        buf = dev->vbi_mode.isoc_ctl.buf;

        /* Remember the field number for next time */
        dma_q->current_field = field_number;

        bytes_to_copy = dma_q->bytes_left_in_line;
        if (bytes_to_copy > length)
                bytes_to_copy = length;

        if (dma_q->lines_completed >= dma_q->lines_per_field) {
                dma_q->bytes_left_in_line -= bytes_to_copy;
                dma_q->is_partial_line =
                    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
                return 0;
        }

        dma_q->is_partial_line = 1;

        /* If we don't have a buffer, just return the number of bytes we would
           have copied if we had a buffer. */
        if (!buf) {
                dma_q->bytes_left_in_line -= bytes_to_copy;
                dma_q->is_partial_line =
                    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
                return bytes_to_copy;
        }

        /* copy the data to video buffer */
        cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);

        dma_q->pos += bytes_to_copy;
        dma_q->bytes_left_in_line -= bytes_to_copy;

        if (dma_q->bytes_left_in_line == 0) {
                dma_q->bytes_left_in_line = _line_size;
                dma_q->lines_completed++;
                dma_q->is_partial_line = 0;

                if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {
                        vbi_buffer_filled(dev, dma_q, buf);

                        dma_q->pos = 0;
                        buf = NULL;
                        dma_q->lines_completed = 0;
                }
        }

        return bytes_to_copy;
}

/*
 * video-buf generic routine to get the next available buffer
 */
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
                                    struct cx231xx_buffer **buf)
{
        struct cx231xx_video_mode *vmode =
            container_of(dma_q, struct cx231xx_video_mode, vidq);
        struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
        char *outp;

        if (list_empty(&dma_q->active)) {
                cx231xx_err(DRIVER_NAME ": No active queue to serve\n");
                dev->vbi_mode.isoc_ctl.buf = NULL;
                *buf = NULL;
                return;
        }

        /* Get the next buffer */
        *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);

        /* Clean up the buffer - useful for testing for frame/URB loss */
        outp = videobuf_to_vmalloc(&(*buf)->vb);
        memset(outp, 0, (*buf)->vb.size);

        dev->vbi_mode.isoc_ctl.buf = *buf;

        return;
}

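/*
 * Pick up the next free buffer if we do not have one yet and reset the
 * line and byte counters before copying a new field
 */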
void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
                              struct cx231xx_dmaqueue *dma_q)
{
        struct cx231xx_buffer *buf;

        buf = dev->vbi_mode.isoc_ctl.buf;

        if (buf == NULL) {
                /* first try to get the buffer */
                get_next_vbi_buf(dma_q, &buf);

                dma_q->pos = 0;
                dma_q->current_field = -1;
        }

        dma_q->bytes_left_in_line = dev->width << 1;
        dma_q->lines_completed = 0;
}

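/*
 * Copy the given chunk of VBI data into the vmalloc'ed videobuf buffer
 * at the offset of the line currently being assembled
 */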
int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
                        u8 *p_buffer, u32 bytes_to_copy)
{
        u8 *p_out_buffer = NULL;
        u32 current_line_bytes_copied = 0;
        struct cx231xx_buffer *buf;
        u32 _line_size = dev->width << 1;
        void *startwrite;
        int offset, lencopy;

        buf = dev->vbi_mode.isoc_ctl.buf;

        if (buf == NULL)
                return -EINVAL;

        p_out_buffer = videobuf_to_vmalloc(&buf->vb);

        if (dma_q->bytes_left_in_line != _line_size) {
                current_line_bytes_copied =
                    _line_size - dma_q->bytes_left_in_line;
        }

        offset = (dma_q->lines_completed * _line_size) +
                 current_line_bytes_copied;

        /* prepare destination address */
        startwrite = p_out_buffer + offset;

        lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
                  bytes_to_copy : dma_q->bytes_left_in_line;

        memcpy(startwrite, p_buffer, lencopy);

        return 0;
}

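/* Return 1 when all VBI lines of the current field have been copied */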
u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
                              struct cx231xx_dmaqueue *dma_q)
{
        u32 height = 0;

        height = ((dev->norm & V4L2_STD_625_50) ?
                  PAL_VBI_LINES : NTSC_VBI_LINES);
        return (dma_q->lines_completed == height) ? 1 : 0;
}