/*
 *  cx18 buffer queues
 *
 *  Derived from ivtv-queue.c
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *  02111-1307  USA
 */

#include "cx18-driver.h"
#include "cx18-streams.h"
#include "cx18-queue.h"
#include "cx18-scb.h"

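/* Byte-swap the payload of a buffer, one 32-bit word at a time */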
void cx18_buf_swap(struct cx18_buffer *buf)
{
        int i;

        for (i = 0; i < buf->bytesused; i += 4)
                swab32s((u32 *)(buf->buf + i));
}

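/* Reset a queue to the empty state: no buffers, no data */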
void cx18_queue_init(struct cx18_queue *q)
{
        INIT_LIST_HEAD(&q->list);
        atomic_set(&q->buffers, 0);
        q->bytesused = 0;
}

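/*
 * Add a buffer to the tail of a queue under the stream's qlock.
 * Buffers headed back to q_free have their state wiped first, so they
 * come off the free queue looking empty.
 */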
void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
                struct cx18_queue *q)
{
        unsigned long flags = 0;

        /* clear the buffer if it is going to be enqueued to the free queue */
        if (q == &s->q_free) {
                buf->bytesused = 0;
                buf->readpos = 0;
                buf->b_flags = 0;
        }
        spin_lock_irqsave(&s->qlock, flags);
        list_add_tail(&buf->list, &q->list);
        atomic_inc(&q->buffers);
        q->bytesused += buf->bytesused - buf->readpos;
        spin_unlock_irqrestore(&s->qlock, flags);
}

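/*
 * Remove and return the buffer at the head of a queue, or NULL if the
 * queue is empty.  The queue's buffer and byte counts are updated under
 * the stream's qlock.
 */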
struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{
        struct cx18_buffer *buf = NULL;
        unsigned long flags = 0;

        spin_lock_irqsave(&s->qlock, flags);
        if (!list_empty(&q->list)) {
                buf = list_entry(q->list.next, struct cx18_buffer, list);
                list_del_init(q->list.next);
                atomic_dec(&q->buffers);
                q->bytesused -= buf->bytesused - buf->readpos;
        }
        spin_unlock_irqrestore(&s->qlock, flags);
        return buf;
}

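/*
 * Find the q_free buffer with the given id, record how many bytes it
 * now holds, and move it to q_full.  Returns NULL (and logs an error)
 * if no buffer with that id is on the free queue.
 */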
struct cx18_buffer *cx18_queue_get_buf_irq(struct cx18_stream *s, u32 id,
        u32 bytesused)
{
        struct cx18 *cx = s->cx;
        struct list_head *p;

        spin_lock(&s->qlock);
        list_for_each(p, &s->q_free.list) {
                struct cx18_buffer *buf =
                        list_entry(p, struct cx18_buffer, list);

                if (buf->id != id)
                        continue;
                buf->bytesused = bytesused;
                atomic_dec(&s->q_free.buffers);
                atomic_inc(&s->q_full.buffers);
                s->q_full.bytesused += buf->bytesused;
                list_move_tail(&buf->list, &s->q_full.list);
                spin_unlock(&s->qlock);
                return buf;
        }
        spin_unlock(&s->qlock);
        CX18_ERR("Cannot find buffer %d for stream %s\n", id, s->name);
        return NULL;
}

/* Move all buffers of a queue to q_free, while flushing the buffers */
static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
{
        unsigned long flags;
        struct cx18_buffer *buf;

        if (q == &s->q_free)
                return;

        spin_lock_irqsave(&s->qlock, flags);
        while (!list_empty(&q->list)) {
                buf = list_entry(q->list.next, struct cx18_buffer, list);
                list_move_tail(q->list.next, &s->q_free.list);
                buf->bytesused = buf->readpos = buf->b_flags = 0;
                atomic_inc(&s->q_free.buffers);
        }
        cx18_queue_init(q);
        spin_unlock_irqrestore(&s->qlock, flags);
}

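/* Return all buffers on the I/O and full queues back to q_free */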
void cx18_flush_queues(struct cx18_stream *s)
{
        cx18_queue_flush(s, &s->q_io);
        cx18_queue_flush(s, &s->q_full);
}

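/*
 * Allocate and DMA-map the stream's buffers and place them all on
 * q_free.  Fails if the buffers' MDL entries would not fit in the
 * reserved SCB area, or if memory runs out.
 */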
int cx18_stream_alloc(struct cx18_stream *s)
{
        struct cx18 *cx = s->cx;
        int i;

        CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%dkB total)\n",
                s->name, s->buffers, s->buf_size,
                s->buffers * s->buf_size / 1024);

        if (((char __iomem *)&cx->scb->cpu_mdl[cx->mdl_offset + s->buffers] -
                        (char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
                unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
                                        ((char __iomem *)cx->scb->cpu_mdl));

                CX18_ERR("Too many buffers, cannot fit in SCB area\n");
                CX18_ERR("Max buffers = %zd\n",
                        bufsz / sizeof(struct cx18_mdl));
                return -ENOMEM;
        }

        s->mdl_offset = cx->mdl_offset;

        /* allocate stream buffers. Initially all buffers are in q_free. */
        for (i = 0; i < s->buffers; i++) {
                struct cx18_buffer *buf = kzalloc(sizeof(struct cx18_buffer),
                                GFP_KERNEL|__GFP_NOWARN);

                if (buf == NULL)
                        break;
                buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
                if (buf->buf == NULL) {
                        kfree(buf);
                        break;
                }
                buf->id = cx->buffer_id++;
                INIT_LIST_HEAD(&buf->list);
                buf->dma_handle = pci_map_single(s->cx->dev,
                                buf->buf, s->buf_size, s->dma);
                cx18_buf_sync_for_cpu(s, buf);
                cx18_enqueue(s, buf, &s->q_free);
        }
        if (i == s->buffers) {
                cx->mdl_offset += s->buffers;
                return 0;
        }
        /* One or more allocations failed; free whatever was allocated */
        CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
        cx18_stream_free(s);
        return -ENOMEM;
}

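/* Undo cx18_stream_alloc(): return all buffers to q_free, then unmap
   and free each one */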
void cx18_stream_free(struct cx18_stream *s)
{
        struct cx18_buffer *buf;

        /* move all buffers to q_free */
        cx18_flush_queues(s);

        /* empty q_free */
        while ((buf = cx18_dequeue(s, &s->q_free))) {
                pci_unmap_single(s->cx->dev, buf->dma_handle,
                                s->buf_size, s->dma);
                kfree(buf->buf);
                kfree(buf);
        }
}