/*
 * IEEE 1394 for Linux
 *
 * kernel ISO transmission/reception
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL. See the file COPYING in the root
 * directory of the kernel sources for details.
 */
12 #include <linux/slab.h>
13 #include <linux/sched.h>
16 void hpsb_iso_stop(struct hpsb_iso *iso)
18 if (!(iso->flags & HPSB_ISO_DRIVER_STARTED))
21 iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
22 XMIT_STOP : RECV_STOP, 0);
23 iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
26 void hpsb_iso_shutdown(struct hpsb_iso *iso)
28 if (iso->flags & HPSB_ISO_DRIVER_INIT) {
30 iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
31 XMIT_SHUTDOWN : RECV_SHUTDOWN, 0);
32 iso->flags &= ~HPSB_ISO_DRIVER_INIT;
35 dma_region_free(&iso->data_buf);
39 static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_iso_type type,
40 unsigned int data_buf_size,
41 unsigned int buf_packets,
45 void (*callback)(struct hpsb_iso*))
50 /* make sure driver supports the ISO API */
51 if (!host->driver->isoctl) {
52 printk(KERN_INFO "ieee1394: host driver '%s' does not support the rawiso API\n",
57 /* sanitize parameters */
62 if ((dma_mode < HPSB_ISO_DMA_DEFAULT) || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
63 dma_mode=HPSB_ISO_DMA_DEFAULT;
65 if ((irq_interval < 0) || (irq_interval > buf_packets / 4))
66 irq_interval = buf_packets / 4;
67 if (irq_interval == 0) /* really interrupt for each packet*/
70 if (channel < -1 || channel >= 64)
73 /* channel = -1 is OK for multi-channel recv but not for xmit */
74 if (type == HPSB_ISO_XMIT && channel < 0)
77 /* allocate and write the struct hpsb_iso */
79 iso = kmalloc(sizeof(*iso) + buf_packets * sizeof(struct hpsb_iso_packet_info), GFP_KERNEL);
83 iso->infos = (struct hpsb_iso_packet_info *)(iso + 1);
88 iso->callback = callback;
89 init_waitqueue_head(&iso->waitq);
90 iso->channel = channel;
91 iso->irq_interval = irq_interval;
92 iso->dma_mode = dma_mode;
93 dma_region_init(&iso->data_buf);
94 iso->buf_size = PAGE_ALIGN(data_buf_size);
95 iso->buf_packets = buf_packets;
97 iso->first_packet = 0;
98 spin_lock_init(&iso->lock);
100 if (iso->type == HPSB_ISO_XMIT) {
101 iso->n_ready_packets = iso->buf_packets;
102 dma_direction = PCI_DMA_TODEVICE;
104 iso->n_ready_packets = 0;
105 dma_direction = PCI_DMA_FROMDEVICE;
108 atomic_set(&iso->overflows, 0);
109 iso->bytes_discarded = 0;
113 /* allocate the packet buffer */
114 if (dma_region_alloc(&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
120 hpsb_iso_shutdown(iso);
124 int hpsb_iso_n_ready(struct hpsb_iso* iso)
129 spin_lock_irqsave(&iso->lock, flags);
130 val = iso->n_ready_packets;
131 spin_unlock_irqrestore(&iso->lock, flags);
137 struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
138 unsigned int data_buf_size,
139 unsigned int buf_packets,
143 void (*callback)(struct hpsb_iso*))
145 struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
146 data_buf_size, buf_packets,
147 channel, HPSB_ISO_DMA_DEFAULT, irq_interval, callback);
153 /* tell the driver to start working */
154 if (host->driver->isoctl(iso, XMIT_INIT, 0))
157 iso->flags |= HPSB_ISO_DRIVER_INIT;
161 hpsb_iso_shutdown(iso);
165 struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
166 unsigned int data_buf_size,
167 unsigned int buf_packets,
171 void (*callback)(struct hpsb_iso*))
173 struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
174 data_buf_size, buf_packets,
175 channel, dma_mode, irq_interval, callback);
179 /* tell the driver to start working */
180 if (host->driver->isoctl(iso, RECV_INIT, 0))
183 iso->flags |= HPSB_ISO_DRIVER_INIT;
187 hpsb_iso_shutdown(iso);
191 int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
193 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
195 return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel);
198 int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
200 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
202 return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
205 int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
207 if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
209 return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK, (unsigned long) &mask);
212 int hpsb_iso_recv_flush(struct hpsb_iso *iso)
214 if (iso->type != HPSB_ISO_RECV)
216 return iso->host->driver->isoctl(iso, RECV_FLUSH, 0);
219 static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
221 int retval = iso->host->driver->isoctl(iso, XMIT_START, cycle);
225 iso->flags |= HPSB_ISO_DRIVER_STARTED;
229 int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
231 if (iso->type != HPSB_ISO_XMIT)
234 if (iso->flags & HPSB_ISO_DRIVER_STARTED)
239 else if (cycle >= 8000)
242 iso->xmit_cycle = cycle;
245 prebuffer = iso->buf_packets - 1;
246 else if (prebuffer == 0)
249 if (prebuffer >= iso->buf_packets)
250 prebuffer = iso->buf_packets - 1;
252 iso->prebuffer = prebuffer;
254 /* remember the starting cycle; DMA will commence from xmit_queue_packets()
255 once enough packets have been buffered */
256 iso->start_cycle = cycle;
261 int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
266 if (iso->type != HPSB_ISO_RECV)
269 if (iso->flags & HPSB_ISO_DRIVER_STARTED)
274 else if (cycle >= 8000)
277 isoctl_args[0] = cycle;
282 isoctl_args[1] = tag_mask;
284 isoctl_args[2] = sync;
286 retval = iso->host->driver->isoctl(iso, RECV_START, (unsigned long) &isoctl_args[0]);
290 iso->flags |= HPSB_ISO_DRIVER_STARTED;
294 /* check to make sure the user has not supplied bogus values of offset/len
295 that would cause the kernel to access memory outside the buffer */
297 static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
298 unsigned int offset, unsigned short len,
299 unsigned int *out_offset, unsigned short *out_len)
301 if (offset >= iso->buf_size)
304 /* make sure the packet does not go beyond the end of the buffer */
305 if (offset + len > iso->buf_size)
308 /* check for wrap-around */
309 if (offset + len < offset)
312 /* now we can trust 'offset' and 'length' */
313 *out_offset = offset;
320 int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy)
322 struct hpsb_iso_packet_info *info;
326 if (iso->type != HPSB_ISO_XMIT)
329 /* is there space in the buffer? */
330 if (iso->n_ready_packets <= 0) {
334 info = &iso->infos[iso->first_packet];
336 /* check for bogus offset/length */
337 if (hpsb_iso_check_offset_len(iso, offset, len, &info->offset, &info->len))
343 spin_lock_irqsave(&iso->lock, flags);
345 rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long) info);
349 /* increment cursors */
350 iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
351 iso->xmit_cycle = (iso->xmit_cycle+1) % 8000;
352 iso->n_ready_packets--;
354 if (iso->prebuffer != 0) {
356 if (iso->prebuffer <= 0) {
358 rv = do_iso_xmit_start(iso, iso->start_cycle);
363 spin_unlock_irqrestore(&iso->lock, flags);
367 int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
369 if (iso->type != HPSB_ISO_XMIT)
372 return wait_event_interruptible(iso->waitq, hpsb_iso_n_ready(iso) == iso->buf_packets);
375 void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
378 spin_lock_irqsave(&iso->lock, flags);
380 /* predict the cycle of the next packet to be queued */
382 /* jump ahead by the number of packets that are already buffered */
383 cycle += iso->buf_packets - iso->n_ready_packets;
386 iso->xmit_cycle = cycle;
387 iso->n_ready_packets++;
388 iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
390 if (iso->n_ready_packets == iso->buf_packets || error != 0) {
391 /* the buffer has run empty! */
392 atomic_inc(&iso->overflows);
395 spin_unlock_irqrestore(&iso->lock, flags);
398 void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
399 u16 total_len, u16 cycle, u8 channel, u8 tag, u8 sy)
402 spin_lock_irqsave(&iso->lock, flags);
404 if (iso->n_ready_packets == iso->buf_packets) {
406 atomic_inc(&iso->overflows);
407 /* Record size of this discarded packet */
408 iso->bytes_discarded += total_len;
410 struct hpsb_iso_packet_info *info = &iso->infos[iso->pkt_dma];
411 info->offset = offset;
413 info->total_len = total_len;
415 info->channel = channel;
419 iso->pkt_dma = (iso->pkt_dma+1) % iso->buf_packets;
420 iso->n_ready_packets++;
423 spin_unlock_irqrestore(&iso->lock, flags);
426 int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
432 if (iso->type != HPSB_ISO_RECV)
435 spin_lock_irqsave(&iso->lock, flags);
436 for (i = 0; i < n_packets; i++) {
437 rv = iso->host->driver->isoctl(iso, RECV_RELEASE,
438 (unsigned long) &iso->infos[iso->first_packet]);
442 iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
443 iso->n_ready_packets--;
445 /* release memory from packets discarded when queue was full */
446 if (iso->n_ready_packets == 0) { /* Release only after all prior packets handled */
447 if (iso->bytes_discarded != 0) {
448 struct hpsb_iso_packet_info inf;
449 inf.total_len = iso->bytes_discarded;
450 iso->host->driver->isoctl(iso, RECV_RELEASE,
451 (unsigned long) &inf);
452 iso->bytes_discarded = 0;
456 spin_unlock_irqrestore(&iso->lock, flags);
460 void hpsb_iso_wake(struct hpsb_iso *iso)
462 wake_up_interruptible(&iso->waitq);