4 * Raw interface to the bus
6 * Copyright (C) 1999, 2000 Andreas E. Bombe
7 * 2001, 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
8 * 2002 Christian Toegel <christian.toegel@gmx.at>
10 * This code is licensed under the GPL. See the file COPYING in the root
11 * directory of the kernel sources for details.
16 * Manfred Weihs <weihs@ict.tuwien.ac.at>
17 * configuration ROM manipulation
18 * address range mapping
19 * adaptation for new (transparent) loopback mechanism
20 * sending of arbitrary async packets
21 * Christian Toegel <christian.toegel@gmx.at>
22 * address range mapping
24 * transmit physical packet
25 * busreset notification control (switch on/off)
26 * busreset with selection of type (short/long)
30 #include <linux/kernel.h>
31 #include <linux/list.h>
32 #include <linux/string.h>
33 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/smp_lock.h>
39 #include <linux/interrupt.h>
40 #include <linux/vmalloc.h>
41 #include <linux/cdev.h>
42 #include <asm/uaccess.h>
43 #include <asm/atomic.h>
44 #include <linux/compat.h>
48 #include "ieee1394_types.h"
49 #include "ieee1394_core.h"
52 #include "highlevel.h"
54 #include "ieee1394_transactions.h"
56 #include "raw1394-private.h"
58 #define int2ptr(x) ((void __user *)(unsigned long)x)
59 #define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
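/*
 * int2ptr()/ptr2int() shuttle user-space pointers through the u64 fields
 * of struct raw1394_request, so one request layout serves both 32-bit and
 * 64-bit userland.  Typical use, as at the call sites further down:
 *
 *	req->req.recvb = ptr2int(fi->iso_buffer);
 *	if (copy_to_user(int2ptr(req->req.recvb), req->data, req->req.length))
 *		req->req.error = RAW1394_ERROR_MEMFAULT;
 */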
61 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
66 #define DBGMSG(fmt, args...) \
67 printk(KERN_INFO "raw1394:" fmt "\n" , ## args)
69 #define DBGMSG(fmt, args...)
72 static LIST_HEAD(host_info_list);
73 static int host_count;
74 static DEFINE_SPINLOCK(host_info_lock);
75 static atomic_t internal_generation = ATOMIC_INIT(0);
77 static atomic_t iso_buffer_size;
78 static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */
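/*
 * Incoming iso packets and FCP requests are copied into iso_block_store
 * buffers until user space has read them.  iso_buffer_size tracks the
 * total amount queued; once a packet would push it past iso_buffer_max,
 * the packet is dropped (see iso_receive() and fcp_request() below).
 */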
80 static struct hpsb_highlevel raw1394_highlevel;
82 static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
83 u64 addr, size_t length, u16 flags);
84 static int arm_write(struct hpsb_host *host, int nodeid, int destid,
85 quadlet_t * data, u64 addr, size_t length, u16 flags);
86 static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
87 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
89 static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
90 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
92 static struct hpsb_address_ops arm_ops = {
99 static void queue_complete_cb(struct pending_request *req);
101 static struct pending_request *__alloc_pending_request(gfp_t flags)
103 struct pending_request *req;
105 req = kzalloc(sizeof(*req), flags);
107 INIT_LIST_HEAD(&req->list);
112 static inline struct pending_request *alloc_pending_request(void)
114 return __alloc_pending_request(SLAB_KERNEL);
117 static void free_pending_request(struct pending_request *req)
120 if (atomic_dec_and_test(&req->ibs->refcount)) {
121 atomic_sub(req->ibs->data_size, &iso_buffer_size);
124 } else if (req->free_data) {
127 hpsb_free_packet(req->packet);
131 /* fi->reqlists_lock must be taken */
132 static void __queue_complete_req(struct pending_request *req)
134 struct file_info *fi = req->file_info;
135 list_del(&req->list);
136 list_add_tail(&req->list, &fi->req_complete);
138 up(&fi->complete_sem);
139 wake_up_interruptible(&fi->poll_wait_complete);
142 static void queue_complete_req(struct pending_request *req)
145 struct file_info *fi = req->file_info;
147 spin_lock_irqsave(&fi->reqlists_lock, flags);
148 __queue_complete_req(req);
149 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
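/*
 * Completion callback for async transactions: fold the hardware ack code
 * and the rcode from the response header into req.error, release the
 * transaction label where one was allocated, and move the request onto
 * the file's completion queue.
 */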
152 static void queue_complete_cb(struct pending_request *req)
154 struct hpsb_packet *packet = req->packet;
155 int rcode = (packet->header[1] >> 12) & 0xf;
157 switch (packet->ack_code) {
159 case ACKX_SEND_ERROR:
160 req->req.error = RAW1394_ERROR_SEND_ERROR;
163 req->req.error = RAW1394_ERROR_ABORTED;
166 req->req.error = RAW1394_ERROR_TIMEOUT;
169 req->req.error = (packet->ack_code << 16) | rcode;
173 if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
177 if ((req->req.type == RAW1394_REQ_ASYNC_READ) ||
178 (req->req.type == RAW1394_REQ_ASYNC_WRITE) ||
179 (req->req.type == RAW1394_REQ_ASYNC_STREAM) ||
180 (req->req.type == RAW1394_REQ_LOCK) ||
181 (req->req.type == RAW1394_REQ_LOCK64))
182 hpsb_free_tlabel(packet);
184 queue_complete_req(req);
187 static void add_host(struct hpsb_host *host)
189 struct host_info *hi;
192 hi = kmalloc(sizeof(*hi), GFP_KERNEL);
195 INIT_LIST_HEAD(&hi->list);
197 INIT_LIST_HEAD(&hi->file_info_list);
199 spin_lock_irqsave(&host_info_lock, flags);
200 list_add_tail(&hi->list, &host_info_list);
202 spin_unlock_irqrestore(&host_info_lock, flags);
205 atomic_inc(&internal_generation);
208 static struct host_info *find_host_info(struct hpsb_host *host)
210 struct host_info *hi;
212 list_for_each_entry(hi, &host_info_list, list)
213 if (hi->host == host)
219 static void remove_host(struct hpsb_host *host)
221 struct host_info *hi;
224 spin_lock_irqsave(&host_info_lock, flags);
225 hi = find_host_info(host);
231 FIXME: address ranges should be removed
232 and fileinfo states should be initialized
233 (including setting generation to
234 internal-generation ...)
237 spin_unlock_irqrestore(&host_info_lock, flags);
240 printk(KERN_ERR "raw1394: attempt to remove unknown host "
247 atomic_inc(&internal_generation);
250 static void host_reset(struct hpsb_host *host)
253 struct host_info *hi;
254 struct file_info *fi;
255 struct pending_request *req;
257 spin_lock_irqsave(&host_info_lock, flags);
258 hi = find_host_info(host);
261 list_for_each_entry(fi, &hi->file_info_list, list) {
262 if (fi->notification == RAW1394_NOTIFY_ON) {
263 req = __alloc_pending_request(SLAB_ATOMIC);
267 req->req.type = RAW1394_REQ_BUS_RESET;
268 req->req.generation =
269 get_hpsb_generation(host);
270 req->req.misc = (host->node_id << 16)
272 if (fi->protocol_version > 3) {
279 queue_complete_req(req);
284 spin_unlock_irqrestore(&host_info_lock, flags);
287 static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data,
291 struct host_info *hi;
292 struct file_info *fi;
293 struct pending_request *req, *req_next;
294 struct iso_block_store *ibs = NULL;
297 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
298 HPSB_INFO("dropped iso packet");
302 spin_lock_irqsave(&host_info_lock, flags);
303 hi = find_host_info(host);
306 list_for_each_entry(fi, &hi->file_info_list, list) {
307 if (!(fi->listen_channels & (1ULL << channel)))
310 req = __alloc_pending_request(SLAB_ATOMIC);
315 ibs = kmalloc(sizeof(*ibs) + length,
322 atomic_add(length, &iso_buffer_size);
323 atomic_set(&ibs->refcount, 0);
324 ibs->data_size = length;
325 memcpy(ibs->data, data, length);
328 atomic_inc(&ibs->refcount);
332 req->data = ibs->data;
333 req->req.type = RAW1394_REQ_ISO_RECEIVE;
334 req->req.generation = get_hpsb_generation(host);
336 req->req.recvb = ptr2int(fi->iso_buffer);
337 req->req.length = min(length, fi->iso_buffer_length);
339 list_add_tail(&req->list, &reqs);
342 spin_unlock_irqrestore(&host_info_lock, flags);
344 list_for_each_entry_safe(req, req_next, &reqs, list)
345 queue_complete_req(req);
348 static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
349 int cts, u8 * data, size_t length)
352 struct host_info *hi;
353 struct file_info *fi;
354 struct pending_request *req, *req_next;
355 struct iso_block_store *ibs = NULL;
358 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
359 HPSB_INFO("dropped fcp request");
363 spin_lock_irqsave(&host_info_lock, flags);
364 hi = find_host_info(host);
367 list_for_each_entry(fi, &hi->file_info_list, list) {
371 req = __alloc_pending_request(SLAB_ATOMIC);
376 ibs = kmalloc(sizeof(*ibs) + length,
383 atomic_add(length, &iso_buffer_size);
384 atomic_set(&ibs->refcount, 0);
385 ibs->data_size = length;
386 memcpy(ibs->data, data, length);
389 atomic_inc(&ibs->refcount);
393 req->data = ibs->data;
394 req->req.type = RAW1394_REQ_FCP_REQUEST;
395 req->req.generation = get_hpsb_generation(host);
396 req->req.misc = nodeid | (direction << 16);
397 req->req.recvb = ptr2int(fi->fcp_buffer);
398 req->req.length = length;
400 list_add_tail(&req->list, &reqs);
403 spin_unlock_irqrestore(&host_info_lock, flags);
405 list_for_each_entry_safe(req, req_next, &reqs, list)
406 queue_complete_req(req);
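/*
 * 32-bit compat layer: a 32-bit userland hands in the packed
 * struct compat_raw1394_req below.  raw1394_compat_write() expands it into
 * a full struct raw1394_request in compat_alloc_user_space(), and
 * raw1394_compat_read() copies results back in the compat layout.
 * read()/write() choose the path by comparing count against the two
 * structure sizes.
 */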
410 struct compat_raw1394_req {
424 } __attribute__((packed));
426 static const char __user *raw1394_compat_write(const char __user *buf)
428 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
429 struct raw1394_request __user *r;
430 r = compat_alloc_user_space(sizeof(struct raw1394_request));
432 #define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
434 if (copy_in_user(r, cr, sizeof(struct compat_raw1394_req)) ||
439 return ERR_PTR(-EFAULT);
440 return (const char __user *)r;
444 #define P(x) __put_user(r->x, &cr->x)
447 raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
449 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
450 if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
461 return sizeof(struct compat_raw1394_req);
468 static ssize_t raw1394_read(struct file *file, char __user * buffer,
469 size_t count, loff_t * offset_is_ignored)
472 struct file_info *fi = (struct file_info *)file->private_data;
473 struct list_head *lh;
474 struct pending_request *req;
478 if (count == sizeof(struct compat_raw1394_req)) {
482 if (count != sizeof(struct raw1394_request)) {
486 if (!access_ok(VERIFY_WRITE, buffer, count)) {
490 if (file->f_flags & O_NONBLOCK) {
491 if (down_trylock(&fi->complete_sem)) {
495 if (down_interruptible(&fi->complete_sem)) {
500 spin_lock_irqsave(&fi->reqlists_lock, flags);
501 lh = fi->req_complete.next;
503 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
505 req = list_entry(lh, struct pending_request, list);
507 if (req->req.length) {
508 if (copy_to_user(int2ptr(req->req.recvb), req->data,
510 req->req.error = RAW1394_ERROR_MEMFAULT;
515 if (count == sizeof(struct compat_raw1394_req) &&
516 sizeof(struct compat_raw1394_req) !=
517 sizeof(struct raw1394_request)) {
518 ret = raw1394_compat_read(buffer, &req->req);
522 if (copy_to_user(buffer, &req->req, sizeof(req->req))) {
526 ret = (ssize_t) sizeof(struct raw1394_request);
529 free_pending_request(req);
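/*
 * Each open file walks a small state machine: "opened" until the client
 * sends RAW1394_REQ_INITIALIZE, "initialized" until it binds to a card
 * with RAW1394_REQ_SET_CARD, and "connected" from then on.
 * raw1394_write() dispatches to the state_*() handler that matches
 * fi->state.
 */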
533 static int state_opened(struct file_info *fi, struct pending_request *req)
535 if (req->req.type == RAW1394_REQ_INITIALIZE) {
536 switch (req->req.misc) {
537 case RAW1394_KERNELAPI_VERSION:
539 fi->state = initialized;
540 fi->protocol_version = req->req.misc;
541 req->req.error = RAW1394_ERROR_NONE;
542 req->req.generation = atomic_read(&internal_generation);
546 req->req.error = RAW1394_ERROR_COMPAT;
547 req->req.misc = RAW1394_KERNELAPI_VERSION;
550 req->req.error = RAW1394_ERROR_STATE_ORDER;
554 queue_complete_req(req);
555 return sizeof(struct raw1394_request);
558 static int state_initialized(struct file_info *fi, struct pending_request *req)
561 struct host_info *hi;
562 struct raw1394_khost_list *khl;
564 if (req->req.generation != atomic_read(&internal_generation)) {
565 req->req.error = RAW1394_ERROR_GENERATION;
566 req->req.generation = atomic_read(&internal_generation);
568 queue_complete_req(req);
569 return sizeof(struct raw1394_request);
572 switch (req->req.type) {
573 case RAW1394_REQ_LIST_CARDS:
574 spin_lock_irqsave(&host_info_lock, flags);
575 khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC);
578 req->req.misc = host_count;
579 req->data = (quadlet_t *) khl;
581 list_for_each_entry(hi, &host_info_list, list) {
582 khl->nodes = hi->host->node_count;
583 strcpy(khl->name, hi->host->driver->name);
587 spin_unlock_irqrestore(&host_info_lock, flags);
590 req->req.error = RAW1394_ERROR_NONE;
591 req->req.length = min(req->req.length,
593 (struct raw1394_khost_list)
601 case RAW1394_REQ_SET_CARD:
602 spin_lock_irqsave(&host_info_lock, flags);
603 if (req->req.misc < host_count) {
604 list_for_each_entry(hi, &host_info_list, list) {
605 if (!req->req.misc--)
608 get_device(&hi->host->device); // XXX Need to handle failure case
609 list_add_tail(&fi->list, &hi->file_info_list);
611 fi->state = connected;
613 req->req.error = RAW1394_ERROR_NONE;
614 req->req.generation = get_hpsb_generation(fi->host);
615 req->req.misc = (fi->host->node_id << 16)
616 | fi->host->node_count;
617 if (fi->protocol_version > 3) {
619 NODEID_TO_NODE(fi->host->irm_id) << 8;
622 req->req.error = RAW1394_ERROR_INVALID_ARG;
624 spin_unlock_irqrestore(&host_info_lock, flags);
630 req->req.error = RAW1394_ERROR_STATE_ORDER;
635 queue_complete_req(req);
636 return sizeof(struct raw1394_request);
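/*
 * ISO listen requests encode the channel in req.misc: values 0..63 start
 * listening on that channel (recvb/length describe the receive buffer),
 * while the one's complement (~channel) stops listening again.
 */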
639 static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
641 int channel = req->req.misc;
643 if ((channel > 63) || (channel < -64)) {
644 req->req.error = RAW1394_ERROR_INVALID_ARG;
645 } else if (channel >= 0) {
646 /* allocate channel req.misc */
647 if (fi->listen_channels & (1ULL << channel)) {
648 req->req.error = RAW1394_ERROR_ALREADY;
650 if (hpsb_listen_channel
651 (&raw1394_highlevel, fi->host, channel)) {
652 req->req.error = RAW1394_ERROR_ALREADY;
654 fi->listen_channels |= 1ULL << channel;
655 fi->iso_buffer = int2ptr(req->req.recvb);
656 fi->iso_buffer_length = req->req.length;
660 /* deallocate channel (one's complement neg) req.misc */
663 if (fi->listen_channels & (1ULL << channel)) {
664 hpsb_unlisten_channel(&raw1394_highlevel, fi->host,
666 fi->listen_channels &= ~(1ULL << channel);
668 req->req.error = RAW1394_ERROR_INVALID_ARG;
673 queue_complete_req(req);
676 static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
679 if (fi->fcp_buffer) {
680 req->req.error = RAW1394_ERROR_ALREADY;
682 fi->fcp_buffer = int2ptr(req->req.recvb);
685 if (!fi->fcp_buffer) {
686 req->req.error = RAW1394_ERROR_ALREADY;
688 fi->fcp_buffer = NULL;
693 queue_complete_req(req);
696 static int handle_async_request(struct file_info *fi,
697 struct pending_request *req, int node)
700 struct hpsb_packet *packet = NULL;
701 u64 addr = req->req.address & 0xffffffffffffULL;
703 switch (req->req.type) {
704 case RAW1394_REQ_ASYNC_READ:
705 DBGMSG("read_request called");
707 hpsb_make_readpacket(fi->host, node, addr, req->req.length);
712 if (req->req.length == 4)
713 req->data = &packet->header[3];
715 req->data = packet->data;
719 case RAW1394_REQ_ASYNC_WRITE:
720 DBGMSG("write_request called");
722 packet = hpsb_make_writepacket(fi->host, node, addr, NULL,
727 if (req->req.length == 4) {
729 (&packet->header[3], int2ptr(req->req.sendb),
731 req->req.error = RAW1394_ERROR_MEMFAULT;
734 (packet->data, int2ptr(req->req.sendb),
736 req->req.error = RAW1394_ERROR_MEMFAULT;
742 case RAW1394_REQ_ASYNC_STREAM:
743 DBGMSG("stream_request called");
746 hpsb_make_streampacket(fi->host, NULL, req->req.length,
747 node & 0x3f /*channel */ ,
748 (req->req.misc >> 16) & 0x3,
749 req->req.misc & 0xf);
753 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
755 req->req.error = RAW1394_ERROR_MEMFAULT;
760 case RAW1394_REQ_LOCK:
761 DBGMSG("lock_request called");
762 if ((req->req.misc == EXTCODE_FETCH_ADD)
763 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
764 if (req->req.length != 4) {
765 req->req.error = RAW1394_ERROR_INVALID_ARG;
769 if (req->req.length != 8) {
770 req->req.error = RAW1394_ERROR_INVALID_ARG;
775 packet = hpsb_make_lockpacket(fi->host, node, addr,
776 req->req.misc, NULL, 0);
780 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
782 req->req.error = RAW1394_ERROR_MEMFAULT;
786 req->data = packet->data;
790 case RAW1394_REQ_LOCK64:
791 DBGMSG("lock64_request called");
792 if ((req->req.misc == EXTCODE_FETCH_ADD)
793 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
794 if (req->req.length != 8) {
795 req->req.error = RAW1394_ERROR_INVALID_ARG;
799 if (req->req.length != 16) {
800 req->req.error = RAW1394_ERROR_INVALID_ARG;
804 packet = hpsb_make_lock64packet(fi->host, node, addr,
805 req->req.misc, NULL, 0);
809 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
811 req->req.error = RAW1394_ERROR_MEMFAULT;
815 req->data = packet->data;
820 req->req.error = RAW1394_ERROR_STATE_ORDER;
823 req->packet = packet;
825 if (req->req.error) {
827 queue_complete_req(req);
828 return sizeof(struct raw1394_request);
831 hpsb_set_packet_complete_task(packet,
832 (void (*)(void *))queue_complete_cb, req);
834 spin_lock_irqsave(&fi->reqlists_lock, flags);
835 list_add_tail(&req->list, &fi->req_pending);
836 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
838 packet->generation = req->req.generation;
840 if (hpsb_send_packet(packet) < 0) {
841 req->req.error = RAW1394_ERROR_SEND_ERROR;
843 hpsb_free_tlabel(packet);
844 queue_complete_req(req);
846 return sizeof(struct raw1394_request);
849 static int handle_iso_send(struct file_info *fi, struct pending_request *req,
853 struct hpsb_packet *packet;
855 packet = hpsb_make_isopacket(fi->host, req->req.length, channel & 0x3f,
856 (req->req.misc >> 16) & 0x3,
857 req->req.misc & 0xf);
861 packet->speed_code = req->req.address & 0x3;
863 req->packet = packet;
865 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
867 req->req.error = RAW1394_ERROR_MEMFAULT;
869 queue_complete_req(req);
870 return sizeof(struct raw1394_request);
874 hpsb_set_packet_complete_task(packet,
875 (void (*)(void *))queue_complete_req,
878 spin_lock_irqsave(&fi->reqlists_lock, flags);
879 list_add_tail(&req->list, &fi->req_pending);
880 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
882 /* Update the generation of the packet just before sending. */
883 packet->generation = req->req.generation;
885 if (hpsb_send_packet(packet) < 0) {
886 req->req.error = RAW1394_ERROR_SEND_ERROR;
887 queue_complete_req(req);
890 return sizeof(struct raw1394_request);
893 static int handle_async_send(struct file_info *fi, struct pending_request *req)
896 struct hpsb_packet *packet;
897 int header_length = req->req.misc & 0xffff;
898 int expect_response = req->req.misc >> 16;
900 if ((header_length > req->req.length) || (header_length < 12)) {
901 req->req.error = RAW1394_ERROR_INVALID_ARG;
903 queue_complete_req(req);
904 return sizeof(struct raw1394_request);
907 packet = hpsb_alloc_packet(req->req.length - header_length);
908 req->packet = packet;
912 if (copy_from_user(packet->header, int2ptr(req->req.sendb),
914 req->req.error = RAW1394_ERROR_MEMFAULT;
916 queue_complete_req(req);
917 return sizeof(struct raw1394_request);
921 (packet->data, int2ptr(req->req.sendb) + header_length,
922 packet->data_size)) {
923 req->req.error = RAW1394_ERROR_MEMFAULT;
925 queue_complete_req(req);
926 return sizeof(struct raw1394_request);
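/* The caller supplied a raw async packet header; pull destination,
 * transaction code and transaction label out of its first quadlet. */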
929 packet->type = hpsb_async;
930 packet->node_id = packet->header[0] >> 16;
931 packet->tcode = (packet->header[0] >> 4) & 0xf;
932 packet->tlabel = (packet->header[0] >> 10) & 0x3f;
933 packet->host = fi->host;
934 packet->expect_response = expect_response;
935 packet->header_size = header_length;
936 packet->data_size = req->req.length - header_length;
939 hpsb_set_packet_complete_task(packet,
940 (void (*)(void *))queue_complete_cb, req);
942 spin_lock_irqsave(&fi->reqlists_lock, flags);
943 list_add_tail(&req->list, &fi->req_pending);
944 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
946 /* Update the generation of the packet just before sending. */
947 packet->generation = req->req.generation;
949 if (hpsb_send_packet(packet) < 0) {
950 req->req.error = RAW1394_ERROR_SEND_ERROR;
951 queue_complete_req(req);
954 return sizeof(struct raw1394_request);
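/*
 * The ARM (address range mapping) handlers below notify user space by
 * queueing a request whose data buffer is laid out back to back as
 * struct arm_request_response, struct arm_request, struct arm_response
 * and then any payload bytes.  The embedded pointers are rebased onto the
 * client's recvb address, so the same layout reappears in the user's
 * buffer.
 */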
957 static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
958 u64 addr, size_t length, u16 flags)
960 unsigned long irqflags;
961 struct pending_request *req;
962 struct host_info *hi;
963 struct file_info *fi = NULL;
964 struct list_head *entry;
965 struct arm_addr *arm_addr = NULL;
966 struct arm_request *arm_req = NULL;
967 struct arm_response *arm_resp = NULL;
968 int found = 0, size = 0, rcode = -1;
969 struct arm_request_response *arm_req_resp = NULL;
971 DBGMSG("arm_read called by node: %X"
972 "addr: %4.4x %8.8x length: %Zu", nodeid,
973 (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
975 spin_lock_irqsave(&host_info_lock, irqflags);
976 hi = find_host_info(host); /* search address-entry */
978 list_for_each_entry(fi, &hi->file_info_list, list) {
979 entry = fi->addr_list.next;
980 while (entry != &(fi->addr_list)) {
982 list_entry(entry, struct arm_addr,
984 if (((arm_addr->start) <= (addr))
985 && ((arm_addr->end) >= (addr + length))) {
998 printk(KERN_ERR "raw1394: arm_read FAILED addr_entry not found"
999 " -> rcode_address_error\n");
1000 spin_unlock_irqrestore(&host_info_lock, irqflags);
1001 return (RCODE_ADDRESS_ERROR);
1003 DBGMSG("arm_read addr_entry FOUND");
1005 if (arm_addr->rec_length < length) {
1006 DBGMSG("arm_read blocklength too big -> rcode_data_error");
1007 rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
1010 if (arm_addr->access_rights & ARM_READ) {
1011 if (!(arm_addr->client_transactions & ARM_READ)) {
1013 (arm_addr->addr_space_buffer) + (addr -
1017 DBGMSG("arm_read -> (rcode_complete)");
1018 rcode = RCODE_COMPLETE;
1021 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1022 DBGMSG("arm_read -> rcode_type_error (access denied)");
1025 if (arm_addr->notification_options & ARM_READ) {
1026 DBGMSG("arm_read -> entering notification-section");
1027 req = __alloc_pending_request(SLAB_ATOMIC);
1029 DBGMSG("arm_read -> rcode_conflict_error");
1030 spin_unlock_irqrestore(&host_info_lock, irqflags);
1031 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1032 The request may be retried */
1034 if (rcode == RCODE_COMPLETE) {
1036 sizeof(struct arm_request) +
1037 sizeof(struct arm_response) +
1038 length * sizeof(byte_t) +
1039 sizeof(struct arm_request_response);
1042 sizeof(struct arm_request) +
1043 sizeof(struct arm_response) +
1044 sizeof(struct arm_request_response);
1046 req->data = kmalloc(size, SLAB_ATOMIC);
1048 free_pending_request(req);
1049 DBGMSG("arm_read -> rcode_conflict_error");
1050 spin_unlock_irqrestore(&host_info_lock, irqflags);
1051 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1052 The request may be retried */
1055 req->file_info = fi;
1056 req->req.type = RAW1394_REQ_ARM;
1057 req->req.generation = get_hpsb_generation(host);
1059 (((length << 16) & (0xFFFF0000)) | (ARM_READ & 0xFF));
1060 req->req.tag = arm_addr->arm_tag;
1061 req->req.recvb = arm_addr->recvb;
1062 req->req.length = size;
1063 arm_req_resp = (struct arm_request_response *)(req->data);
1064 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1067 arm_request_response)));
1069 (struct arm_response *)((byte_t *) (arm_req) +
1070 (sizeof(struct arm_request)));
1071 arm_req->buffer = NULL;
1072 arm_resp->buffer = NULL;
1073 if (rcode == RCODE_COMPLETE) {
1075 (byte_t *) arm_resp + sizeof(struct arm_response);
1077 (arm_addr->addr_space_buffer) + (addr -
1082 int2ptr((arm_addr->recvb) +
1083 sizeof(struct arm_request_response) +
1084 sizeof(struct arm_request) +
1085 sizeof(struct arm_response));
1087 arm_resp->buffer_length =
1088 (rcode == RCODE_COMPLETE) ? length : 0;
1089 arm_resp->response_code = rcode;
1090 arm_req->buffer_length = 0;
1091 arm_req->generation = req->req.generation;
1092 arm_req->extended_transaction_code = 0;
1093 arm_req->destination_offset = addr;
1094 arm_req->source_nodeid = nodeid;
1095 arm_req->destination_nodeid = host->node_id;
1096 arm_req->tlabel = (flags >> 10) & 0x3f;
1097 arm_req->tcode = (flags >> 4) & 0x0f;
1098 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1100 arm_request_response));
1101 arm_req_resp->response =
1102 int2ptr((arm_addr->recvb) +
1103 sizeof(struct arm_request_response) +
1104 sizeof(struct arm_request));
1105 queue_complete_req(req);
1107 spin_unlock_irqrestore(&host_info_lock, irqflags);
1111 static int arm_write(struct hpsb_host *host, int nodeid, int destid,
1112 quadlet_t * data, u64 addr, size_t length, u16 flags)
1114 unsigned long irqflags;
1115 struct pending_request *req;
1116 struct host_info *hi;
1117 struct file_info *fi = NULL;
1118 struct list_head *entry;
1119 struct arm_addr *arm_addr = NULL;
1120 struct arm_request *arm_req = NULL;
1121 struct arm_response *arm_resp = NULL;
1122 int found = 0, size = 0, rcode = -1, length_conflict = 0;
1123 struct arm_request_response *arm_req_resp = NULL;
1125 DBGMSG("arm_write called by node: %X"
1126 "addr: %4.4x %8.8x length: %Zu", nodeid,
1127 (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
1129 spin_lock_irqsave(&host_info_lock, irqflags);
1130 hi = find_host_info(host); /* search address-entry */
1132 list_for_each_entry(fi, &hi->file_info_list, list) {
1133 entry = fi->addr_list.next;
1134 while (entry != &(fi->addr_list)) {
1136 list_entry(entry, struct arm_addr,
1138 if (((arm_addr->start) <= (addr))
1139 && ((arm_addr->end) >= (addr + length))) {
1143 entry = entry->next;
1152 printk(KERN_ERR "raw1394: arm_write FAILED addr_entry not found"
1153 " -> rcode_address_error\n");
1154 spin_unlock_irqrestore(&host_info_lock, irqflags);
1155 return (RCODE_ADDRESS_ERROR);
1157 DBGMSG("arm_write addr_entry FOUND");
1159 if (arm_addr->rec_length < length) {
1160 DBGMSG("arm_write blocklength too big -> rcode_data_error");
1161 length_conflict = 1;
1162 rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
1165 if (arm_addr->access_rights & ARM_WRITE) {
1166 if (!(arm_addr->client_transactions & ARM_WRITE)) {
1167 memcpy((arm_addr->addr_space_buffer) +
1168 (addr - (arm_addr->start)), data,
1170 DBGMSG("arm_write -> (rcode_complete)");
1171 rcode = RCODE_COMPLETE;
1174 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1175 DBGMSG("arm_write -> rcode_type_error (access denied)");
1178 if (arm_addr->notification_options & ARM_WRITE) {
1179 DBGMSG("arm_write -> entering notification-section");
1180 req = __alloc_pending_request(SLAB_ATOMIC);
1182 DBGMSG("arm_write -> rcode_conflict_error");
1183 spin_unlock_irqrestore(&host_info_lock, irqflags);
1184 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1185 The request may be retried */
1188 sizeof(struct arm_request) + sizeof(struct arm_response) +
1189 (length) * sizeof(byte_t) +
1190 sizeof(struct arm_request_response);
1191 req->data = kmalloc(size, SLAB_ATOMIC);
1193 free_pending_request(req);
1194 DBGMSG("arm_write -> rcode_conflict_error");
1195 spin_unlock_irqrestore(&host_info_lock, irqflags);
1196 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1197 The request may be retried */
1200 req->file_info = fi;
1201 req->req.type = RAW1394_REQ_ARM;
1202 req->req.generation = get_hpsb_generation(host);
1204 (((length << 16) & (0xFFFF0000)) | (ARM_WRITE & 0xFF));
1205 req->req.tag = arm_addr->arm_tag;
1206 req->req.recvb = arm_addr->recvb;
1207 req->req.length = size;
1208 arm_req_resp = (struct arm_request_response *)(req->data);
1209 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1212 arm_request_response)));
1214 (struct arm_response *)((byte_t *) (arm_req) +
1215 (sizeof(struct arm_request)));
1216 arm_resp->buffer = NULL;
1217 memcpy((byte_t *) arm_resp + sizeof(struct arm_response),
1219 arm_req->buffer = int2ptr((arm_addr->recvb) +
1220 sizeof(struct arm_request_response) +
1221 sizeof(struct arm_request) +
1222 sizeof(struct arm_response));
1223 arm_req->buffer_length = length;
1224 arm_req->generation = req->req.generation;
1225 arm_req->extended_transaction_code = 0;
1226 arm_req->destination_offset = addr;
1227 arm_req->source_nodeid = nodeid;
1228 arm_req->destination_nodeid = destid;
1229 arm_req->tlabel = (flags >> 10) & 0x3f;
1230 arm_req->tcode = (flags >> 4) & 0x0f;
1231 arm_resp->buffer_length = 0;
1232 arm_resp->response_code = rcode;
1233 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1235 arm_request_response));
1236 arm_req_resp->response =
1237 int2ptr((arm_addr->recvb) +
1238 sizeof(struct arm_request_response) +
1239 sizeof(struct arm_request));
1240 queue_complete_req(req);
1242 spin_unlock_irqrestore(&host_info_lock, irqflags);
1246 static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
1247 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
1250 unsigned long irqflags;
1251 struct pending_request *req;
1252 struct host_info *hi;
1253 struct file_info *fi = NULL;
1254 struct list_head *entry;
1255 struct arm_addr *arm_addr = NULL;
1256 struct arm_request *arm_req = NULL;
1257 struct arm_response *arm_resp = NULL;
1258 int found = 0, size = 0, rcode = -1;
1260 struct arm_request_response *arm_req_resp = NULL;
1262 if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
1263 ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
1264 DBGMSG("arm_lock called by node: %X "
1265 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X",
1266 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1267 (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
1270 DBGMSG("arm_lock called by node: %X "
1271 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X arg: %8.8X",
1272 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1273 (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
1274 be32_to_cpu(data), be32_to_cpu(arg));
1276 spin_lock_irqsave(&host_info_lock, irqflags);
1277 hi = find_host_info(host); /* search address-entry */
1279 list_for_each_entry(fi, &hi->file_info_list, list) {
1280 entry = fi->addr_list.next;
1281 while (entry != &(fi->addr_list)) {
1283 list_entry(entry, struct arm_addr,
1285 if (((arm_addr->start) <= (addr))
1286 && ((arm_addr->end) >=
1287 (addr + sizeof(*store)))) {
1291 entry = entry->next;
1300 printk(KERN_ERR "raw1394: arm_lock FAILED addr_entry not found"
1301 " -> rcode_address_error\n");
1302 spin_unlock_irqrestore(&host_info_lock, irqflags);
1303 return (RCODE_ADDRESS_ERROR);
1305 DBGMSG("arm_lock addr_entry FOUND");
1308 if (arm_addr->access_rights & ARM_LOCK) {
1309 if (!(arm_addr->client_transactions & ARM_LOCK)) {
1311 (arm_addr->addr_space_buffer) + (addr -
1315 switch (ext_tcode) {
1316 case (EXTCODE_MASK_SWAP):
1317 new = data | (old & ~arg);
1319 case (EXTCODE_COMPARE_SWAP):
1326 case (EXTCODE_FETCH_ADD):
1328 cpu_to_be32(be32_to_cpu(data) +
1331 case (EXTCODE_LITTLE_ADD):
1333 cpu_to_le32(le32_to_cpu(data) +
1336 case (EXTCODE_BOUNDED_ADD):
1339 cpu_to_be32(be32_to_cpu
1347 case (EXTCODE_WRAP_ADD):
1350 cpu_to_be32(be32_to_cpu
1359 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1361 "raw1394: arm_lock FAILED "
1362 "ext_tcode not allowed -> rcode_type_error\n");
1366 DBGMSG("arm_lock -> (rcode_complete)");
1367 rcode = RCODE_COMPLETE;
1368 memcpy(store, &old, sizeof(*store));
1369 memcpy((arm_addr->addr_space_buffer) +
1370 (addr - (arm_addr->start)),
1371 &new, sizeof(*store));
1375 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1376 DBGMSG("arm_lock -> rcode_type_error (access denied)");
1379 if (arm_addr->notification_options & ARM_LOCK) {
1380 byte_t *buf1, *buf2;
1381 DBGMSG("arm_lock -> entering notification-section");
1382 req = __alloc_pending_request(SLAB_ATOMIC);
1384 DBGMSG("arm_lock -> rcode_conflict_error");
1385 spin_unlock_irqrestore(&host_info_lock, irqflags);
1386 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1387 The request may be retried */
1389 size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */
1390 req->data = kmalloc(size, SLAB_ATOMIC);
1392 free_pending_request(req);
1393 DBGMSG("arm_lock -> rcode_conflict_error");
1394 spin_unlock_irqrestore(&host_info_lock, irqflags);
1395 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1396 The request may be retried */
1399 arm_req_resp = (struct arm_request_response *)(req->data);
1400 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1403 arm_request_response)));
1405 (struct arm_response *)((byte_t *) (arm_req) +
1406 (sizeof(struct arm_request)));
1407 buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
1408 buf2 = buf1 + 2 * sizeof(*store);
1409 if ((ext_tcode == EXTCODE_FETCH_ADD) ||
1410 (ext_tcode == EXTCODE_LITTLE_ADD)) {
1411 arm_req->buffer_length = sizeof(*store);
1412 memcpy(buf1, &data, sizeof(*store));
1415 arm_req->buffer_length = 2 * sizeof(*store);
1416 memcpy(buf1, &arg, sizeof(*store));
1417 memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
1419 if (rcode == RCODE_COMPLETE) {
1420 arm_resp->buffer_length = sizeof(*store);
1421 memcpy(buf2, &old, sizeof(*store));
1423 arm_resp->buffer_length = 0;
1425 req->file_info = fi;
1426 req->req.type = RAW1394_REQ_ARM;
1427 req->req.generation = get_hpsb_generation(host);
1428 req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
1430 req->req.tag = arm_addr->arm_tag;
1431 req->req.recvb = arm_addr->recvb;
1432 req->req.length = size;
1433 arm_req->generation = req->req.generation;
1434 arm_req->extended_transaction_code = ext_tcode;
1435 arm_req->destination_offset = addr;
1436 arm_req->source_nodeid = nodeid;
1437 arm_req->destination_nodeid = host->node_id;
1438 arm_req->tlabel = (flags >> 10) & 0x3f;
1439 arm_req->tcode = (flags >> 4) & 0x0f;
1440 arm_resp->response_code = rcode;
1441 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1443 arm_request_response));
1444 arm_req_resp->response =
1445 int2ptr((arm_addr->recvb) +
1446 sizeof(struct arm_request_response) +
1447 sizeof(struct arm_request));
1449 int2ptr((arm_addr->recvb) +
1450 sizeof(struct arm_request_response) +
1451 sizeof(struct arm_request) +
1452 sizeof(struct arm_response));
1454 int2ptr((arm_addr->recvb) +
1455 sizeof(struct arm_request_response) +
1456 sizeof(struct arm_request) +
1457 sizeof(struct arm_response) + 2 * sizeof(*store));
1458 queue_complete_req(req);
1460 spin_unlock_irqrestore(&host_info_lock, irqflags);
1464 static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
1465 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
1468 unsigned long irqflags;
1469 struct pending_request *req;
1470 struct host_info *hi;
1471 struct file_info *fi = NULL;
1472 struct list_head *entry;
1473 struct arm_addr *arm_addr = NULL;
1474 struct arm_request *arm_req = NULL;
1475 struct arm_response *arm_resp = NULL;
1476 int found = 0, size = 0, rcode = -1;
1478 struct arm_request_response *arm_req_resp = NULL;
1480 if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
1481 ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
1482 DBGMSG("arm_lock64 called by node: %X "
1483 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X ",
1484 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1485 (u32) (addr & 0xFFFFFFFF),
1487 (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
1488 (u32) (be64_to_cpu(data) & 0xFFFFFFFF));
1490 DBGMSG("arm_lock64 called by node: %X "
1491 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X arg: "
1493 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1494 (u32) (addr & 0xFFFFFFFF),
1496 (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
1497 (u32) (be64_to_cpu(data) & 0xFFFFFFFF),
1498 (u32) ((be64_to_cpu(arg) >> 32) & 0xFFFFFFFF),
1499 (u32) (be64_to_cpu(arg) & 0xFFFFFFFF));
1501 spin_lock_irqsave(&host_info_lock, irqflags);
1502 hi = find_host_info(host); /* search address entry in the host's file_infos */
1504 list_for_each_entry(fi, &hi->file_info_list, list) {
1505 entry = fi->addr_list.next;
1506 while (entry != &(fi->addr_list)) {
1508 list_entry(entry, struct arm_addr,
1510 if (((arm_addr->start) <= (addr))
1511 && ((arm_addr->end) >=
1512 (addr + sizeof(*store)))) {
1516 entry = entry->next;
1526 "raw1394: arm_lock64 FAILED addr_entry not found"
1527 " -> rcode_address_error\n");
1528 spin_unlock_irqrestore(&host_info_lock, irqflags);
1529 return (RCODE_ADDRESS_ERROR);
1531 DBGMSG("arm_lock64 addr_entry FOUND");
1534 if (arm_addr->access_rights & ARM_LOCK) {
1535 if (!(arm_addr->client_transactions & ARM_LOCK)) {
1537 (arm_addr->addr_space_buffer) + (addr -
1541 switch (ext_tcode) {
1542 case (EXTCODE_MASK_SWAP):
1543 new = data | (old & ~arg);
1545 case (EXTCODE_COMPARE_SWAP):
1552 case (EXTCODE_FETCH_ADD):
1554 cpu_to_be64(be64_to_cpu(data) +
1557 case (EXTCODE_LITTLE_ADD):
1559 cpu_to_le64(le64_to_cpu(data) +
1562 case (EXTCODE_BOUNDED_ADD):
1565 cpu_to_be64(be64_to_cpu
1573 case (EXTCODE_WRAP_ADD):
1576 cpu_to_be64(be64_to_cpu
1586 "raw1394: arm_lock64 FAILED "
1587 "ext_tcode not allowed -> rcode_type_error\n");
1588 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1593 ("arm_lock64 -> (rcode_complete)");
1594 rcode = RCODE_COMPLETE;
1595 memcpy(store, &old, sizeof(*store));
1596 memcpy((arm_addr->addr_space_buffer) +
1597 (addr - (arm_addr->start)),
1598 &new, sizeof(*store));
1602 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1604 ("arm_lock64 -> rcode_type_error (access denied)");
1607 if (arm_addr->notification_options & ARM_LOCK) {
1608 byte_t *buf1, *buf2;
1609 DBGMSG("arm_lock64 -> entering notification-section");
1610 req = __alloc_pending_request(SLAB_ATOMIC);
1612 spin_unlock_irqrestore(&host_info_lock, irqflags);
1613 DBGMSG("arm_lock64 -> rcode_conflict_error");
1614 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1615 The request may be retried */
1617 size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */
1618 req->data = kmalloc(size, SLAB_ATOMIC);
1620 free_pending_request(req);
1621 spin_unlock_irqrestore(&host_info_lock, irqflags);
1622 DBGMSG("arm_lock64 -> rcode_conflict_error");
1623 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1624 The request may be retried */
1627 arm_req_resp = (struct arm_request_response *)(req->data);
1628 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1631 arm_request_response)));
1633 (struct arm_response *)((byte_t *) (arm_req) +
1634 (sizeof(struct arm_request)));
1635 buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
1636 buf2 = buf1 + 2 * sizeof(*store);
1637 if ((ext_tcode == EXTCODE_FETCH_ADD) ||
1638 (ext_tcode == EXTCODE_LITTLE_ADD)) {
1639 arm_req->buffer_length = sizeof(*store);
1640 memcpy(buf1, &data, sizeof(*store));
1643 arm_req->buffer_length = 2 * sizeof(*store);
1644 memcpy(buf1, &arg, sizeof(*store));
1645 memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
1647 if (rcode == RCODE_COMPLETE) {
1648 arm_resp->buffer_length = sizeof(*store);
1649 memcpy(buf2, &old, sizeof(*store));
1651 arm_resp->buffer_length = 0;
1653 req->file_info = fi;
1654 req->req.type = RAW1394_REQ_ARM;
1655 req->req.generation = get_hpsb_generation(host);
1656 req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
1658 req->req.tag = arm_addr->arm_tag;
1659 req->req.recvb = arm_addr->recvb;
1660 req->req.length = size;
1661 arm_req->generation = req->req.generation;
1662 arm_req->extended_transaction_code = ext_tcode;
1663 arm_req->destination_offset = addr;
1664 arm_req->source_nodeid = nodeid;
1665 arm_req->destination_nodeid = host->node_id;
1666 arm_req->tlabel = (flags >> 10) & 0x3f;
1667 arm_req->tcode = (flags >> 4) & 0x0f;
1668 arm_resp->response_code = rcode;
1669 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1671 arm_request_response));
1672 arm_req_resp->response =
1673 int2ptr((arm_addr->recvb) +
1674 sizeof(struct arm_request_response) +
1675 sizeof(struct arm_request));
1677 int2ptr((arm_addr->recvb) +
1678 sizeof(struct arm_request_response) +
1679 sizeof(struct arm_request) +
1680 sizeof(struct arm_response));
1682 int2ptr((arm_addr->recvb) +
1683 sizeof(struct arm_request_response) +
1684 sizeof(struct arm_request) +
1685 sizeof(struct arm_response) + 2 * sizeof(*store));
1686 queue_complete_req(req);
1688 spin_unlock_irqrestore(&host_info_lock, irqflags);
1692 static int arm_register(struct file_info *fi, struct pending_request *req)
1695 struct arm_addr *addr;
1696 struct host_info *hi;
1697 struct file_info *fi_hlp = NULL;
1698 struct list_head *entry;
1699 struct arm_addr *arm_addr = NULL;
1700 int same_host, another_host;
1701 unsigned long flags;
1703 DBGMSG("arm_register called "
1704 "addr(Offset): %8.8x %8.8x length: %u "
1705 "rights: %2.2X notify: %2.2X "
1706 "max_blk_len: %4.4X",
1707 (u32) ((req->req.address >> 32) & 0xFFFF),
1708 (u32) (req->req.address & 0xFFFFFFFF),
1709 req->req.length, ((req->req.misc >> 8) & 0xFF),
1710 (req->req.misc & 0xFF), ((req->req.misc >> 16) & 0xFFFF));
1711 /* check address range */
1712 if ((((req->req.address) & ~(0xFFFFFFFFFFFFULL)) != 0) ||
1713 (((req->req.address + req->req.length) & ~(0xFFFFFFFFFFFFULL)) !=
1715 req->req.length = 0;
1718 /* addr-list-entry for fileinfo */
1719 addr = kmalloc(sizeof(*addr), SLAB_KERNEL);
1721 req->req.length = 0;
1724 /* allocation of addr_space_buffer */
1725 addr->addr_space_buffer = vmalloc(req->req.length);
1726 if (!(addr->addr_space_buffer)) {
1728 req->req.length = 0;
1731 /* initialization of addr_space_buffer */
1732 if ((req->req.sendb) == (unsigned long)NULL) {
1734 memset(addr->addr_space_buffer, 0, req->req.length);
1736 /* init: user -> kernel */
1738 (addr->addr_space_buffer, int2ptr(req->req.sendb),
1740 vfree(addr->addr_space_buffer);
1745 INIT_LIST_HEAD(&addr->addr_list);
1746 addr->arm_tag = req->req.tag;
1747 addr->start = req->req.address;
1748 addr->end = req->req.address + req->req.length;
1749 addr->access_rights = (u8) (req->req.misc & 0x0F);
1750 addr->notification_options = (u8) ((req->req.misc >> 4) & 0x0F);
1751 addr->client_transactions = (u8) ((req->req.misc >> 8) & 0x0F);
1752 addr->access_rights |= addr->client_transactions;
1753 addr->notification_options |= addr->client_transactions;
1754 addr->recvb = req->req.recvb;
1755 addr->rec_length = (u16) ((req->req.misc >> 16) & 0xFFFF);
1756 spin_lock_irqsave(&host_info_lock, flags);
1757 hi = find_host_info(fi->host);
1760 /* same host with an address entry containing the same address range? */
1761 list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
1762 entry = fi_hlp->addr_list.next;
1763 while (entry != &(fi_hlp->addr_list)) {
1765 list_entry(entry, struct arm_addr, addr_list);
1766 if ((arm_addr->start == addr->start)
1767 && (arm_addr->end == addr->end)) {
1768 DBGMSG("same host ownes same "
1769 "addressrange -> EALREADY");
1773 entry = entry->next;
1780 /* address range occupied by same host */
1781 vfree(addr->addr_space_buffer);
1783 spin_unlock_irqrestore(&host_info_lock, flags);
1786 /* another host with a valid address entry containing the same address range */
1787 list_for_each_entry(hi, &host_info_list, list) {
1788 if (hi->host != fi->host) {
1789 list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
1790 entry = fi_hlp->addr_list.next;
1791 while (entry != &(fi_hlp->addr_list)) {
1793 list_entry(entry, struct arm_addr,
1795 if ((arm_addr->start == addr->start)
1796 && (arm_addr->end == addr->end)) {
1798 ("another host ownes same "
1803 entry = entry->next;
1812 DBGMSG("another hosts entry is valid -> SUCCESS");
1813 if (copy_to_user(int2ptr(req->req.recvb),
1814 &addr->start, sizeof(u64))) {
1815 printk(KERN_ERR "raw1394: arm_register failed "
1816 " address-range-entry is invalid -> EFAULT !!!\n");
1817 vfree(addr->addr_space_buffer);
1819 spin_unlock_irqrestore(&host_info_lock, flags);
1822 free_pending_request(req); /* immediate success or fail */
1824 list_add_tail(&addr->addr_list, &fi->addr_list);
1825 spin_unlock_irqrestore(&host_info_lock, flags);
1826 return sizeof(struct raw1394_request);
1829 hpsb_register_addrspace(&raw1394_highlevel, fi->host, &arm_ops,
1831 req->req.address + req->req.length);
1834 list_add_tail(&addr->addr_list, &fi->addr_list);
1836 DBGMSG("arm_register failed errno: %d \n", retval);
1837 vfree(addr->addr_space_buffer);
1839 spin_unlock_irqrestore(&host_info_lock, flags);
1842 spin_unlock_irqrestore(&host_info_lock, flags);
1843 free_pending_request(req); /* immediate success or fail */
1844 return sizeof(struct raw1394_request);
1847 static int arm_unregister(struct file_info *fi, struct pending_request *req)
1851 struct list_head *entry;
1852 struct arm_addr *addr = NULL;
1853 struct host_info *hi;
1854 struct file_info *fi_hlp = NULL;
1855 struct arm_addr *arm_addr = NULL;
1857 unsigned long flags;
1859 DBGMSG("arm_Unregister called addr(Offset): "
1861 (u32) ((req->req.address >> 32) & 0xFFFF),
1862 (u32) (req->req.address & 0xFFFFFFFF));
1863 spin_lock_irqsave(&host_info_lock, flags);
1865 entry = fi->addr_list.next;
1866 while (entry != &(fi->addr_list)) {
1867 addr = list_entry(entry, struct arm_addr, addr_list);
1868 if (addr->start == req->req.address) {
1872 entry = entry->next;
1875 DBGMSG("arm_Unregister addr not found");
1876 spin_unlock_irqrestore(&host_info_lock, flags);
1879 DBGMSG("arm_Unregister addr found");
1881 /* another host with a valid address entry containing
1882 the same address range */
1883 list_for_each_entry(hi, &host_info_list, list) {
1884 if (hi->host != fi->host) {
1885 list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
1886 entry = fi_hlp->addr_list.next;
1887 while (entry != &(fi_hlp->addr_list)) {
1888 arm_addr = list_entry(entry,
1891 if (arm_addr->start == addr->start) {
1892 DBGMSG("another host ownes "
1893 "same addressrange");
1897 entry = entry->next;
1906 DBGMSG("delete entry from list -> success");
1907 list_del(&addr->addr_list);
1908 vfree(addr->addr_space_buffer);
1910 free_pending_request(req); /* immediate success or fail */
1911 spin_unlock_irqrestore(&host_info_lock, flags);
1912 return sizeof(struct raw1394_request);
1915 hpsb_unregister_addrspace(&raw1394_highlevel, fi->host,
1918 printk(KERN_ERR "raw1394: arm_Unregister failed -> EINVAL\n");
1919 spin_unlock_irqrestore(&host_info_lock, flags);
1922 DBGMSG("delete entry from list -> success");
1923 list_del(&addr->addr_list);
1924 spin_unlock_irqrestore(&host_info_lock, flags);
1925 vfree(addr->addr_space_buffer);
1927 free_pending_request(req); /* immediate success or fail */
1928 return sizeof(struct raw1394_request);
1931 /* Copy data from ARM buffer(s) to user buffer. */
1932 static int arm_get_buf(struct file_info *fi, struct pending_request *req)
1934 struct arm_addr *arm_addr = NULL;
1935 unsigned long flags;
1936 unsigned long offset;
1938 struct list_head *entry;
1940 DBGMSG("arm_get_buf "
1941 "addr(Offset): %04X %08X length: %u",
1942 (u32) ((req->req.address >> 32) & 0xFFFF),
1943 (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
1945 spin_lock_irqsave(&host_info_lock, flags);
1946 entry = fi->addr_list.next;
1947 while (entry != &(fi->addr_list)) {
1948 arm_addr = list_entry(entry, struct arm_addr, addr_list);
1949 if ((arm_addr->start <= req->req.address) &&
1950 (arm_addr->end > req->req.address)) {
1951 if (req->req.address + req->req.length <= arm_addr->end) {
1952 offset = req->req.address - arm_addr->start;
1955 ("arm_get_buf copy_to_user( %08X, %p, %u )",
1956 (u32) req->req.recvb,
1957 arm_addr->addr_space_buffer + offset,
1958 (u32) req->req.length);
1961 (int2ptr(req->req.recvb),
1962 arm_addr->addr_space_buffer + offset,
1964 spin_unlock_irqrestore(&host_info_lock,
1969 spin_unlock_irqrestore(&host_info_lock, flags);
1970 /* We have to free the request, because we
1971 * queue no response, and therefore nobody will free it. */
1973 free_pending_request(req);
1974 return sizeof(struct raw1394_request);
1976 DBGMSG("arm_get_buf request exceeded mapping");
1977 spin_unlock_irqrestore(&host_info_lock, flags);
1981 entry = entry->next;
1983 spin_unlock_irqrestore(&host_info_lock, flags);
1987 /* Copy data from user buffer to ARM buffer(s). */
1988 static int arm_set_buf(struct file_info *fi, struct pending_request *req)
1990 struct arm_addr *arm_addr = NULL;
1991 unsigned long flags;
1992 unsigned long offset;
1994 struct list_head *entry;
1996 DBGMSG("arm_set_buf "
1997 "addr(Offset): %04X %08X length: %u",
1998 (u32) ((req->req.address >> 32) & 0xFFFF),
1999 (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
2001 spin_lock_irqsave(&host_info_lock, flags);
2002 entry = fi->addr_list.next;
2003 while (entry != &(fi->addr_list)) {
2004 arm_addr = list_entry(entry, struct arm_addr, addr_list);
2005 if ((arm_addr->start <= req->req.address) &&
2006 (arm_addr->end > req->req.address)) {
2007 if (req->req.address + req->req.length <= arm_addr->end) {
2008 offset = req->req.address - arm_addr->start;
2011 ("arm_set_buf copy_from_user( %p, %08X, %u )",
2012 arm_addr->addr_space_buffer + offset,
2013 (u32) req->req.sendb,
2014 (u32) req->req.length);
2017 (arm_addr->addr_space_buffer + offset,
2018 int2ptr(req->req.sendb),
2020 spin_unlock_irqrestore(&host_info_lock,
2025 spin_unlock_irqrestore(&host_info_lock, flags);
2026 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2027 return sizeof(struct raw1394_request);
2029 DBGMSG("arm_set_buf request exceeded mapping");
2030 spin_unlock_irqrestore(&host_info_lock, flags);
2034 entry = entry->next;
2036 spin_unlock_irqrestore(&host_info_lock, flags);
2040 static int reset_notification(struct file_info *fi, struct pending_request *req)
2042 DBGMSG("reset_notification called - switch %s ",
2043 (req->req.misc == RAW1394_NOTIFY_OFF) ? "OFF" : "ON");
2044 if ((req->req.misc == RAW1394_NOTIFY_OFF) ||
2045 (req->req.misc == RAW1394_NOTIFY_ON)) {
2046 fi->notification = (u8) req->req.misc;
2047 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2048 return sizeof(struct raw1394_request);
2050 /* error EINVAL (22) invalid argument */
2054 static int write_phypacket(struct file_info *fi, struct pending_request *req)
2056 struct hpsb_packet *packet = NULL;
2059 unsigned long flags;
2061 data = be32_to_cpu((u32) req->req.sendb);
2062 DBGMSG("write_phypacket called - quadlet 0x%8.8x ", data);
2063 packet = hpsb_make_phypacket(fi->host, data);
2066 req->req.length = 0;
2067 req->packet = packet;
2068 hpsb_set_packet_complete_task(packet,
2069 (void (*)(void *))queue_complete_cb, req);
2070 spin_lock_irqsave(&fi->reqlists_lock, flags);
2071 list_add_tail(&req->list, &fi->req_pending);
2072 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2073 packet->generation = req->req.generation;
2074 retval = hpsb_send_packet(packet);
2075 DBGMSG("write_phypacket send_packet called => retval: %d ", retval);
2077 req->req.error = RAW1394_ERROR_SEND_ERROR;
2078 req->req.length = 0;
2079 queue_complete_req(req);
2081 return sizeof(struct raw1394_request);
2084 static int get_config_rom(struct file_info *fi, struct pending_request *req)
2086 int ret = sizeof(struct raw1394_request);
2087 quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
2094 csr1212_read(fi->host->csr.rom, CSR1212_CONFIG_ROM_SPACE_OFFSET,
2095 data, req->req.length);
2096 if (copy_to_user(int2ptr(req->req.recvb), data, req->req.length))
2099 (int2ptr(req->req.tag), &fi->host->csr.rom->cache_head->len,
2100 sizeof(fi->host->csr.rom->cache_head->len)))
2102 if (copy_to_user(int2ptr(req->req.address), &fi->host->csr.generation,
2103 sizeof(fi->host->csr.generation)))
2105 if (copy_to_user(int2ptr(req->req.sendb), &status, sizeof(status)))
2109 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2114 static int update_config_rom(struct file_info *fi, struct pending_request *req)
2116 int ret = sizeof(struct raw1394_request);
2117 quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
2120 if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
2123 int status = hpsb_update_config_rom(fi->host,
2124 data, req->req.length,
2125 (unsigned char)req->req.
2128 (int2ptr(req->req.recvb), &status, sizeof(status)))
2133 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
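/*
 * modify_config_rom(): req.misc selects one of the per-file CSR1212
 * directories (~0 requests a fresh slot).  A zero req.length deletes that
 * directory again; otherwise the user-supplied image is parsed and its
 * top-level keyvals are attached to the local config ROM before the ROM
 * image is regenerated.
 */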
2139 static int modify_config_rom(struct file_info *fi, struct pending_request *req)
2141 struct csr1212_keyval *kv;
2142 struct csr1212_csr_rom_cache *cache;
2143 struct csr1212_dentry *dentry;
2147 if (req->req.misc == ~0) {
2148 if (req->req.length == 0)
2151 /* Find an unused slot */
2153 dr < RAW1394_MAX_USER_CSR_DIRS && fi->csr1212_dirs[dr];
2156 if (dr == RAW1394_MAX_USER_CSR_DIRS)
2159 fi->csr1212_dirs[dr] =
2160 csr1212_new_directory(CSR1212_KV_ID_VENDOR);
2161 if (!fi->csr1212_dirs[dr])
2165 if (!fi->csr1212_dirs[dr])
2168 /* Delete old stuff */
2170 fi->csr1212_dirs[dr]->value.directory.dentries_head;
2171 dentry; dentry = dentry->next) {
2172 csr1212_detach_keyval_from_directory(fi->host->csr.rom->
2177 if (req->req.length == 0) {
2178 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2179 fi->csr1212_dirs[dr] = NULL;
2181 hpsb_update_config_rom_image(fi->host);
2182 free_pending_request(req);
2183 return sizeof(struct raw1394_request);
2187 cache = csr1212_rom_cache_malloc(0, req->req.length);
2189 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2190 fi->csr1212_dirs[dr] = NULL;
2194 cache->filled_head = kmalloc(sizeof(*cache->filled_head), GFP_KERNEL);
2195 if (!cache->filled_head) {
2196 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2197 fi->csr1212_dirs[dr] = NULL;
2198 CSR1212_FREE(cache);
2201 cache->filled_tail = cache->filled_head;
2203 if (copy_from_user(cache->data, int2ptr(req->req.sendb),
2205 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2206 fi->csr1212_dirs[dr] = NULL;
2209 cache->len = req->req.length;
2210 cache->filled_head->offset_start = 0;
2211 cache->filled_head->offset_end = cache->size - 1;
2213 cache->layout_head = cache->layout_tail = fi->csr1212_dirs[dr];
2215 ret = CSR1212_SUCCESS;
2216 /* parse all the items */
2217 for (kv = cache->layout_head; ret == CSR1212_SUCCESS && kv;
2219 ret = csr1212_parse_keyval(kv, cache);
2222 /* attach top level items to the root directory */
2224 fi->csr1212_dirs[dr]->value.directory.dentries_head;
2225 ret == CSR1212_SUCCESS && dentry; dentry = dentry->next) {
2227 csr1212_attach_keyval_to_directory(fi->host->csr.
2232 if (ret == CSR1212_SUCCESS) {
2233 ret = hpsb_update_config_rom_image(fi->host);
2235 if (ret >= 0 && copy_to_user(int2ptr(req->req.recvb),
2241 kfree(cache->filled_head);
2242 CSR1212_FREE(cache);
2245 /* we have to free the request, because we queue no response,
2246 * and therefore nobody will free it */
2247 free_pending_request(req);
2248 return sizeof(struct raw1394_request);
2251 fi->csr1212_dirs[dr]->value.directory.dentries_head;
2252 dentry; dentry = dentry->next) {
2253 csr1212_detach_keyval_from_directory(fi->host->csr.rom->
2257 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2258 fi->csr1212_dirs[dr] = NULL;
2263 static int state_connected(struct file_info *fi, struct pending_request *req)
2265 int node = req->req.address >> 48;
2267 req->req.error = RAW1394_ERROR_NONE;
2269 switch (req->req.type) {
2271 case RAW1394_REQ_ECHO:
2272 queue_complete_req(req);
2273 return sizeof(struct raw1394_request);
2275 case RAW1394_REQ_ISO_SEND:
2276 return handle_iso_send(fi, req, node);
2278 case RAW1394_REQ_ARM_REGISTER:
2279 return arm_register(fi, req);
2281 case RAW1394_REQ_ARM_UNREGISTER:
2282 return arm_unregister(fi, req);
2284 case RAW1394_REQ_ARM_SET_BUF:
2285 return arm_set_buf(fi, req);
2287 case RAW1394_REQ_ARM_GET_BUF:
2288 return arm_get_buf(fi, req);
2290 case RAW1394_REQ_RESET_NOTIFY:
2291 return reset_notification(fi, req);
2293 case RAW1394_REQ_ISO_LISTEN:
2294 handle_iso_listen(fi, req);
2295 return sizeof(struct raw1394_request);
2297 case RAW1394_REQ_FCP_LISTEN:
2298 handle_fcp_listen(fi, req);
2299 return sizeof(struct raw1394_request);
2301 case RAW1394_REQ_RESET_BUS:
2302 if (req->req.misc == RAW1394_LONG_RESET) {
2303 DBGMSG("busreset called (type: LONG)");
2304 hpsb_reset_bus(fi->host, LONG_RESET);
2305 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2306 return sizeof(struct raw1394_request);
2308 if (req->req.misc == RAW1394_SHORT_RESET) {
2309 DBGMSG("busreset called (type: SHORT)");
2310 hpsb_reset_bus(fi->host, SHORT_RESET);
2311 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2312 return sizeof(struct raw1394_request);
2314 /* error EINVAL (22) invalid argument */
2316 case RAW1394_REQ_GET_ROM:
2317 return get_config_rom(fi, req);
2319 case RAW1394_REQ_UPDATE_ROM:
2320 return update_config_rom(fi, req);
2322 case RAW1394_REQ_MODIFY_ROM:
2323 return modify_config_rom(fi, req);
2326 if (req->req.generation != get_hpsb_generation(fi->host)) {
2327 req->req.error = RAW1394_ERROR_GENERATION;
2328 req->req.generation = get_hpsb_generation(fi->host);
2329 req->req.length = 0;
2330 queue_complete_req(req);
2331 return sizeof(struct raw1394_request);
2334 switch (req->req.type) {
2335 case RAW1394_REQ_PHYPACKET:
2336 return write_phypacket(fi, req);
2337 case RAW1394_REQ_ASYNC_SEND:
2338 return handle_async_send(fi, req);
2341 if (req->req.length == 0) {
2342 req->req.error = RAW1394_ERROR_INVALID_ARG;
2343 queue_complete_req(req);
2344 return sizeof(struct raw1394_request);
2347 return handle_async_request(fi, req, node);
2350 static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2351 size_t count, loff_t * offset_is_ignored)
2353 struct file_info *fi = (struct file_info *)file->private_data;
2354 struct pending_request *req;
2357 #ifdef CONFIG_COMPAT
2358 if (count == sizeof(struct compat_raw1394_req) &&
2359 sizeof(struct compat_raw1394_req) !=
2360 sizeof(struct raw1394_request)) {
2361 buffer = raw1394_compat_write(buffer);
2362 if (IS_ERR(buffer))
2363 return PTR_ERR(buffer);
2366 if (count != sizeof(struct raw1394_request)) {
2370 req = alloc_pending_request();
2374 req->file_info = fi;
2376 if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
2377 free_pending_request(req);
2381 switch (fi->state) {
2383 retval = state_opened(fi, req);
2387 retval = state_initialized(fi, req);
2391 retval = state_connected(fi, req);
2396 free_pending_request(req);
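/*
 * Illustrative sketch (not part of this driver): how a user-space client
 * could drive the write() interface above to request a bus reset.  It
 * assumes the struct raw1394_request layout and the RAW1394_* constants
 * from raw1394.h, plus a /dev/raw1394 descriptor that has already gone
 * through the usual initialization handshake; treat it as an example of
 * intended usage, not as authoritative code.
 *
 *	struct raw1394_request req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.type = RAW1394_REQ_RESET_BUS;
 *	req.misc = RAW1394_LONG_RESET;	// or RAW1394_SHORT_RESET
 *	if (write(fd, &req, sizeof(req)) != sizeof(req))
 *		perror("raw1394 write");
 *	// no response is queued for RESET_BUS, so nothing to read() back
 */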
2402 /* rawiso operations */
2404 /* check if any RAW1394_REQ_RAWISO_ACTIVITY event is already in the
2405 * completion queue (reqlists_lock must be taken) */
2406 static inline int __rawiso_event_in_queue(struct file_info *fi)
2408 struct pending_request *req;
2410 list_for_each_entry(req, &fi->req_complete, list)
2411 if (req->req.type == RAW1394_REQ_RAWISO_ACTIVITY)
2417 /* put a RAWISO_ACTIVITY event in the queue, if one isn't there already */
2418 static void queue_rawiso_event(struct file_info *fi)
2420 unsigned long flags;
2422 spin_lock_irqsave(&fi->reqlists_lock, flags);
2424 /* only one ISO activity event may be in the queue */
2425 if (!__rawiso_event_in_queue(fi)) {
2426 struct pending_request *req =
2427 __alloc_pending_request(SLAB_ATOMIC);
2430 req->file_info = fi;
2431 req->req.type = RAW1394_REQ_RAWISO_ACTIVITY;
2432 req->req.generation = get_hpsb_generation(fi->host);
2433 __queue_complete_req(req);
2435 /* on allocation failure, signal an overflow */
2436 if (fi->iso_handle) {
2437 atomic_inc(&fi->iso_handle->overflows);
2441 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2444 static void rawiso_activity_cb(struct hpsb_iso *iso)
2446 unsigned long flags;
2447 struct host_info *hi;
2448 struct file_info *fi;
2450 spin_lock_irqsave(&host_info_lock, flags);
2451 hi = find_host_info(iso->host);
2454 list_for_each_entry(fi, &hi->file_info_list, list) {
2455 if (fi->iso_handle == iso)
2456 queue_rawiso_event(fi);
2460 spin_unlock_irqrestore(&host_info_lock, flags);
2463 /* helper function - gather all the kernel iso status bits for returning to user-space */
2464 static void raw1394_iso_fill_status(struct hpsb_iso *iso,
2465 struct raw1394_iso_status *stat)
2467 stat->config.data_buf_size = iso->buf_size;
2468 stat->config.buf_packets = iso->buf_packets;
2469 stat->config.channel = iso->channel;
2470 stat->config.speed = iso->speed;
2471 stat->config.irq_interval = iso->irq_interval;
2472 stat->n_packets = hpsb_iso_n_ready(iso);
2473 stat->overflows = atomic_read(&iso->overflows);
2474 stat->xmit_cycle = iso->xmit_cycle;
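/*
 * Hypothetical user-space counterpart of the helper above: fetching the
 * same status via the RAW1394_IOC_ISO_GET_STATUS ioctl once an iso
 * context has been initialized (fields as defined in raw1394.h):
 *
 *	struct raw1394_iso_status stat;
 *
 *	if (ioctl(fd, RAW1394_IOC_ISO_GET_STATUS, &stat) == 0)
 *		printf("%d packets ready, %d overflows\n",
 *		       stat.n_packets, stat.overflows);
 */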
2477 static int raw1394_iso_xmit_init(struct file_info *fi, void __user * uaddr)
2479 struct raw1394_iso_status stat;
2484 if (copy_from_user(&stat, uaddr, sizeof(stat)))
2487 fi->iso_handle = hpsb_iso_xmit_init(fi->host,
2488 stat.config.data_buf_size,
2489 stat.config.buf_packets,
2490 stat.config.channel,
2492 stat.config.irq_interval,
2493 rawiso_activity_cb);
2494 if (!fi->iso_handle)
2497 fi->iso_state = RAW1394_ISO_XMIT;
2499 raw1394_iso_fill_status(fi->iso_handle, &stat);
2500 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2503 /* queue an event to get things started */
2504 rawiso_activity_cb(fi->iso_handle);
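/*
 * Example configuration for the RAW1394_IOC_ISO_XMIT_INIT path above,
 * sketched from user space.  All values are placeholders chosen for
 * illustration; the struct layout is the one from raw1394.h:
 *
 *	struct raw1394_iso_status stat;
 *
 *	memset(&stat, 0, sizeof(stat));
 *	stat.config.data_buf_size = 256 * 1024;
 *	stat.config.buf_packets = 256;
 *	stat.config.channel = 63;
 *	stat.config.speed = 2;		// S400 in the hpsb speed encoding
 *	stat.config.irq_interval = 16;
 *	if (ioctl(fd, RAW1394_IOC_ISO_XMIT_INIT, &stat) < 0)
 *		perror("iso xmit init");
 */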
2509 static int raw1394_iso_recv_init(struct file_info *fi, void __user * uaddr)
2511 struct raw1394_iso_status stat;
2516 if (copy_from_user(&stat, uaddr, sizeof(stat)))
2519 fi->iso_handle = hpsb_iso_recv_init(fi->host,
2520 stat.config.data_buf_size,
2521 stat.config.buf_packets,
2522 stat.config.channel,
2523 stat.config.dma_mode,
2524 stat.config.irq_interval,
2525 rawiso_activity_cb);
2526 if (!fi->iso_handle)
2529 fi->iso_state = RAW1394_ISO_RECV;
2531 raw1394_iso_fill_status(fi->iso_handle, &stat);
2532 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2537 static int raw1394_iso_get_status(struct file_info *fi, void __user * uaddr)
2539 struct raw1394_iso_status stat;
2540 struct hpsb_iso *iso = fi->iso_handle;
2542 raw1394_iso_fill_status(fi->iso_handle, &stat);
2543 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2546 /* reset overflow counter */
2547 atomic_set(&iso->overflows, 0);
2552 /* copy N packet_infos out of the ringbuffer into user-supplied array */
2553 static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr)
2555 struct raw1394_iso_packets upackets;
2556 unsigned int packet = fi->iso_handle->first_packet;
2559 if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
2562 if (upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle))
2565 /* ensure user-supplied buffer is accessible and big enough */
2566 if (!access_ok(VERIFY_WRITE, upackets.infos,
2567 upackets.n_packets *
2568 sizeof(struct raw1394_iso_packet_info)))
2571 /* copy the packet_infos out */
2572 for (i = 0; i < upackets.n_packets; i++) {
2573 if (__copy_to_user(&upackets.infos[i],
2574 &fi->iso_handle->infos[packet],
2575 sizeof(struct raw1394_iso_packet_info)))
2578 packet = (packet + 1) % fi->iso_handle->buf_packets;
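/*
 * Hypothetical user-space caller of RAW1394_IOC_ISO_RECV_PACKETS matching
 * the copy loop above: the caller provides the infos array, and n_packets
 * must not exceed the ready count reported by RAW1394_IOC_ISO_GET_STATUS.
 * Sketch only:
 *
 *	struct raw1394_iso_packet_info infos[64];
 *	struct raw1394_iso_packets upackets = { .infos = infos };
 *
 *	upackets.n_packets = n_ready;	// from a prior ISO_GET_STATUS
 *	if (ioctl(fd, RAW1394_IOC_ISO_RECV_PACKETS, &upackets) == 0) {
 *		// ... consume packet payloads from the mmap()ed buffer ...
 *		ioctl(fd, RAW1394_IOC_ISO_RECV_RELEASE_PACKETS,
 *		      upackets.n_packets);
 *	}
 */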
2584 /* copy N packet_infos from user to ringbuffer, and queue them for transmission */
2585 static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr)
2587 struct raw1394_iso_packets upackets;
2590 if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
2593 if (upackets.n_packets >= fi->iso_handle->buf_packets)
2596 if (upackets.n_packets >= hpsb_iso_n_ready(fi->iso_handle))
2599 /* ensure user-supplied buffer is accessible and big enough */
2600 if (!access_ok(VERIFY_READ, upackets.infos,
2601 upackets.n_packets *
2602 sizeof(struct raw1394_iso_packet_info)))
2605 /* copy the infos structs in and queue the packets */
2606 for (i = 0; i < upackets.n_packets; i++) {
2607 struct raw1394_iso_packet_info info;
2609 if (__copy_from_user(&info, &upackets.infos[i],
2610 sizeof(struct raw1394_iso_packet_info)))
2613 rv = hpsb_iso_xmit_queue_packet(fi->iso_handle, info.offset,
2614 info.len, info.tag, info.sy);
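/*
 * Hypothetical user-space counterpart of the queueing loop above: the
 * caller stages the payload in the mmap()ed xmit buffer and describes it
 * with a raw1394_iso_packet_info.  Values are placeholders:
 *
 *	struct raw1394_iso_packet_info info = {
 *		.offset = 0,		// byte offset into the mmap()ed buffer
 *		.len = payload_len,
 *		.tag = 0,
 *		.sy = 0,
 *	};
 *	struct raw1394_iso_packets upackets = { .n_packets = 1, .infos = &info };
 *
 *	if (ioctl(fd, RAW1394_IOC_ISO_XMIT_PACKETS, &upackets) < 0)
 *		perror("iso xmit packets");
 */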
2622 static void raw1394_iso_shutdown(struct file_info *fi)
2625 hpsb_iso_shutdown(fi->iso_handle);
2627 fi->iso_handle = NULL;
2628 fi->iso_state = RAW1394_ISO_INACTIVE;
2631 /* mmap the rawiso xmit/recv buffer */
2632 static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
2634 struct file_info *fi = file->private_data;
2636 if (fi->iso_state == RAW1394_ISO_INACTIVE)
2639 return dma_region_mmap(&fi->iso_handle->data_buf, file, vma);
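/*
 * Hypothetical user-space use of the mmap handler above (needs
 * <sys/mman.h>): map the iso data buffer after XMIT/RECV init, using the
 * data_buf_size reported back in raw1394_iso_status:
 *
 *	void *buf = mmap(NULL, stat.config.data_buf_size,
 *			 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	if (buf == MAP_FAILED)
 *		perror("raw1394 mmap");
 */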
2642 /* ioctl is only used for rawiso operations */
2643 static int raw1394_ioctl(struct inode *inode, struct file *file,
2644 unsigned int cmd, unsigned long arg)
2646 struct file_info *fi = file->private_data;
2647 void __user *argp = (void __user *)arg;
2649 switch (fi->iso_state) {
2650 case RAW1394_ISO_INACTIVE:
2652 case RAW1394_IOC_ISO_XMIT_INIT:
2653 return raw1394_iso_xmit_init(fi, argp);
2654 case RAW1394_IOC_ISO_RECV_INIT:
2655 return raw1394_iso_recv_init(fi, argp);
2660 case RAW1394_ISO_RECV:
2662 case RAW1394_IOC_ISO_RECV_START:{
2663 /* copy args from user-space */
2664 int args[3];
2665 if (copy_from_user
2666 (&args[0], argp, sizeof(args)))
2667 return -EFAULT;
2668 return hpsb_iso_recv_start(fi->iso_handle,
2669 args[0], args[1], args[2]);
2672 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2673 hpsb_iso_stop(fi->iso_handle);
2675 case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
2676 return hpsb_iso_recv_listen_channel(fi->iso_handle,
2678 case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
2679 return hpsb_iso_recv_unlisten_channel(fi->iso_handle,
2681 case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:{
2682 /* copy the u64 from user-space */
2684 if (copy_from_user(&mask, argp, sizeof(mask)))
2686 return hpsb_iso_recv_set_channel_mask(fi->
2690 case RAW1394_IOC_ISO_GET_STATUS:
2691 return raw1394_iso_get_status(fi, argp);
2692 case RAW1394_IOC_ISO_RECV_PACKETS:
2693 return raw1394_iso_recv_packets(fi, argp);
2694 case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
2695 return hpsb_iso_recv_release_packets(fi->iso_handle,
2697 case RAW1394_IOC_ISO_RECV_FLUSH:
2698 return hpsb_iso_recv_flush(fi->iso_handle);
2699 case RAW1394_IOC_ISO_SHUTDOWN:
2700 raw1394_iso_shutdown(fi);
2702 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2703 queue_rawiso_event(fi);
2707 case RAW1394_ISO_XMIT:
2709 case RAW1394_IOC_ISO_XMIT_START:{
2710 /* copy two ints from user-space */
2711 int args[2];
2712 if (copy_from_user
2713 (&args[0], argp, sizeof(args)))
2714 return -EFAULT;
2715 return hpsb_iso_xmit_start(fi->iso_handle,
2716 args[0], args[1]);
2718 case RAW1394_IOC_ISO_XMIT_SYNC:
2719 return hpsb_iso_xmit_sync(fi->iso_handle);
2720 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2721 hpsb_iso_stop(fi->iso_handle);
2723 case RAW1394_IOC_ISO_GET_STATUS:
2724 return raw1394_iso_get_status(fi, argp);
2725 case RAW1394_IOC_ISO_XMIT_PACKETS:
2726 return raw1394_iso_send_packets(fi, argp);
2727 case RAW1394_IOC_ISO_SHUTDOWN:
2728 raw1394_iso_shutdown(fi);
2730 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2731 queue_rawiso_event(fi);
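/*
 * Typical call sequence against the ioctl dispatcher above, as a hedged
 * user-space sketch for the receive side (constants from raw1394.h,
 * error handling omitted; RECV_START takes int args[3] = cycle, tag_mask,
 * sync, and LISTEN_CHANNEL takes the channel number as the ioctl arg):
 *
 *	ioctl(fd, RAW1394_IOC_ISO_RECV_INIT, &stat);
 *	mmap the data buffer (see raw1394_mmap above);
 *	ioctl(fd, RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL, channel);
 *	ioctl(fd, RAW1394_IOC_ISO_RECV_START, args);
 *	receive loop: RECV_PACKETS / RECV_RELEASE_PACKETS;
 *	ioctl(fd, RAW1394_IOC_ISO_XMIT_RECV_STOP, 0);
 *	ioctl(fd, RAW1394_IOC_ISO_SHUTDOWN, 0);
 */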
2742 static unsigned int raw1394_poll(struct file *file, poll_table * pt)
2744 struct file_info *fi = file->private_data;
2745 unsigned int mask = POLLOUT | POLLWRNORM;
2746 unsigned long flags;
2748 poll_wait(file, &fi->poll_wait_complete, pt);
2750 spin_lock_irqsave(&fi->reqlists_lock, flags);
2751 if (!list_empty(&fi->req_complete)) {
2752 mask |= POLLIN | POLLRDNORM;
2754 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
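/*
 * Hypothetical user-space side of the poll handler above (needs <poll.h>):
 * wait until a completed request is available, then read() it back as a
 * struct raw1394_request.  Sketch only:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fd, &resp, sizeof(resp));
 */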
2759 static int raw1394_open(struct inode *inode, struct file *file)
2761 struct file_info *fi;
2763 fi = kzalloc(sizeof(*fi), SLAB_KERNEL);
2767 fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */
2769 INIT_LIST_HEAD(&fi->list);
2771 INIT_LIST_HEAD(&fi->req_pending);
2772 INIT_LIST_HEAD(&fi->req_complete);
2773 sema_init(&fi->complete_sem, 0);
2774 spin_lock_init(&fi->reqlists_lock);
2775 init_waitqueue_head(&fi->poll_wait_complete);
2776 INIT_LIST_HEAD(&fi->addr_list);
2778 file->private_data = fi;
2783 static int raw1394_release(struct inode *inode, struct file *file)
2785 struct file_info *fi = file->private_data;
2786 struct list_head *lh;
2787 struct pending_request *req;
2788 int done = 0, i, fail = 0;
2790 struct list_head *entry;
2791 struct arm_addr *addr = NULL;
2792 struct host_info *hi;
2793 struct file_info *fi_hlp = NULL;
2794 struct arm_addr *arm_addr = NULL;
2797 unsigned long flags;
2799 if (fi->iso_state != RAW1394_ISO_INACTIVE)
2800 raw1394_iso_shutdown(fi);
2802 for (i = 0; i < 64; i++) {
2803 if (fi->listen_channels & (1ULL << i)) {
2804 hpsb_unlisten_channel(&raw1394_highlevel, fi->host, i);
2808 spin_lock_irqsave(&host_info_lock, flags);
2809 fi->listen_channels = 0;
2812 /* invalidate this file's address entries */
2814 while (!list_empty(&fi->addr_list)) {
2816 lh = fi->addr_list.next;
2817 addr = list_entry(lh, struct arm_addr, addr_list);
2818 /* does another host still hold a valid address entry covering
2819 the same address range? */
2820 list_for_each_entry(hi, &host_info_list, list) {
2821 if (hi->host != fi->host) {
2822 list_for_each_entry(fi_hlp, &hi->file_info_list,
2824 entry = fi_hlp->addr_list.next;
2825 while (entry != &(fi_hlp->addr_list)) {
2826 arm_addr = list_entry(entry, struct
2829 if (arm_addr->start ==
2832 ("raw1394_release: "
2833 "another host ownes "
2834 "same addressrange");
2838 entry = entry->next;
2846 if (!another_host) {
2847 DBGMSG("raw1394_release: calling hpsb_unregister_addrspace");
2849 hpsb_unregister_addrspace(&raw1394_highlevel,
2850 fi->host, addr->start);
2854 "raw1394_release arm_Unregister failed\n");
2857 DBGMSG("raw1394_release: delete addr_entry from list");
2858 list_del(&addr->addr_list);
2859 vfree(addr->addr_space_buffer);
2862 spin_unlock_irqrestore(&host_info_lock, flags);
2864 printk(KERN_ERR "raw1394: during addr_list-release "
2865 "error(s) occurred \n");
2869 spin_lock_irqsave(&fi->reqlists_lock, flags);
2871 while (!list_empty(&fi->req_complete)) {
2872 lh = fi->req_complete.next;
2875 req = list_entry(lh, struct pending_request, list);
2877 free_pending_request(req);
2880 if (list_empty(&fi->req_pending))
2883 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2886 down_interruptible(&fi->complete_sem);
2889 /* Remove any sub-trees left by user space programs */
2890 for (i = 0; i < RAW1394_MAX_USER_CSR_DIRS; i++) {
2891 struct csr1212_dentry *dentry;
2892 if (!fi->csr1212_dirs[i])
2895 fi->csr1212_dirs[i]->value.directory.dentries_head; dentry;
2896 dentry = dentry->next) {
2897 csr1212_detach_keyval_from_directory(fi->host->csr.rom->
2901 csr1212_release_keyval(fi->csr1212_dirs[i]);
2902 fi->csr1212_dirs[i] = NULL;
2906 if ((csr_mod || fi->cfgrom_upd)
2907 && hpsb_update_config_rom_image(fi->host) < 0)
2909 ("Failed to generate Configuration ROM image for host %d",
2912 if (fi->state == connected) {
2913 spin_lock_irqsave(&host_info_lock, flags);
2914 list_del(&fi->list);
2915 spin_unlock_irqrestore(&host_info_lock, flags);
2917 put_device(&fi->host->device);
2925 /*** HOTPLUG STUFF **********************************************************/
2927 * Export information about protocols/devices supported by this driver.
2929 static struct ieee1394_device_id raw1394_id_table[] = {
2931 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2932 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
2933 .version = AVC_SW_VERSION_ENTRY & 0xffffff},
2935 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2936 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2937 .version = CAMERA_SW_VERSION_ENTRY & 0xffffff},
2939 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2940 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2941 .version = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff},
2943 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2944 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2945 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff},
2949 MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
2951 static struct hpsb_protocol_driver raw1394_driver = {
2952 .name = "raw1394 Driver",
2953 .id_table = raw1394_id_table,
2956 .bus = &ieee1394_bus_type,
2960 /******************************************************************************/
2962 static struct hpsb_highlevel raw1394_highlevel = {
2963 .name = RAW1394_DEVICE_NAME,
2964 .add_host = add_host,
2965 .remove_host = remove_host,
2966 .host_reset = host_reset,
2967 .iso_receive = iso_receive,
2968 .fcp_request = fcp_request,
2971 static struct cdev raw1394_cdev;
2972 static struct file_operations raw1394_fops = {
2973 .owner = THIS_MODULE,
2974 .read = raw1394_read,
2975 .write = raw1394_write,
2976 .mmap = raw1394_mmap,
2977 .ioctl = raw1394_ioctl,
2978 // .compat_ioctl = ... someone needs to do this
2979 .poll = raw1394_poll,
2980 .open = raw1394_open,
2981 .release = raw1394_release,
2984 static int __init init_raw1394(void)
2988 hpsb_register_highlevel(&raw1394_highlevel);
2991 (class_device_create
2992 (hpsb_protocol_class, NULL,
2993 MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), NULL,
2994 RAW1394_DEVICE_NAME))) {
2999 cdev_init(&raw1394_cdev, &raw1394_fops);
3000 raw1394_cdev.owner = THIS_MODULE;
3001 kobject_set_name(&raw1394_cdev.kobj, RAW1394_DEVICE_NAME);
3002 ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);
3004 HPSB_ERR("raw1394 failed to register minor device block");
3008 HPSB_INFO("raw1394: /dev/%s device initialized", RAW1394_DEVICE_NAME);
3010 ret = hpsb_register_protocol(&raw1394_driver);
3012 HPSB_ERR("raw1394: failed to register protocol");
3013 cdev_del(&raw1394_cdev);
3020 class_device_destroy(hpsb_protocol_class,
3021 MKDEV(IEEE1394_MAJOR,
3022 IEEE1394_MINOR_BLOCK_RAW1394 * 16));
3024 hpsb_unregister_highlevel(&raw1394_highlevel);
3029 static void __exit cleanup_raw1394(void)
3031 class_device_destroy(hpsb_protocol_class,
3032 MKDEV(IEEE1394_MAJOR,
3033 IEEE1394_MINOR_BLOCK_RAW1394 * 16));
3034 cdev_del(&raw1394_cdev);
3035 hpsb_unregister_highlevel(&raw1394_highlevel);
3036 hpsb_unregister_protocol(&raw1394_driver);
3039 module_init(init_raw1394);
3040 module_exit(cleanup_raw1394);
3041 MODULE_LICENSE("GPL");