/*
 * Core support: hpsb_packet management, packet handling and forwarding to
 * highlevel or lowlevel code
 *
 * Copyright (C) 1999, 2000 Andreas E. Bombe
 *                2002      Manfred Weihs <weihs@ict.tuwien.ac.at>
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 *
 * Contributions:
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *        loopback functionality in hpsb_send_packet
 *        allow highlevel drivers to disable automatic response generation
 *        and to generate responses themselves (deferred)
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/kdev_t.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>

#include <asm/byteorder.h>
#include <asm/semaphore.h>

#include "ieee1394_types.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ieee1394_transactions.h"
#include "config_roms.h"
/*
 * Disable the nodemgr detection and config rom reading functionality.
 */
static int disable_nodemgr;
module_param(disable_nodemgr, int, 0444);
MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");
/* Disable Isochronous Resource Manager functionality */
int hpsb_disable_irm = 0;
module_param_named(disable_irm, hpsb_disable_irm, bool, 0444);
MODULE_PARM_DESC(disable_irm,
                 "Disable Isochronous Resource Manager functionality.");
/* We are GPL, so treat us special */
MODULE_LICENSE("GPL");
/* Some globals used */
const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S3200" };
struct class *hpsb_protocol_class;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
static void dump_packet(const char *text, quadlet_t *data, int size, int speed)
{
        int i;

        size = (size > 4 ? 4 : size);
        printk(KERN_DEBUG "ieee1394: %s", text);
        if (speed > -1 && speed < 6)
                printk(" at %s", hpsb_speedto_str[speed]);
        for (i = 0; i < size; i++)
                printk(" %08x", data[i]);
        printk("\n");
}
#else
#define dump_packet(a,b,c,d)
#endif
static void abort_requests(struct hpsb_host *host);
static void queue_packet_complete(struct hpsb_packet *packet);
/**
 * hpsb_set_packet_complete_task - set the task that runs when a packet
 * completes. You cannot call this more than once on a single packet
 * before it is sent.
 *
 * @packet: the packet whose completion we want the task added to
 * @routine: function to call
 * @data: data (if any) to pass to the above function
 */
void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
                                   void (*routine)(void *), void *data)
{
        WARN_ON(packet->complete_routine != NULL);
        packet->complete_routine = routine;
        packet->complete_data = data;
}
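/*
 * Illustrative sketch (not part of the original file): attaching a
 * completion callback before submission.  "my_complete" and "my_ctx" are
 * hypothetical names; the routine is later invoked from the khpsbpkt
 * thread once the packet completes.
 *
 *      static void my_complete(void *data)
 *      {
 *              struct my_ctx *ctx = data;
 *
 *              complete(&ctx->done);
 *      }
 *
 *      hpsb_set_packet_complete_task(packet, my_complete, ctx);
 *      hpsb_send_packet(packet);
 */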
/**
 * hpsb_alloc_packet - allocate new packet structure
 * @data_size: size of the data block to be allocated
 *
 * This function allocates, initializes and returns a new &struct hpsb_packet.
 * It can be used in interrupt context.  A header block is always included; its
 * size is big enough to contain all possible 1394 headers.  The data block is
 * only allocated when @data_size is not zero.
 *
 * For packets for which responses will be received, @data_size has to be big
 * enough to contain the response's data block since no further allocation
 * occurs at response matching time.
 *
 * The packet's generation value will be set to the current generation number
 * for ease of use.  Remember to overwrite it with your own recorded generation
 * number if you cannot be sure that your code will not race with a bus reset.
 *
 * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
 * failure.
 */
struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
{
        struct hpsb_packet *packet = NULL;
        struct sk_buff *skb;

        data_size = ((data_size + 3) & ~3);

        skb = alloc_skb(data_size + sizeof(*packet), GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        memset(skb->data, 0, data_size + sizeof(*packet));

        packet = (struct hpsb_packet *)skb->data;
        packet->skb = skb;

        packet->header = packet->embedded_header;
        packet->state = hpsb_unused;
        packet->generation = -1;
        INIT_LIST_HEAD(&packet->driver_list);
        atomic_set(&packet->refcnt, 1);

        if (data_size) {
                packet->data = (quadlet_t *)(skb->data + sizeof(*packet));
                packet->data_size = data_size;
        }

        return packet;
}
/**
 * hpsb_free_packet - free packet and data associated with it
 * @packet: packet to free (is NULL safe)
 *
 * This function will free packet->data and finally the packet itself.
 */
void hpsb_free_packet(struct hpsb_packet *packet)
{
        if (packet && atomic_dec_and_test(&packet->refcnt)) {
                BUG_ON(!list_empty(&packet->driver_list));
                kfree_skb(packet->skb);
        }
}
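/*
 * Illustrative sketch (not part of the original file): @data_size is rounded
 * up to the next multiple of four, so asking for 5 bytes reserves 8, and
 * hpsb_free_packet() is reference counted and NULL safe.
 *
 *      struct hpsb_packet *p = hpsb_alloc_packet(5);
 *
 *      if (!p)
 *              return -ENOMEM;
 *      BUG_ON(p->data_size != 8);
 *      hpsb_free_packet(p);
 */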
int hpsb_reset_bus(struct hpsb_host *host, int type)
        if (!host->in_bus_reset) {
                host->driver->devctl(host, RESET_BUS, type);

int hpsb_bus_reset(struct hpsb_host *host)
        if (host->in_bus_reset) {
                HPSB_NOTICE("%s called while bus reset already in progress",

        abort_requests(host);
        host->in_bus_reset = 1;
        host->busmgr_id = -1;
        host->node_count = 0;
        host->selfid_count = 0;

/*
 * Verify num_of_selfids SelfIDs and return number of nodes.  Return zero in
 * case verification failed.
 */
static int check_selfids(struct hpsb_host *host)
        int rest_of_selfids = host->selfid_count;
        struct selfid *sid = (struct selfid *)host->topology_map;
        struct ext_selfid *esid;

        host->nodes_active = 0;

        while (rest_of_selfids--) {
                if (!sid->extended) {
                        if (sid->phy_id != nodeid) {
                                HPSB_INFO("SelfIDs failed monotony check with "
                        if (sid->link_active) {
                                host->nodes_active++;
                                        host->irm_id = LOCAL_BUS | sid->phy_id;
                        esid = (struct ext_selfid *)sid;
                        if ((esid->phy_id != nodeid)
                            || (esid->seq_nr != esid_seq)) {
                                HPSB_INFO("SelfIDs failed monotony check with "
                                          "%d/%d", esid->phy_id, esid->seq_nr);

        esid = (struct ext_selfid *)(sid - 1);
        while (esid->extended) {
                if ((esid->porta == SELFID_PORT_PARENT) ||
                    (esid->portb == SELFID_PORT_PARENT) ||
                    (esid->portc == SELFID_PORT_PARENT) ||
                    (esid->portd == SELFID_PORT_PARENT) ||
                    (esid->porte == SELFID_PORT_PARENT) ||
                    (esid->portf == SELFID_PORT_PARENT) ||
                    (esid->portg == SELFID_PORT_PARENT) ||
                    (esid->porth == SELFID_PORT_PARENT)) {
                        HPSB_INFO("SelfIDs failed root check on "

        sid = (struct selfid *)esid;
        if ((sid->port0 == SELFID_PORT_PARENT) ||
            (sid->port1 == SELFID_PORT_PARENT) ||
            (sid->port2 == SELFID_PORT_PARENT)) {
                HPSB_INFO("SelfIDs failed root check");

        host->node_count = nodeid + 1;
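/*
 * The table built below is indexed as host->speed_map[64 * a + b]: each
 * entry holds the lowest speed capability on the path between node a and
 * node b, which is what hpsb_send_packet() later looks up for async,
 * non-broadcast packets.
 */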
static void build_speed_map(struct hpsb_host *host, int nodecount)
        u8 speedcap[nodecount];
        u8 cldcnt[nodecount];
        u8 *map = host->speed_map;
        struct ext_selfid *esid;

        for (i = 0; i < (nodecount * 64); i += 64) {
                for (j = 0; j < nodecount; j++) {
                        map[i+j] = IEEE1394_SPEED_MAX;

        for (i = 0; i < nodecount; i++) {

        /* find direct children count and speed */
        for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
             (void *)sid >= (void *)host->topology_map; sid--) {
                esid = (struct ext_selfid *)sid;
                        if (esid->porta == SELFID_PORT_CHILD) cldcnt[n]++;
                        if (esid->portb == SELFID_PORT_CHILD) cldcnt[n]++;
                        if (esid->portc == SELFID_PORT_CHILD) cldcnt[n]++;
                        if (esid->portd == SELFID_PORT_CHILD) cldcnt[n]++;
                        if (esid->porte == SELFID_PORT_CHILD) cldcnt[n]++;
                        if (esid->portf == SELFID_PORT_CHILD) cldcnt[n]++;
                        if (esid->portg == SELFID_PORT_CHILD) cldcnt[n]++;
                        if (esid->porth == SELFID_PORT_CHILD) cldcnt[n]++;
                        if (sid->port0 == SELFID_PORT_CHILD) cldcnt[n]++;
                        if (sid->port1 == SELFID_PORT_CHILD) cldcnt[n]++;
                        if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
                        speedcap[n] = sid->speed;

        /* set self mapping */
        for (i = 0; i < nodecount; i++) {
                map[64*i + i] = speedcap[i];

        /* fix up direct children count to total children count;
         * also fix up speedcaps for sibling and parent communication */
        for (i = 1; i < nodecount; i++) {
                for (j = cldcnt[i], n = i - 1; j > 0; j--) {
                        cldcnt[i] += cldcnt[n];
                        speedcap[n] = min(speedcap[n], speedcap[i]);

        for (n = 0; n < nodecount; n++) {
                for (i = n - cldcnt[n]; i <= n; i++) {
                        for (j = 0; j < (n - cldcnt[n]); j++) {
                                map[j*64 + i] = map[i*64 + j] =
                                        min(map[i*64 + j], speedcap[n]);
                        for (j = n + 1; j < nodecount; j++) {
                                map[j*64 + i] = map[i*64 + j] =
                                        min(map[i*64 + j], speedcap[n]);
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
{
        if (host->in_bus_reset) {
                HPSB_VERBOSE("Including SelfID 0x%x", sid);
                host->topology_map[host->selfid_count++] = sid;
        } else {
                HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
                            sid, NODEID_TO_BUS(host->node_id));
        }
}
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
        if (!host->in_bus_reset)
                HPSB_NOTICE("SelfID completion called outside of bus reset!");

        host->node_id = LOCAL_BUS | phyid;
        host->is_root = isroot;

        if (!check_selfids(host)) {
                if (host->reset_retries++ < 20) {
                        /* selfid stage did not complete without error */
                        HPSB_NOTICE("Error in SelfID stage, resetting");
                        host->in_bus_reset = 0;
                        /* this should work from ohci1394 now... */
                        hpsb_reset_bus(host, LONG_RESET);
                        HPSB_NOTICE("Stopping out-of-control reset loop");
                        HPSB_NOTICE("Warning - topology map and speed map will not be valid");
                        host->reset_retries = 0;
                host->reset_retries = 0;
                build_speed_map(host, host->node_count);

        HPSB_VERBOSE("selfid_complete called with successful SelfID stage "
                     "... irm_id: 0x%X node_id: 0x%X", host->irm_id, host->node_id);

        /* irm_id is kept up to date by check_selfids() */
        if (host->irm_id == host->node_id) {
                host->driver->devctl(host, ACT_CYCLE_MASTER, 1);

        atomic_inc(&host->generation);
        host->in_bus_reset = 0;
        highlevel_host_reset(host);
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
                      int ackcode)
{
        unsigned long flags;

        spin_lock_irqsave(&host->pending_packet_queue.lock, flags);

        packet->ack_code = ackcode;

        if (packet->no_waiter || packet->state == hpsb_complete) {
                /* if packet->no_waiter, must not have a tlabel allocated */
                spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
                hpsb_free_packet(packet);
                return;
        }

        atomic_dec(&packet->refcnt);    /* drop HC's reference */
        /* here the packet must be on the host->pending_packet_queue */
        if (ackcode != ACK_PENDING || !packet->expect_response) {
                packet->state = hpsb_complete;
                __skb_unlink(packet->skb, &host->pending_packet_queue);
                spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
                queue_packet_complete(packet);
                return;
        }

        packet->state = hpsb_pending;
        packet->sendtime = jiffies;

        spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);

        mod_timer(&host->timeout, jiffies + host->timeout_interval);
}
/**
 * hpsb_send_phy_config - transmit a PHY configuration packet on the bus
 * @host: host that PHY config packet gets sent through
 * @rootid: root whose force_root bit should get set (-1 = don't set force_root)
 * @gapcnt: gap count value to set (-1 = don't set gap count)
 *
 * This function sends a PHY config packet on the bus through the specified host.
 *
 * Return value: 0 for success or error number otherwise.
 */
int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
{
        struct hpsb_packet *packet;
        quadlet_t d = 0;
        int retval = 0;

        if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 ||
           (rootid == -1 && gapcnt == -1)) {
                HPSB_DEBUG("Invalid Parameter: rootid = %d   gapcnt = %d",
                           rootid, gapcnt);
                return -EINVAL;
        }
        if (rootid != -1)
                d |= PHYPACKET_PHYCONFIG_R | rootid << PHYPACKET_PORT_SHIFT;
        if (gapcnt != -1)
                d |= PHYPACKET_PHYCONFIG_T | gapcnt << PHYPACKET_GAPCOUNT_SHIFT;
        packet = hpsb_make_phypacket(host, d);
        if (!packet)
                return -ENOMEM;
        packet->generation = get_hpsb_generation(host);
        retval = hpsb_send_packet_and_wait(packet);
        hpsb_free_packet(packet);
        return retval;
}
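/*
 * Illustrative sketch (not part of the original file): ask every PHY to use
 * a gap count of 63 without touching the force_root bit.
 *
 *      if (hpsb_send_phy_config(host, -1, 63) < 0)
 *              HPSB_ERR("PHY config packet failed");
 */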
/**
 * hpsb_send_packet - transmit a packet on the bus
 * @packet: packet to send
 *
 * The packet is sent through the host specified in the packet->host field.
 * Before sending, the packet's transmit speed is automatically determined
 * using the local speed map when it is an async, non-broadcast packet.
 *
 * Possibilities for failure are that host is either not initialized, in bus
 * reset, the packet's generation number doesn't match the current generation
 * number or the host reports a transmit error.
 *
 * Return value: 0 on success, negative errno on failure.
 */
int hpsb_send_packet(struct hpsb_packet *packet)
{
        struct hpsb_host *host = packet->host;

        if (host->is_shutdown)
                return -EINVAL;
        if (host->in_bus_reset ||
            (packet->generation != get_hpsb_generation(host)))
                return -EAGAIN;

        packet->state = hpsb_queued;

        /* This just seems silly to me */
        WARN_ON(packet->no_waiter && packet->expect_response);

        if (!packet->no_waiter || packet->expect_response) {
                atomic_inc(&packet->refcnt);
                /* Set the initial "sendtime" to 10 seconds from now, to
                   prevent premature expiry.  If a packet takes more than
                   10 seconds to hit the wire, we have bigger problems :) */
                packet->sendtime = jiffies + 10 * HZ;
                skb_queue_tail(&host->pending_packet_queue, packet->skb);
        }

        if (packet->node_id == host->node_id) {
                /* it is a local request, so handle it locally */

                quadlet_t *data;
                size_t size = packet->data_size + packet->header_size;

                data = kmalloc(size, GFP_ATOMIC);
                if (!data) {
                        HPSB_ERR("unable to allocate memory for concatenating header and data");
                        return -ENOMEM;
                }

                memcpy(data, packet->header, packet->header_size);

                if (packet->data_size)
                        memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);

                dump_packet("send packet local", packet->header, packet->header_size, -1);

                hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
                hpsb_packet_received(host, data, size, 0);

                kfree(data);

                return 0;
        }

        if (packet->type == hpsb_async && packet->node_id != ALL_NODES) {
                packet->speed_code =
                        host->speed_map[NODEID_TO_NODE(host->node_id) * 64
                                        + NODEID_TO_NODE(packet->node_id)];
        }

        dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);

        return host->driver->transmit_packet(host, packet);
}
/* We could just use complete() directly as the packet complete
 * callback, but this is more typesafe, in the sense that we get a
 * compiler error if the prototype for complete() changes. */
static void complete_packet(void *data)
{
        complete((struct completion *) data);
}
int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
{
        struct completion done;
        int retval;

        init_completion(&done);
        hpsb_set_packet_complete_task(packet, complete_packet, &done);
        retval = hpsb_send_packet(packet);
        if (retval == 0)
                wait_for_completion(&done);
        return retval;
}
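/*
 * Illustrative sketch (not part of the original file): synchronous use of a
 * prebuilt packet.  Recording the generation right before sending lets a
 * bus reset in between show up as a send failure instead of silently
 * reaching the wrong node; hpsb_packet_success() then maps the ack and
 * response codes to an errno.
 *
 *      packet->generation = get_hpsb_generation(host);
 *      ret = hpsb_send_packet_and_wait(packet);
 *      if (ret == 0)
 *              ret = hpsb_packet_success(packet);
 *      hpsb_free_packet(packet);
 */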
static void send_packet_nocare(struct hpsb_packet *packet)
{
        if (hpsb_send_packet(packet) < 0) {
                hpsb_free_packet(packet);
        }
}
static void handle_packet_response(struct hpsb_host *host, int tcode,
                                   quadlet_t *data, size_t size)
        struct hpsb_packet *packet = NULL;

        tlabel = (data[0] >> 10) & 0x3f;

        spin_lock_irqsave(&host->pending_packet_queue.lock, flags);

        skb_queue_walk(&host->pending_packet_queue, skb) {
                packet = (struct hpsb_packet *)skb->data;
                if ((packet->tlabel == tlabel)
                    && (packet->node_id == (data[1] >> 16))) {

        if (packet == NULL) {
                HPSB_DEBUG("unsolicited response packet received - no tlabel match");
                dump_packet("contents", data, 16, -1);
                spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);

        switch (packet->tcode) {
                if (tcode != TCODE_WRITE_RESPONSE)
                memcpy(packet->header, data, 12);
                if (tcode != TCODE_READQ_RESPONSE)
                memcpy(packet->header, data, 16);
                if (tcode != TCODE_READB_RESPONSE)
                BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
                memcpy(packet->header, data, 16);
                memcpy(packet->data, data + 4, size - 16);
        case TCODE_LOCK_REQUEST:
                if (tcode != TCODE_LOCK_RESPONSE)
                size = min((size - 16), (size_t)8);
                BUG_ON(packet->skb->len - sizeof(*packet) < size);
                memcpy(packet->header, data, 16);
                memcpy(packet->data, data + 4, size);
                spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
                HPSB_INFO("unsolicited response packet received - tcode mismatch");
                dump_packet("contents", data, 16, -1);

        __skb_unlink(skb, &host->pending_packet_queue);

        if (packet->state == hpsb_queued) {
                packet->sendtime = jiffies;
                packet->ack_code = ACK_PENDING;

        packet->state = hpsb_complete;
        spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);

        queue_packet_complete(packet);

static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
                                               quadlet_t *data, size_t dsize)
        struct hpsb_packet *p;

        p = hpsb_alloc_packet(dsize);
        if (unlikely(p == NULL)) {
                /* FIXME - send data_error response */

        p->type = hpsb_async;
        p->state = hpsb_unused;

        p->node_id = data[1] >> 16;
        p->tlabel = (data[0] >> 10) & 0x3f;

        p->generation = get_hpsb_generation(host);

        p->data[dsize / 4] = 0;
#define PREP_ASYNC_HEAD_RCODE(tc) \
        packet->tcode = tc; \
        packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
                | (1 << 8) | (tc << 4); \
        packet->header[1] = (packet->host->node_id << 16) | (rcode << 12); \
        packet->header[2] = 0
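/*
 * The three quadlets built above follow the common IEEE 1394 async response
 * header layout: quadlet 0 carries the destination node ID, the original
 * transaction label, a retry code of 1 (retry_X) and the response tcode;
 * quadlet 1 carries our own node ID and the response code; quadlet 2 is
 * reserved and cleared.
 */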
static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
                                     quadlet_t data)
{
        PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
        packet->header[3] = data;
        packet->header_size = 16;
        packet->data_size = 0;
}
static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
                                      int length)
{
        if (rcode != RCODE_COMPLETE)
                length = 0;

        PREP_ASYNC_HEAD_RCODE(TCODE_READB_RESPONSE);
        packet->header[3] = length << 16;
        packet->header_size = 16;
        packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
}
static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
{
        PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
        packet->header[2] = 0;
        packet->header_size = 12;
        packet->data_size = 0;
}
static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
                                 int length)
{
        if (rcode != RCODE_COMPLETE)
                length = 0;

        PREP_ASYNC_HEAD_RCODE(TCODE_LOCK_RESPONSE);
        packet->header[3] = (length << 16) | extcode;
        packet->header_size = 16;
        packet->data_size = length;
}
#define PREP_REPLY_PACKET(length) \
        packet = create_reply_packet(host, data, length); \
        if (packet == NULL) break
static void handle_incoming_packet(struct hpsb_host *host, int tcode,
                                   quadlet_t *data, size_t size, int write_acked)
        struct hpsb_packet *packet;
        int length, rcode, extcode;
        nodeid_t source = data[1] >> 16;
        nodeid_t dest = data[0] >> 16;
        u16 flags = (u16) data[0];

        /* big FIXME - no error checking is done for an out of bounds length */

                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                rcode = highlevel_write(host, source, dest, data+3,
                    && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
                        /* not a broadcast write, reply */
                        PREP_REPLY_PACKET(0);
                        fill_async_write_resp(packet, rcode);
                        send_packet_nocare(packet);

                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                rcode = highlevel_write(host, source, dest, data+4,
                                        addr, data[3]>>16, flags);
                    && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
                        /* not a broadcast write, reply */
                        PREP_REPLY_PACKET(0);
                        fill_async_write_resp(packet, rcode);
                        send_packet_nocare(packet);

                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
                PREP_REPLY_PACKET(0);
                fill_async_readquad_resp(packet, rcode, buffer);
                send_packet_nocare(packet);

                length = data[3] >> 16;
                PREP_REPLY_PACKET(length);
                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                rcode = highlevel_read(host, source, packet->data, addr,
                        fill_async_readblock_resp(packet, rcode, length);
                        send_packet_nocare(packet);
                        hpsb_free_packet(packet);

        case TCODE_LOCK_REQUEST:
                length = data[3] >> 16;
                extcode = data[3] & 0xffff;
                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];

                PREP_REPLY_PACKET(8);

                if ((extcode == 0) || (extcode >= 7)) {
                        /* let switch default handle error */

                        rcode = highlevel_lock(host, source, packet->data, addr,
                                               data[4], 0, extcode, flags);
                        fill_async_lock_resp(packet, rcode, extcode, 4);
                        if ((extcode != EXTCODE_FETCH_ADD)
                            && (extcode != EXTCODE_LITTLE_ADD)) {
                                rcode = highlevel_lock(host, source,
                                fill_async_lock_resp(packet, rcode, extcode, 4);
                                rcode = highlevel_lock64(host, source,
                                        (octlet_t *)packet->data, addr,
                                        *(octlet_t *)(data + 4), 0ULL,
                                fill_async_lock_resp(packet, rcode, extcode, 8);
                        rcode = highlevel_lock64(host, source,
                                (octlet_t *)packet->data, addr,
                                *(octlet_t *)(data + 6),
                                *(octlet_t *)(data + 4),
                        fill_async_lock_resp(packet, rcode, extcode, 8);
                        rcode = RCODE_TYPE_ERROR;
                        fill_async_lock_resp(packet, rcode,
                send_packet_nocare(packet);
                hpsb_free_packet(packet);

#undef PREP_REPLY_PACKET

void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
        if (host->in_bus_reset) {
                HPSB_INFO("received packet during reset; ignoring");

        dump_packet("received packet", data, size, -1);

        tcode = (data[0] >> 4) & 0xf;

        case TCODE_WRITE_RESPONSE:
        case TCODE_READQ_RESPONSE:
        case TCODE_READB_RESPONSE:
        case TCODE_LOCK_RESPONSE:
                handle_packet_response(host, tcode, data, size);

        case TCODE_LOCK_REQUEST:
                handle_incoming_packet(host, tcode, data, size, write_acked);

                highlevel_iso_receive(host, data, size);

        case TCODE_CYCLE_START:
                /* simply ignore this packet if it is passed on */

                HPSB_NOTICE("received packet with bogus transaction code %d",
static void abort_requests(struct hpsb_host *host)
{
        struct hpsb_packet *packet;
        struct sk_buff *skb;

        host->driver->devctl(host, CANCEL_REQUESTS, 0);

        while ((skb = skb_dequeue(&host->pending_packet_queue)) != NULL) {
                packet = (struct hpsb_packet *)skb->data;

                packet->state = hpsb_complete;
                packet->ack_code = ACKX_ABORTED;
                queue_packet_complete(packet);
        }
}
void abort_timedouts(unsigned long __opaque)
{
        struct hpsb_host *host = (struct hpsb_host *)__opaque;
        unsigned long flags;
        struct hpsb_packet *packet;
        struct sk_buff *skb;
        unsigned long expire;

        spin_lock_irqsave(&host->csr.lock, flags);
        expire = host->csr.expire;
        spin_unlock_irqrestore(&host->csr.lock, flags);

        /* Hold the lock around this, since we aren't dequeuing all
         * packets, just ones we need. */
        spin_lock_irqsave(&host->pending_packet_queue.lock, flags);

        while (!skb_queue_empty(&host->pending_packet_queue)) {
                skb = skb_peek(&host->pending_packet_queue);

                packet = (struct hpsb_packet *)skb->data;

                if (time_before(packet->sendtime + expire, jiffies)) {
                        __skb_unlink(skb, &host->pending_packet_queue);
                        packet->state = hpsb_complete;
                        packet->ack_code = ACKX_TIMEOUT;
                        queue_packet_complete(packet);
                } else {
                        /* Since packets are added to the tail, the oldest
                         * ones are first, always. When we get to one that
                         * isn't timed out, the rest aren't either. */
                        break;
                }
        }

        if (!skb_queue_empty(&host->pending_packet_queue))
                mod_timer(&host->timeout, jiffies + host->timeout_interval);

        spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
}
/* Kernel thread and vars, which handles packets that are completed.  Only
 * packets that have a "complete" function are sent here.  This way, the
 * completion is run out of kernel context, and doesn't block the rest of
 * the stack. */
static int khpsbpkt_pid = -1, khpsbpkt_kill;
static DECLARE_COMPLETION(khpsbpkt_complete);
static struct sk_buff_head hpsbpkt_queue;
static DECLARE_MUTEX_LOCKED(khpsbpkt_sig);
static void queue_packet_complete(struct hpsb_packet *packet)
{
        if (packet->no_waiter) {
                hpsb_free_packet(packet);
                return;
        }
        if (packet->complete_routine != NULL) {
                skb_queue_tail(&hpsbpkt_queue, packet->skb);
                /* Signal the kernel thread to handle this */
                up(&khpsbpkt_sig);
        }
}
static int hpsbpkt_thread(void *__hi)
        struct sk_buff *skb;
        struct hpsb_packet *packet;
        void (*complete_routine)(void*);
        void *complete_data;

        daemonize("khpsbpkt");

        current->flags |= PF_NOFREEZE;

                if (down_interruptible(&khpsbpkt_sig)) {
                        printk("khpsbpkt: received unexpected signal?!\n");

                while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
                        packet = (struct hpsb_packet *)skb->data;

                        complete_routine = packet->complete_routine;
                        complete_data = packet->complete_data;

                        packet->complete_routine = packet->complete_data = NULL;

                        complete_routine(complete_data);

        complete_and_exit(&khpsbpkt_complete, 0);
static int __init ieee1394_init(void)
        skb_queue_head_init(&hpsbpkt_queue);

        /* non-fatal error */
        if (hpsb_init_config_roms()) {
                HPSB_ERR("Failed to initialize some config rom entries.\n");
                HPSB_ERR("Some features may not be available\n");

        khpsbpkt_pid = kernel_thread(hpsbpkt_thread, NULL, CLONE_KERNEL);
        if (khpsbpkt_pid < 0) {
                HPSB_ERR("Failed to start hpsbpkt thread!\n");
                goto exit_cleanup_config_roms;

        if (register_chrdev_region(IEEE1394_CORE_DEV, 256, "ieee1394")) {
                HPSB_ERR("unable to register character device major %d!\n", IEEE1394_MAJOR);
                goto exit_release_kernel_thread;

        ret = bus_register(&ieee1394_bus_type);
                HPSB_INFO("bus register failed");
                goto release_chrdev;

        for (i = 0; fw_bus_attrs[i]; i++) {
                ret = bus_create_file(&ieee1394_bus_type, fw_bus_attrs[i]);
                        bus_remove_file(&ieee1394_bus_type,
                        bus_unregister(&ieee1394_bus_type);
                        goto release_chrdev;

        ret = class_register(&hpsb_host_class);
                goto release_all_bus;

        hpsb_protocol_class = class_create(THIS_MODULE, "ieee1394_protocol");
        if (IS_ERR(hpsb_protocol_class)) {
                ret = PTR_ERR(hpsb_protocol_class);
                goto release_class_host;

                HPSB_INFO("init csr failed");
                goto release_class_protocol;

        if (disable_nodemgr) {
                HPSB_INFO("nodemgr and IRM functionality disabled");
                /* We shouldn't contend for IRM with nodemgr disabled, since
                   nodemgr implements functionality required of ieee1394a-2000
                   IRMs */
                hpsb_disable_irm = 1;

        if (hpsb_disable_irm) {
                HPSB_INFO("IRM functionality disabled");

        ret = init_ieee1394_nodemgr();
                HPSB_INFO("init nodemgr failed");

release_class_protocol:
        class_destroy(hpsb_protocol_class);
        class_unregister(&hpsb_host_class);
        for (i = 0; fw_bus_attrs[i]; i++)
                bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
        bus_unregister(&ieee1394_bus_type);
        unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
exit_release_kernel_thread:
        if (khpsbpkt_pid >= 0) {
                kill_proc(khpsbpkt_pid, SIGTERM, 1);
                wait_for_completion(&khpsbpkt_complete);
exit_cleanup_config_roms:
        hpsb_cleanup_config_roms();
static void __exit ieee1394_cleanup(void)
        if (!disable_nodemgr)
                cleanup_ieee1394_nodemgr();

        class_destroy(hpsb_protocol_class);
        class_unregister(&hpsb_host_class);
        for (i = 0; fw_bus_attrs[i]; i++)
                bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
        bus_unregister(&ieee1394_bus_type);

        if (khpsbpkt_pid >= 0) {
                wait_for_completion(&khpsbpkt_complete);

        hpsb_cleanup_config_roms();

        unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
module_init(ieee1394_init);
module_exit(ieee1394_cleanup);

/* Exported symbols */

EXPORT_SYMBOL(hpsb_alloc_host);
EXPORT_SYMBOL(hpsb_add_host);
EXPORT_SYMBOL(hpsb_remove_host);
EXPORT_SYMBOL(hpsb_update_config_rom_image);

/** ieee1394_core.c **/
EXPORT_SYMBOL(hpsb_speedto_str);
EXPORT_SYMBOL(hpsb_protocol_class);
EXPORT_SYMBOL(hpsb_set_packet_complete_task);
EXPORT_SYMBOL(hpsb_alloc_packet);
EXPORT_SYMBOL(hpsb_free_packet);
EXPORT_SYMBOL(hpsb_send_packet);
EXPORT_SYMBOL(hpsb_reset_bus);
EXPORT_SYMBOL(hpsb_bus_reset);
EXPORT_SYMBOL(hpsb_selfid_received);
EXPORT_SYMBOL(hpsb_selfid_complete);
EXPORT_SYMBOL(hpsb_packet_sent);
EXPORT_SYMBOL(hpsb_packet_received);
EXPORT_SYMBOL_GPL(hpsb_disable_irm);
#ifdef CONFIG_IEEE1394_EXPORT_FULL_API
EXPORT_SYMBOL(hpsb_send_phy_config);
EXPORT_SYMBOL(hpsb_send_packet_and_wait);
#endif

/** ieee1394_transactions.c **/
EXPORT_SYMBOL(hpsb_get_tlabel);
EXPORT_SYMBOL(hpsb_free_tlabel);
EXPORT_SYMBOL(hpsb_make_readpacket);
EXPORT_SYMBOL(hpsb_make_writepacket);
EXPORT_SYMBOL(hpsb_make_streampacket);
EXPORT_SYMBOL(hpsb_make_lockpacket);
EXPORT_SYMBOL(hpsb_make_lock64packet);
EXPORT_SYMBOL(hpsb_make_phypacket);
EXPORT_SYMBOL(hpsb_make_isopacket);
EXPORT_SYMBOL(hpsb_read);
EXPORT_SYMBOL(hpsb_write);
EXPORT_SYMBOL(hpsb_packet_success);

EXPORT_SYMBOL(hpsb_register_highlevel);
EXPORT_SYMBOL(hpsb_unregister_highlevel);
EXPORT_SYMBOL(hpsb_register_addrspace);
EXPORT_SYMBOL(hpsb_unregister_addrspace);
EXPORT_SYMBOL(hpsb_allocate_and_register_addrspace);
EXPORT_SYMBOL(hpsb_listen_channel);
EXPORT_SYMBOL(hpsb_unlisten_channel);
EXPORT_SYMBOL(hpsb_get_hostinfo);
EXPORT_SYMBOL(hpsb_create_hostinfo);
EXPORT_SYMBOL(hpsb_destroy_hostinfo);
EXPORT_SYMBOL(hpsb_set_hostinfo_key);
EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
EXPORT_SYMBOL(hpsb_set_hostinfo);
EXPORT_SYMBOL(highlevel_host_reset);
#ifdef CONFIG_IEEE1394_EXPORT_FULL_API
EXPORT_SYMBOL(highlevel_add_host);
EXPORT_SYMBOL(highlevel_remove_host);
#endif

EXPORT_SYMBOL(hpsb_node_fill_packet);
EXPORT_SYMBOL(hpsb_node_write);
EXPORT_SYMBOL(hpsb_register_protocol);
EXPORT_SYMBOL(hpsb_unregister_protocol);
EXPORT_SYMBOL(ieee1394_bus_type);
#ifdef CONFIG_IEEE1394_EXPORT_FULL_API
EXPORT_SYMBOL(nodemgr_for_each_host);
#endif

EXPORT_SYMBOL(hpsb_update_config_rom);

EXPORT_SYMBOL(dma_prog_region_init);
EXPORT_SYMBOL(dma_prog_region_alloc);
EXPORT_SYMBOL(dma_prog_region_free);
EXPORT_SYMBOL(dma_region_init);
EXPORT_SYMBOL(dma_region_alloc);
EXPORT_SYMBOL(dma_region_free);
EXPORT_SYMBOL(dma_region_sync_for_cpu);
EXPORT_SYMBOL(dma_region_sync_for_device);
EXPORT_SYMBOL(dma_region_mmap);
EXPORT_SYMBOL(dma_region_offset_to_bus);

EXPORT_SYMBOL(hpsb_iso_xmit_init);
EXPORT_SYMBOL(hpsb_iso_recv_init);
EXPORT_SYMBOL(hpsb_iso_xmit_start);
EXPORT_SYMBOL(hpsb_iso_recv_start);
EXPORT_SYMBOL(hpsb_iso_recv_listen_channel);
EXPORT_SYMBOL(hpsb_iso_recv_unlisten_channel);
EXPORT_SYMBOL(hpsb_iso_recv_set_channel_mask);
EXPORT_SYMBOL(hpsb_iso_stop);
EXPORT_SYMBOL(hpsb_iso_shutdown);
EXPORT_SYMBOL(hpsb_iso_xmit_queue_packet);
EXPORT_SYMBOL(hpsb_iso_xmit_sync);
EXPORT_SYMBOL(hpsb_iso_recv_release_packets);
EXPORT_SYMBOL(hpsb_iso_n_ready);
EXPORT_SYMBOL(hpsb_iso_packet_sent);
EXPORT_SYMBOL(hpsb_iso_packet_received);
EXPORT_SYMBOL(hpsb_iso_wake);
EXPORT_SYMBOL(hpsb_iso_recv_flush);

EXPORT_SYMBOL(csr1212_new_directory);
EXPORT_SYMBOL(csr1212_attach_keyval_to_directory);
EXPORT_SYMBOL(csr1212_detach_keyval_from_directory);
EXPORT_SYMBOL(csr1212_release_keyval);
EXPORT_SYMBOL(csr1212_read);
EXPORT_SYMBOL(csr1212_parse_keyval);
EXPORT_SYMBOL(_csr1212_read_keyval);
EXPORT_SYMBOL(_csr1212_destroy_keyval);
#ifdef CONFIG_IEEE1394_EXPORT_FULL_API
EXPORT_SYMBOL(csr1212_create_csr);
EXPORT_SYMBOL(csr1212_init_local_csr);
EXPORT_SYMBOL(csr1212_new_immediate);
EXPORT_SYMBOL(csr1212_associate_keyval);
EXPORT_SYMBOL(csr1212_new_string_descriptor_leaf);
EXPORT_SYMBOL(csr1212_destroy_csr);
EXPORT_SYMBOL(csr1212_generate_csr_image);
EXPORT_SYMBOL(csr1212_parse_csr);
#endif