4 * Core support: hpsb_packet management, packet handling and forwarding to
5 * highlevel or lowlevel code
7 * Copyright (C) 1999, 2000 Andreas E. Bombe
8 * 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
10 * This code is licensed under the GPL. See the file COPYING in the root
11 * directory of the kernel sources for details.
16 * Manfred Weihs <weihs@ict.tuwien.ac.at>
17 * loopback functionality in hpsb_send_packet
18 * allow highlevel drivers to disable automatic response generation
19 * and to generate responses themselves (deferred)
23 #include <linux/config.h>
24 #include <linux/kernel.h>
25 #include <linux/list.h>
26 #include <linux/string.h>
27 #include <linux/init.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/bitops.h>
33 #include <linux/kdev_t.h>
34 #include <linux/skbuff.h>
35 #include <linux/suspend.h>
36 #include <linux/kthread.h>
38 #include <asm/byteorder.h>
39 #include <asm/semaphore.h>
41 #include "ieee1394_types.h"
44 #include "ieee1394_core.h"
45 #include "highlevel.h"
46 #include "ieee1394_transactions.h"
51 #include "config_roms.h"
54 * Disable the nodemgr detection and config rom reading functionality.
56 static int disable_nodemgr;
57 module_param(disable_nodemgr, int, 0444);
58 MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");
60 /* Disable Isochronous Resource Manager functionality */
61 int hpsb_disable_irm = 0;
62 module_param_named(disable_irm, hpsb_disable_irm, bool, 0444);
63 MODULE_PARM_DESC(disable_irm,
64 "Disable Isochronous Resource Manager functionality.");
66 /* We are GPL, so treat us special */
67 MODULE_LICENSE("GPL");
69 /* Some globals used */
70 const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S3200" };
71 struct class *hpsb_protocol_class;
73 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
74 static void dump_packet(const char *text, quadlet_t *data, int size, int speed)
79 size = (size > 4 ? 4 : size);
81 printk(KERN_DEBUG "ieee1394: %s", text);
82 if (speed > -1 && speed < 6)
83 printk(" at %s", hpsb_speedto_str[speed]);
85 for (i = 0; i < size; i++)
86 printk(" %08x", data[i]);
90 #define dump_packet(a,b,c,d)
93 static void abort_requests(struct hpsb_host *host);
94 static void queue_packet_complete(struct hpsb_packet *packet);
98 * hpsb_set_packet_complete_task - set the task that runs when a packet
99 * completes. You cannot call this more than once on a single packet
102 * @packet: the packet whose completion we want the task added to
103 * @routine: function to call
104 * @data: data (if any) to pass to the above function
106 void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
107 void (*routine)(void *), void *data)
109 WARN_ON(packet->complete_routine != NULL);
110 packet->complete_routine = routine;
111 packet->complete_data = data;
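/*
 * Example (hedged sketch, hypothetical names): a driver that wants to be
 * notified asynchronously instead of blocking on the packet could register
 * a routine like this before handing the packet to hpsb_send_packet():
 *
 *	static void my_complete(void *data)
 *	{
 *		struct my_request *req = data;	(hypothetical driver state)
 *		...
 *	}
 *
 *	hpsb_set_packet_complete_task(packet, my_complete, req);
 */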
116 * hpsb_alloc_packet - allocate new packet structure
117 * @data_size: size of the data block to be allocated
119 * This function allocates, initializes and returns a new &struct hpsb_packet.
120 * It can be used in interrupt context. A header block is always included; it is
121 * large enough to contain all possible 1394 headers. The data block is
122 * only allocated when @data_size is not zero.
124 * For packets for which a response will be received, @data_size has to be large
125 * enough to hold the response's data block, since no further allocation
126 * occurs at response matching time.
128 * The packet's generation is initialized to -1 (invalid). Set it before sending,
129 * for instance from get_hpsb_generation(), and use your own previously recorded
130 * generation number if you cannot be sure that your code will not race with a bus reset.
132 * Return value: A pointer to a &struct hpsb_packet or NULL on allocation failure.
135 struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
137 struct hpsb_packet *packet = NULL;
140 data_size = ((data_size + 3) & ~3);
142 skb = alloc_skb(data_size + sizeof(*packet), GFP_ATOMIC);
146 memset(skb->data, 0, data_size + sizeof(*packet));
148 packet = (struct hpsb_packet *)skb->data;
151 packet->header = packet->embedded_header;
152 packet->state = hpsb_unused;
153 packet->generation = -1;
154 INIT_LIST_HEAD(&packet->driver_list);
155 atomic_set(&packet->refcnt, 1);
158 packet->data = (quadlet_t *)(skb->data + sizeof(*packet));
159 packet->data_size = data_size;
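/*
 * Example (hedged sketch): allocating a packet with an 8 byte data block,
 * stamping it with the current bus generation and releasing it again. Names
 * other than the ones used in this file are hypothetical.
 *
 *	struct hpsb_packet *packet = hpsb_alloc_packet(8);
 *
 *	if (!packet)
 *		return -ENOMEM;
 *	packet->host = host;
 *	packet->generation = get_hpsb_generation(host);
 *	...
 *	hpsb_free_packet(packet);
 */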
167 * hpsb_free_packet - free packet and data associated with it
168 * @packet: packet to free (is NULL safe)
170 * This function will free packet->data and finally the packet itself.
172 void hpsb_free_packet(struct hpsb_packet *packet)
174 if (packet && atomic_dec_and_test(&packet->refcnt)) {
175 BUG_ON(!list_empty(&packet->driver_list));
176 kfree_skb(packet->skb);
181 int hpsb_reset_bus(struct hpsb_host *host, int type)
183 if (!host->in_bus_reset) {
184 host->driver->devctl(host, RESET_BUS, type);
192 int hpsb_bus_reset(struct hpsb_host *host)
194 if (host->in_bus_reset) {
195 HPSB_NOTICE("%s called while bus reset already in progress",
200 abort_requests(host);
201 host->in_bus_reset = 1;
204 host->busmgr_id = -1;
207 host->node_count = 0;
208 host->selfid_count = 0;
215 * Verify the received SelfIDs and return the number of nodes. Return zero if
216 * verification failed.
218 static int check_selfids(struct hpsb_host *host)
221 int rest_of_selfids = host->selfid_count;
222 struct selfid *sid = (struct selfid *)host->topology_map;
223 struct ext_selfid *esid;
226 host->nodes_active = 0;
228 while (rest_of_selfids--) {
229 if (!sid->extended) {
233 if (sid->phy_id != nodeid) {
234 HPSB_INFO("SelfIDs failed monotony check with "
239 if (sid->link_active) {
240 host->nodes_active++;
242 host->irm_id = LOCAL_BUS | sid->phy_id;
245 esid = (struct ext_selfid *)sid;
247 if ((esid->phy_id != nodeid)
248 || (esid->seq_nr != esid_seq)) {
249 HPSB_INFO("SelfIDs failed monotony check with "
250 "%d/%d", esid->phy_id, esid->seq_nr);
258 esid = (struct ext_selfid *)(sid - 1);
259 while (esid->extended) {
260 if ((esid->porta == SELFID_PORT_PARENT) ||
261 (esid->portb == SELFID_PORT_PARENT) ||
262 (esid->portc == SELFID_PORT_PARENT) ||
263 (esid->portd == SELFID_PORT_PARENT) ||
264 (esid->porte == SELFID_PORT_PARENT) ||
265 (esid->portf == SELFID_PORT_PARENT) ||
266 (esid->portg == SELFID_PORT_PARENT) ||
267 (esid->porth == SELFID_PORT_PARENT)) {
268 HPSB_INFO("SelfIDs failed root check on "
275 sid = (struct selfid *)esid;
276 if ((sid->port0 == SELFID_PORT_PARENT) ||
277 (sid->port1 == SELFID_PORT_PARENT) ||
278 (sid->port2 == SELFID_PORT_PARENT)) {
279 HPSB_INFO("SelfIDs failed root check");
283 host->node_count = nodeid + 1;
287 static void build_speed_map(struct hpsb_host *host, int nodecount)
289 u8 cldcnt[nodecount];
290 u8 *map = host->speed_map;
291 u8 *speedcap = host->speed;
293 struct ext_selfid *esid;
296 for (i = 0; i < (nodecount * 64); i += 64) {
297 for (j = 0; j < nodecount; j++) {
298 map[i+j] = IEEE1394_SPEED_MAX;
302 for (i = 0; i < nodecount; i++) {
306 /* find direct children count and speed */
307 for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
309 (void *)sid >= (void *)host->topology_map; sid--) {
311 esid = (struct ext_selfid *)sid;
313 if (esid->porta == SELFID_PORT_CHILD) cldcnt[n]++;
314 if (esid->portb == SELFID_PORT_CHILD) cldcnt[n]++;
315 if (esid->portc == SELFID_PORT_CHILD) cldcnt[n]++;
316 if (esid->portd == SELFID_PORT_CHILD) cldcnt[n]++;
317 if (esid->porte == SELFID_PORT_CHILD) cldcnt[n]++;
318 if (esid->portf == SELFID_PORT_CHILD) cldcnt[n]++;
319 if (esid->portg == SELFID_PORT_CHILD) cldcnt[n]++;
320 if (esid->porth == SELFID_PORT_CHILD) cldcnt[n]++;
322 if (sid->port0 == SELFID_PORT_CHILD) cldcnt[n]++;
323 if (sid->port1 == SELFID_PORT_CHILD) cldcnt[n]++;
324 if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
326 speedcap[n] = sid->speed;
331 /* set self mapping */
332 for (i = 0; i < nodecount; i++) {
333 map[64*i + i] = speedcap[i];
336 /* fix up direct children count to total children count;
337 * also fix up speedcaps for sibling and parent communication */
338 for (i = 1; i < nodecount; i++) {
339 for (j = cldcnt[i], n = i - 1; j > 0; j--) {
340 cldcnt[i] += cldcnt[n];
341 speedcap[n] = min(speedcap[n], speedcap[i]);
346 for (n = 0; n < nodecount; n++) {
347 for (i = n - cldcnt[n]; i <= n; i++) {
348 for (j = 0; j < (n - cldcnt[n]); j++) {
349 map[j*64 + i] = map[i*64 + j] =
350 min(map[i*64 + j], speedcap[n]);
352 for (j = n + 1; j < nodecount; j++) {
353 map[j*64 + i] = map[i*64 + j] =
354 min(map[i*64 + j], speedcap[n]);
359 /* assume maximum speed for 1394b PHYs, nodemgr will correct it */
360 for (n = 0; n < nodecount; n++)
361 if (speedcap[n] == 3)
362 speedcap[n] = IEEE1394_SPEED_MAX;
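/*
 * Note: host->speed_map built above is a 64x64 matrix, so the negotiated
 * speed between node i and node j ends up in host->speed_map[64*i + j]
 * (and symmetrically in host->speed_map[64*j + i]).
 */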
366 void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
368 if (host->in_bus_reset) {
369 HPSB_VERBOSE("Including SelfID 0x%x", sid);
370 host->topology_map[host->selfid_count++] = sid;
372 HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
373 sid, NODEID_TO_BUS(host->node_id));
377 void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
379 if (!host->in_bus_reset)
380 HPSB_NOTICE("SelfID completion called outside of bus reset!");
382 host->node_id = LOCAL_BUS | phyid;
383 host->is_root = isroot;
385 if (!check_selfids(host)) {
386 if (host->reset_retries++ < 20) {
387 /* selfid stage did not complete without error */
388 HPSB_NOTICE("Error in SelfID stage, resetting");
389 host->in_bus_reset = 0;
390 /* this should work from ohci1394 now... */
391 hpsb_reset_bus(host, LONG_RESET);
394 HPSB_NOTICE("Stopping out-of-control reset loop");
395 HPSB_NOTICE("Warning - topology map and speed map will not be valid");
396 host->reset_retries = 0;
399 host->reset_retries = 0;
400 build_speed_map(host, host->node_count);
403 HPSB_VERBOSE("selfid_complete called with successful SelfID stage "
404 "... irm_id: 0x%X node_id: 0x%X",host->irm_id,host->node_id);
406 /* irm_id is kept up to date by check_selfids() */
407 if (host->irm_id == host->node_id) {
415 host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
418 atomic_inc(&host->generation);
419 host->in_bus_reset = 0;
420 highlevel_host_reset(host);
424 void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
429 spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
431 packet->ack_code = ackcode;
433 if (packet->no_waiter || packet->state == hpsb_complete) {
434 /* if packet->no_waiter, must not have a tlabel allocated */
435 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
436 hpsb_free_packet(packet);
440 atomic_dec(&packet->refcnt); /* drop HC's reference */
441 /* here the packet must be on the host->pending_packet_queue */
443 if (ackcode != ACK_PENDING || !packet->expect_response) {
444 packet->state = hpsb_complete;
445 __skb_unlink(packet->skb, &host->pending_packet_queue);
446 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
447 queue_packet_complete(packet);
451 packet->state = hpsb_pending;
452 packet->sendtime = jiffies;
454 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
456 mod_timer(&host->timeout, jiffies + host->timeout_interval);
460 * hpsb_send_phy_config - transmit a PHY configuration packet on the bus
461 * @host: host that PHY config packet gets sent through
462 * @rootid: root whose force_root bit should get set (-1 = don't set force_root)
463 * @gapcnt: gap count value to set (-1 = don't set gap count)
465 * This function sends a PHY config packet on the bus through the specified host.
467 * Return value: 0 for success or error number otherwise.
469 int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
471 struct hpsb_packet *packet;
475 if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 ||
476 (rootid == -1 && gapcnt == -1)) {
477 HPSB_DEBUG("Invalid Parameter: rootid = %d gapcnt = %d",
483 d |= PHYPACKET_PHYCONFIG_R | rootid << PHYPACKET_PORT_SHIFT;
485 d |= PHYPACKET_PHYCONFIG_T | gapcnt << PHYPACKET_GAPCOUNT_SHIFT;
487 packet = hpsb_make_phypacket(host, d);
491 packet->generation = get_hpsb_generation(host);
492 retval = hpsb_send_packet_and_wait(packet);
493 hpsb_free_packet(packet);
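/*
 * Example (hedged sketch, not a recommendation): asking the PHYs to make the
 * local node root with a gap count of 63, then issuing a reset so the new
 * settings take effect:
 *
 *	if (hpsb_send_phy_config(host, NODEID_TO_NODE(host->node_id), 63))
 *		HPSB_ERR("sending PHY config packet failed");
 *	else
 *		hpsb_reset_bus(host, LONG_RESET);
 */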
499 * hpsb_send_packet - transmit a packet on the bus
500 * @packet: packet to send
502 * The packet is sent through the host specified in the packet->host field.
503 * Before sending, the packet's transmit speed is automatically determined
504 * using the local speed map when it is an async, non-broadcast packet.
506 * Possibilities for failure are that host is either not initialized, in bus
507 * reset, the packet's generation number doesn't match the current generation
508 * number or the host reports a transmit error.
510 * Return value: 0 on success, negative errno on failure.
512 int hpsb_send_packet(struct hpsb_packet *packet)
514 struct hpsb_host *host = packet->host;
516 if (host->is_shutdown)
518 if (host->in_bus_reset ||
519 (packet->generation != get_hpsb_generation(host)))
522 packet->state = hpsb_queued;
524 /* This just seems silly to me */
525 WARN_ON(packet->no_waiter && packet->expect_response);
527 if (!packet->no_waiter || packet->expect_response) {
528 atomic_inc(&packet->refcnt);
529 /* Set the initial "sendtime" to 10 seconds from now, to
530 prevent premature expiry. If a packet takes more than
531 10 seconds to hit the wire, we have bigger problems :) */
532 packet->sendtime = jiffies + 10 * HZ;
533 skb_queue_tail(&host->pending_packet_queue, packet->skb);
536 if (packet->node_id == host->node_id) {
537 /* it is a local request, so handle it locally */
540 size_t size = packet->data_size + packet->header_size;
542 data = kmalloc(size, GFP_ATOMIC);
544 HPSB_ERR("unable to allocate memory for concatenating header and data");
548 memcpy(data, packet->header, packet->header_size);
550 if (packet->data_size)
551 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
553 dump_packet("send packet local", packet->header, packet->header_size, -1);
555 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
556 hpsb_packet_received(host, data, size, 0);
563 if (packet->type == hpsb_async &&
564 NODEID_TO_NODE(packet->node_id) != ALL_NODES)
566 packet->speed_code = host->speed[NODEID_TO_NODE(packet->node_id)];
568 dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
570 return host->driver->transmit_packet(host, packet);
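/*
 * Example (hedged sketch): the "fire and forget" pattern used by callers in
 * this file - the packet is assumed to have been built by one of the
 * hpsb_make_*() helpers, and is dropped on any send error:
 *
 *	packet->generation = get_hpsb_generation(host);
 *	if (hpsb_send_packet(packet) < 0)
 *		hpsb_free_packet(packet);
 *
 * send_packet_nocare() below wraps exactly this error check.
 */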
573 /* We could just use complete() directly as the packet complete
574 * callback, but this is more typesafe, in the sense that we get a
575 * compiler error if the prototype for complete() changes. */
577 static void complete_packet(void *data)
579 complete((struct completion *) data);
582 int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
584 struct completion done;
587 init_completion(&done);
588 hpsb_set_packet_complete_task(packet, complete_packet, &done);
589 retval = hpsb_send_packet(packet);
591 wait_for_completion(&done);
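/*
 * Example (hedged sketch): synchronous use, exactly as in
 * hpsb_send_phy_config() above - send the packet and sleep until its
 * completion routine has run:
 *
 *	packet->generation = get_hpsb_generation(host);
 *	retval = hpsb_send_packet_and_wait(packet);
 *	hpsb_free_packet(packet);
 */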
596 static void send_packet_nocare(struct hpsb_packet *packet)
598 if (hpsb_send_packet(packet) < 0) {
599 hpsb_free_packet(packet);
604 static void handle_packet_response(struct hpsb_host *host, int tcode,
605 quadlet_t *data, size_t size)
607 struct hpsb_packet *packet = NULL;
613 tlabel = (data[0] >> 10) & 0x3f;
615 spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
617 skb_queue_walk(&host->pending_packet_queue, skb) {
618 packet = (struct hpsb_packet *)skb->data;
619 if ((packet->tlabel == tlabel)
620 && (packet->node_id == (data[1] >> 16))){
627 if (packet == NULL) {
628 HPSB_DEBUG("unsolicited response packet received - no tlabel match");
629 dump_packet("contents", data, 16, -1);
630 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
634 switch (packet->tcode) {
637 if (tcode != TCODE_WRITE_RESPONSE)
640 memcpy(packet->header, data, 12);
643 if (tcode != TCODE_READQ_RESPONSE)
646 memcpy(packet->header, data, 16);
649 if (tcode != TCODE_READB_RESPONSE)
652 BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
653 memcpy(packet->header, data, 16);
654 memcpy(packet->data, data + 4, size - 16);
656 case TCODE_LOCK_REQUEST:
657 if (tcode != TCODE_LOCK_RESPONSE)
660 size = min((size - 16), (size_t)8);
661 BUG_ON(packet->skb->len - sizeof(*packet) < size);
662 memcpy(packet->header, data, 16);
663 memcpy(packet->data, data + 4, size);
668 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
669 HPSB_INFO("unsolicited response packet received - tcode mismatch");
670 dump_packet("contents", data, 16, -1);
674 __skb_unlink(skb, &host->pending_packet_queue);
676 if (packet->state == hpsb_queued) {
677 packet->sendtime = jiffies;
678 packet->ack_code = ACK_PENDING;
681 packet->state = hpsb_complete;
682 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
684 queue_packet_complete(packet);
688 static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
689 quadlet_t *data, size_t dsize)
691 struct hpsb_packet *p;
693 p = hpsb_alloc_packet(dsize);
694 if (unlikely(p == NULL)) {
695 /* FIXME - send data_error response */
699 p->type = hpsb_async;
700 p->state = hpsb_unused;
702 p->node_id = data[1] >> 16;
703 p->tlabel = (data[0] >> 10) & 0x3f;
706 p->generation = get_hpsb_generation(host);
709 p->data[dsize / 4] = 0;
714 #define PREP_ASYNC_HEAD_RCODE(tc) \
715 packet->tcode = tc; \
716 packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
717 | (1 << 8) | (tc << 4); \
718 packet->header[1] = (packet->host->node_id << 16) | (rcode << 12); \
719 packet->header[2] = 0
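/*
 * Illustrative reading of the macro above: header[0] carries the destination
 * node ID, the transaction label, a fixed retry code and the response tcode;
 * header[1] carries the responding (local) node ID and the rcode; header[2]
 * is cleared. Any remaining quadlets are filled in by the fill_async_*
 * helpers below.
 */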
721 static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
724 PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
725 packet->header[3] = data;
726 packet->header_size = 16;
727 packet->data_size = 0;
730 static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
733 if (rcode != RCODE_COMPLETE)
736 PREP_ASYNC_HEAD_RCODE(TCODE_READB_RESPONSE);
737 packet->header[3] = length << 16;
738 packet->header_size = 16;
739 packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
742 static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
744 PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
745 packet->header[2] = 0;
746 packet->header_size = 12;
747 packet->data_size = 0;
750 static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
753 if (rcode != RCODE_COMPLETE)
756 PREP_ASYNC_HEAD_RCODE(TCODE_LOCK_RESPONSE);
757 packet->header[3] = (length << 16) | extcode;
758 packet->header_size = 16;
759 packet->data_size = length;
762 #define PREP_REPLY_PACKET(length) \
763 packet = create_reply_packet(host, data, length); \
764 if (packet == NULL) break
766 static void handle_incoming_packet(struct hpsb_host *host, int tcode,
767 quadlet_t *data, size_t size, int write_acked)
769 struct hpsb_packet *packet;
770 int length, rcode, extcode;
772 nodeid_t source = data[1] >> 16;
773 nodeid_t dest = data[0] >> 16;
774 u16 flags = (u16) data[0];
777 /* big FIXME - no error checking is done for an out of bounds length */
781 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
782 rcode = highlevel_write(host, source, dest, data+3,
786 && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
788 /* not a broadcast write, reply */
789 PREP_REPLY_PACKET(0);
790 fill_async_write_resp(packet, rcode);
791 send_packet_nocare(packet);
796 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
797 rcode = highlevel_write(host, source, dest, data+4,
798 addr, data[3]>>16, flags);
801 && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
803 /* not a broadcast write, reply */
804 PREP_REPLY_PACKET(0);
805 fill_async_write_resp(packet, rcode);
806 send_packet_nocare(packet);
811 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
812 rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
815 PREP_REPLY_PACKET(0);
816 fill_async_readquad_resp(packet, rcode, buffer);
817 send_packet_nocare(packet);
822 length = data[3] >> 16;
823 PREP_REPLY_PACKET(length);
825 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
826 rcode = highlevel_read(host, source, packet->data, addr,
830 fill_async_readblock_resp(packet, rcode, length);
831 send_packet_nocare(packet);
833 hpsb_free_packet(packet);
837 case TCODE_LOCK_REQUEST:
838 length = data[3] >> 16;
839 extcode = data[3] & 0xffff;
840 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
842 PREP_REPLY_PACKET(8);
844 if ((extcode == 0) || (extcode >= 7)) {
845 /* let switch default handle error */
851 rcode = highlevel_lock(host, source, packet->data, addr,
852 data[4], 0, extcode, flags);
853 fill_async_lock_resp(packet, rcode, extcode, 4);
856 if ((extcode != EXTCODE_FETCH_ADD)
857 && (extcode != EXTCODE_LITTLE_ADD)) {
858 rcode = highlevel_lock(host, source,
862 fill_async_lock_resp(packet, rcode, extcode, 4);
864 rcode = highlevel_lock64(host, source,
865 (octlet_t *)packet->data, addr,
866 *(octlet_t *)(data + 4), 0ULL,
868 fill_async_lock_resp(packet, rcode, extcode, 8);
872 rcode = highlevel_lock64(host, source,
873 (octlet_t *)packet->data, addr,
874 *(octlet_t *)(data + 6),
875 *(octlet_t *)(data + 4),
877 fill_async_lock_resp(packet, rcode, extcode, 8);
880 rcode = RCODE_TYPE_ERROR;
881 fill_async_lock_resp(packet, rcode,
886 send_packet_nocare(packet);
888 hpsb_free_packet(packet);
894 #undef PREP_REPLY_PACKET
897 void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
902 if (host->in_bus_reset) {
903 HPSB_INFO("received packet during reset; ignoring");
907 dump_packet("received packet", data, size, -1);
909 tcode = (data[0] >> 4) & 0xf;
912 case TCODE_WRITE_RESPONSE:
913 case TCODE_READQ_RESPONSE:
914 case TCODE_READB_RESPONSE:
915 case TCODE_LOCK_RESPONSE:
916 handle_packet_response(host, tcode, data, size);
923 case TCODE_LOCK_REQUEST:
924 handle_incoming_packet(host, tcode, data, size, write_acked);
929 highlevel_iso_receive(host, data, size);
932 case TCODE_CYCLE_START:
933 /* simply ignore this packet if it is passed on */
937 HPSB_NOTICE("received packet with bogus transaction code %d",
944 static void abort_requests(struct hpsb_host *host)
946 struct hpsb_packet *packet;
949 host->driver->devctl(host, CANCEL_REQUESTS, 0);
951 while ((skb = skb_dequeue(&host->pending_packet_queue)) != NULL) {
952 packet = (struct hpsb_packet *)skb->data;
954 packet->state = hpsb_complete;
955 packet->ack_code = ACKX_ABORTED;
956 queue_packet_complete(packet);
960 void abort_timedouts(unsigned long __opaque)
962 struct hpsb_host *host = (struct hpsb_host *)__opaque;
964 struct hpsb_packet *packet;
966 unsigned long expire;
968 spin_lock_irqsave(&host->csr.lock, flags);
969 expire = host->csr.expire;
970 spin_unlock_irqrestore(&host->csr.lock, flags);
972 /* Hold the lock around this, since we aren't dequeuing all
973 * packets, just ones we need. */
974 spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
976 while (!skb_queue_empty(&host->pending_packet_queue)) {
977 skb = skb_peek(&host->pending_packet_queue);
979 packet = (struct hpsb_packet *)skb->data;
981 if (time_before(packet->sendtime + expire, jiffies)) {
982 __skb_unlink(skb, &host->pending_packet_queue);
983 packet->state = hpsb_complete;
984 packet->ack_code = ACKX_TIMEOUT;
985 queue_packet_complete(packet);
987 /* Since packets are added to the tail, the oldest
988 * ones are first, always. When we get to one that
989 * isn't timed out, the rest aren't either. */
994 if (!skb_queue_empty(&host->pending_packet_queue))
995 mod_timer(&host->timeout, jiffies + host->timeout_interval);
997 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
1001 /* Kernel thread and variables for handling completed packets. Only packets
1002  * that have a "complete" routine are queued here; the completion then runs
1003  * in process context (a kernel thread) and doesn't block the rest of the system. */
1005 static struct task_struct *khpsbpkt_thread;
1006 static struct sk_buff_head hpsbpkt_queue;
1008 static void queue_packet_complete(struct hpsb_packet *packet)
1010 if (packet->no_waiter) {
1011 hpsb_free_packet(packet);
1014 if (packet->complete_routine != NULL) {
1015 skb_queue_tail(&hpsbpkt_queue, packet->skb);
1016 wake_up_process(khpsbpkt_thread);
1021 static int hpsbpkt_thread(void *__hi)
1023 struct sk_buff *skb;
1024 struct hpsb_packet *packet;
1025 void (*complete_routine)(void*);
1026 void *complete_data;
1028 current->flags |= PF_NOFREEZE;
1030 while (!kthread_should_stop()) {
1031 while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
1032 packet = (struct hpsb_packet *)skb->data;
1034 complete_routine = packet->complete_routine;
1035 complete_data = packet->complete_data;
1037 packet->complete_routine = packet->complete_data = NULL;
1039 complete_routine(complete_data);
1042 set_current_state(TASK_INTERRUPTIBLE);
1043 if (!skb_peek(&hpsbpkt_queue))
1045 __set_current_state(TASK_RUNNING);
1050 static int __init ieee1394_init(void)
1054 skb_queue_head_init(&hpsbpkt_queue);
1056 /* non-fatal error */
1057 if (hpsb_init_config_roms()) {
1058 HPSB_ERR("Failed to initialize some config rom entries.\n");
1059 HPSB_ERR("Some features may not be available\n");
1062 khpsbpkt_thread = kthread_run(hpsbpkt_thread, NULL, "khpsbpkt");
1063 if (IS_ERR(khpsbpkt_thread)) {
1064 HPSB_ERR("Failed to start hpsbpkt thread!\n");
1065 ret = PTR_ERR(khpsbpkt_thread);
1066 goto exit_cleanup_config_roms;
1069 if (register_chrdev_region(IEEE1394_CORE_DEV, 256, "ieee1394")) {
1070 HPSB_ERR("unable to register character device major %d!\n", IEEE1394_MAJOR);
1072 goto exit_release_kernel_thread;
1075 ret = bus_register(&ieee1394_bus_type);
1077 HPSB_INFO("bus register failed");
1078 goto release_chrdev;
1081 for (i = 0; fw_bus_attrs[i]; i++) {
1082 ret = bus_create_file(&ieee1394_bus_type, fw_bus_attrs[i]);
1085 bus_remove_file(&ieee1394_bus_type,
1088 bus_unregister(&ieee1394_bus_type);
1089 goto release_chrdev;
1093 ret = class_register(&hpsb_host_class);
1095 goto release_all_bus;
1097 hpsb_protocol_class = class_create(THIS_MODULE, "ieee1394_protocol");
1098 if (IS_ERR(hpsb_protocol_class)) {
1099 ret = PTR_ERR(hpsb_protocol_class);
1100 goto release_class_host;
1105 HPSB_INFO("init csr failed");
1107 goto release_class_protocol;
1110 if (disable_nodemgr) {
1111 HPSB_INFO("nodemgr and IRM functionality disabled");
1112 /* We shouldn't contend for IRM with nodemgr disabled, since
1113 nodemgr implements functionality required of ieee1394a-2000 IRM managers. */
1115 hpsb_disable_irm = 1;
1120 if (hpsb_disable_irm) {
1121 HPSB_INFO("IRM functionality disabled");
1124 ret = init_ieee1394_nodemgr();
1126 HPSB_INFO("init nodemgr failed");
1134 release_class_protocol:
1135 class_destroy(hpsb_protocol_class);
1137 class_unregister(&hpsb_host_class);
1139 for (i = 0; fw_bus_attrs[i]; i++)
1140 bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
1141 bus_unregister(&ieee1394_bus_type);
1143 unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
1144 exit_release_kernel_thread:
1145 kthread_stop(khpsbpkt_thread);
1146 exit_cleanup_config_roms:
1147 hpsb_cleanup_config_roms();
1151 static void __exit ieee1394_cleanup(void)
1155 if (!disable_nodemgr)
1156 cleanup_ieee1394_nodemgr();
1160 class_destroy(hpsb_protocol_class);
1161 class_unregister(&hpsb_host_class);
1162 for (i = 0; fw_bus_attrs[i]; i++)
1163 bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
1164 bus_unregister(&ieee1394_bus_type);
1166 kthread_stop(khpsbpkt_thread);
1168 hpsb_cleanup_config_roms();
1170 unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
1173 module_init(ieee1394_init);
1174 module_exit(ieee1394_cleanup);
1176 /* Exported symbols */
1179 EXPORT_SYMBOL(hpsb_alloc_host);
1180 EXPORT_SYMBOL(hpsb_add_host);
1181 EXPORT_SYMBOL(hpsb_remove_host);
1182 EXPORT_SYMBOL(hpsb_update_config_rom_image);
1184 /** ieee1394_core.c **/
1185 EXPORT_SYMBOL(hpsb_speedto_str);
1186 EXPORT_SYMBOL(hpsb_protocol_class);
1187 EXPORT_SYMBOL(hpsb_set_packet_complete_task);
1188 EXPORT_SYMBOL(hpsb_alloc_packet);
1189 EXPORT_SYMBOL(hpsb_free_packet);
1190 EXPORT_SYMBOL(hpsb_send_packet);
1191 EXPORT_SYMBOL(hpsb_reset_bus);
1192 EXPORT_SYMBOL(hpsb_bus_reset);
1193 EXPORT_SYMBOL(hpsb_selfid_received);
1194 EXPORT_SYMBOL(hpsb_selfid_complete);
1195 EXPORT_SYMBOL(hpsb_packet_sent);
1196 EXPORT_SYMBOL(hpsb_packet_received);
1197 EXPORT_SYMBOL_GPL(hpsb_disable_irm);
1198 #ifdef CONFIG_IEEE1394_EXPORT_FULL_API
1199 EXPORT_SYMBOL(hpsb_send_phy_config);
1200 EXPORT_SYMBOL(hpsb_send_packet_and_wait);
1203 /** ieee1394_transactions.c **/
1204 EXPORT_SYMBOL(hpsb_get_tlabel);
1205 EXPORT_SYMBOL(hpsb_free_tlabel);
1206 EXPORT_SYMBOL(hpsb_make_readpacket);
1207 EXPORT_SYMBOL(hpsb_make_writepacket);
1208 EXPORT_SYMBOL(hpsb_make_streampacket);
1209 EXPORT_SYMBOL(hpsb_make_lockpacket);
1210 EXPORT_SYMBOL(hpsb_make_lock64packet);
1211 EXPORT_SYMBOL(hpsb_make_phypacket);
1212 EXPORT_SYMBOL(hpsb_make_isopacket);
1213 EXPORT_SYMBOL(hpsb_read);
1214 EXPORT_SYMBOL(hpsb_write);
1215 EXPORT_SYMBOL(hpsb_packet_success);
1218 EXPORT_SYMBOL(hpsb_register_highlevel);
1219 EXPORT_SYMBOL(hpsb_unregister_highlevel);
1220 EXPORT_SYMBOL(hpsb_register_addrspace);
1221 EXPORT_SYMBOL(hpsb_unregister_addrspace);
1222 EXPORT_SYMBOL(hpsb_allocate_and_register_addrspace);
1223 EXPORT_SYMBOL(hpsb_listen_channel);
1224 EXPORT_SYMBOL(hpsb_unlisten_channel);
1225 EXPORT_SYMBOL(hpsb_get_hostinfo);
1226 EXPORT_SYMBOL(hpsb_create_hostinfo);
1227 EXPORT_SYMBOL(hpsb_destroy_hostinfo);
1228 EXPORT_SYMBOL(hpsb_set_hostinfo_key);
1229 EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
1230 EXPORT_SYMBOL(hpsb_set_hostinfo);
1231 EXPORT_SYMBOL(highlevel_host_reset);
1232 #ifdef CONFIG_IEEE1394_EXPORT_FULL_API
1233 EXPORT_SYMBOL(highlevel_add_host);
1234 EXPORT_SYMBOL(highlevel_remove_host);
1238 EXPORT_SYMBOL(hpsb_node_fill_packet);
1239 EXPORT_SYMBOL(hpsb_node_write);
1240 EXPORT_SYMBOL(hpsb_register_protocol);
1241 EXPORT_SYMBOL(hpsb_unregister_protocol);
1242 EXPORT_SYMBOL(ieee1394_bus_type);
1243 #ifdef CONFIG_IEEE1394_EXPORT_FULL_API
1244 EXPORT_SYMBOL(nodemgr_for_each_host);
1248 EXPORT_SYMBOL(hpsb_update_config_rom);
1251 EXPORT_SYMBOL(dma_prog_region_init);
1252 EXPORT_SYMBOL(dma_prog_region_alloc);
1253 EXPORT_SYMBOL(dma_prog_region_free);
1254 EXPORT_SYMBOL(dma_region_init);
1255 EXPORT_SYMBOL(dma_region_alloc);
1256 EXPORT_SYMBOL(dma_region_free);
1257 EXPORT_SYMBOL(dma_region_sync_for_cpu);
1258 EXPORT_SYMBOL(dma_region_sync_for_device);
1259 EXPORT_SYMBOL(dma_region_mmap);
1260 EXPORT_SYMBOL(dma_region_offset_to_bus);
1263 EXPORT_SYMBOL(hpsb_iso_xmit_init);
1264 EXPORT_SYMBOL(hpsb_iso_recv_init);
1265 EXPORT_SYMBOL(hpsb_iso_xmit_start);
1266 EXPORT_SYMBOL(hpsb_iso_recv_start);
1267 EXPORT_SYMBOL(hpsb_iso_recv_listen_channel);
1268 EXPORT_SYMBOL(hpsb_iso_recv_unlisten_channel);
1269 EXPORT_SYMBOL(hpsb_iso_recv_set_channel_mask);
1270 EXPORT_SYMBOL(hpsb_iso_stop);
1271 EXPORT_SYMBOL(hpsb_iso_shutdown);
1272 EXPORT_SYMBOL(hpsb_iso_xmit_queue_packet);
1273 EXPORT_SYMBOL(hpsb_iso_xmit_sync);
1274 EXPORT_SYMBOL(hpsb_iso_recv_release_packets);
1275 EXPORT_SYMBOL(hpsb_iso_n_ready);
1276 EXPORT_SYMBOL(hpsb_iso_packet_sent);
1277 EXPORT_SYMBOL(hpsb_iso_packet_received);
1278 EXPORT_SYMBOL(hpsb_iso_wake);
1279 EXPORT_SYMBOL(hpsb_iso_recv_flush);
1282 EXPORT_SYMBOL(csr1212_new_directory);
1283 EXPORT_SYMBOL(csr1212_attach_keyval_to_directory);
1284 EXPORT_SYMBOL(csr1212_detach_keyval_from_directory);
1285 EXPORT_SYMBOL(csr1212_release_keyval);
1286 EXPORT_SYMBOL(csr1212_read);
1287 EXPORT_SYMBOL(csr1212_parse_keyval);
1288 EXPORT_SYMBOL(_csr1212_read_keyval);
1289 EXPORT_SYMBOL(_csr1212_destroy_keyval);
1290 #ifdef CONFIG_IEEE1394_EXPORT_FULL_API
1291 EXPORT_SYMBOL(csr1212_create_csr);
1292 EXPORT_SYMBOL(csr1212_init_local_csr);
1293 EXPORT_SYMBOL(csr1212_new_immediate);
1294 EXPORT_SYMBOL(csr1212_associate_keyval);
1295 EXPORT_SYMBOL(csr1212_new_string_descriptor_leaf);
1296 EXPORT_SYMBOL(csr1212_destroy_csr);
1297 EXPORT_SYMBOL(csr1212_generate_csr_image);
1298 EXPORT_SYMBOL(csr1212_parse_csr);