ieee1394: sbp2: remove irritating log message
[linux-2.6] drivers/ieee1394/sbp2.c
1 /*
2  * sbp2.c - SBP-2 protocol driver for IEEE-1394
3  *
4  * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
5  * jamesg@filanet.com (JSG)
6  *
7  * Copyright (C) 2003 Ben Collins <bcollins@debian.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software Foundation,
21  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22  */
23
24 /*
25  * Brief Description:
26  *
27  * This driver implements the Serial Bus Protocol 2 (SBP-2) over IEEE-1394
28  * under Linux. The SBP-2 driver is implemented as an IEEE-1394 high-level
29  * driver. It also registers as a SCSI lower-level driver in order to accept
30  * SCSI commands for transport using SBP-2.
31  *
32  * You may access any attached SBP-2 storage devices as if they were SCSI
33  * devices (e.g. mount /dev/sda1,  fdisk, mkfs, etc.).
34  *
35  * Current Issues:
36  *
37  *      - Error Handling: SCSI aborts and bus reset requests are handled to some
38  *        extent, but the code needs additional debugging.
39  */
40
41 #include <linux/blkdev.h>
42 #include <linux/compiler.h>
43 #include <linux/delay.h>
44 #include <linux/device.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/gfp.h>
47 #include <linux/init.h>
48 #include <linux/kernel.h>
49 #include <linux/list.h>
50 #include <linux/module.h>
51 #include <linux/moduleparam.h>
52 #include <linux/pci.h>
53 #include <linux/slab.h>
54 #include <linux/spinlock.h>
55 #include <linux/stat.h>
56 #include <linux/string.h>
57 #include <linux/stringify.h>
58 #include <linux/types.h>
59 #include <linux/wait.h>
60
61 #include <asm/byteorder.h>
62 #include <asm/errno.h>
63 #include <asm/param.h>
64 #include <asm/scatterlist.h>
65 #include <asm/system.h>
66 #include <asm/types.h>
67
68 #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
69 #include <asm/io.h> /* for bus_to_virt */
70 #endif
71
72 #include <scsi/scsi.h>
73 #include <scsi/scsi_cmnd.h>
74 #include <scsi/scsi_dbg.h>
75 #include <scsi/scsi_device.h>
76 #include <scsi/scsi_host.h>
77
78 #include "csr1212.h"
79 #include "highlevel.h"
80 #include "hosts.h"
81 #include "ieee1394.h"
82 #include "ieee1394_core.h"
83 #include "ieee1394_hotplug.h"
84 #include "ieee1394_transactions.h"
85 #include "ieee1394_types.h"
86 #include "nodemgr.h"
87 #include "sbp2.h"
88
89 /*
90  * Module load parameter definitions
91  */
92
93 /*
94  * Change max_speed on module load if you have a bad IEEE-1394
95  * controller that has trouble running 2KB packets at 400mb.
96  *
97  * NOTE: On certain OHCI parts I have seen short packets on async transmit
98  * (probably due to PCI latency/throughput issues with the part). You can
99  * bump down the speed if you are running into problems.
100  */
101 static int max_speed = IEEE1394_SPEED_MAX;
102 module_param(max_speed, int, 0644);
103 MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb, 1 = 200mb, 0 = 100mb)");
104
105 /*
106  * Set serialize_io to 1 if you'd like only one scsi command sent
107  * down to us at a time (debugging). This might be necessary for very
108  * badly behaved sbp2 devices.
109  *
110  * TODO: Make this configurable per device.
111  */
112 static int serialize_io = 1;
113 module_param(serialize_io, int, 0444);
114 MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default = 1, faster = 0)");
115
116 /*
117  * Bump up max_sectors if you'd like to support very large transfers.
118  * Please note that some older sbp2 bridge chips are broken for
119  * transfers greater than or equal to 128KB.  The default is 255
120  * sectors, or just under 128KB (at a 512 byte sector size). The Oxsemi
121  * sbp2 chipsets, for example, have no problems supporting very large
122  * transfer sizes.
123  */
124 static int max_sectors = SBP2_MAX_SECTORS;
125 module_param(max_sectors, int, 0444);
126 MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
127                  __stringify(SBP2_MAX_SECTORS) ")");
128
129 /*
130  * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
131  * do an exclusive login, as it's generally unsafe to have two hosts
132  * talking to a single sbp2 device at the same time (filesystem coherency,
133  * etc.). If you're running an sbp2 device that supports multiple logins,
134  * and you're either running read-only filesystems or some sort of special
135  * filesystem supporting multiple hosts, e.g. OpenGFS, Oracle Cluster
136  * File System, or Lustre, then set exclusive_login to zero.
137  *
138  * So far only bridges from Oxford Semiconductor are known to support
139  * concurrent logins. Depending on firmware, four or two concurrent logins
140  * are possible on OXFW911 and newer Oxsemi bridges.
141  */
142 static int exclusive_login = 1;
143 module_param(exclusive_login, int, 0644);
144 MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
145
146 /*
147  * If any of the following workarounds is required for your device to work,
148  * please submit the kernel messages logged by sbp2 to the linux1394-devel
149  * mailing list.
150  *
151  * - 128kB max transfer
152  *   Limit transfer size. Necessary for some old bridges.
153  *
154  * - 36 byte inquiry
155  *   When scsi_mod probes the device, let the inquiry command look like that
156  *   from MS Windows.
157  *
158  * - skip mode page 8
159  *   Suppress sending of mode_sense for mode page 8 if the device pretends to
160  *   support the SCSI Primary Block commands instead of Reduced Block Commands.
161  *
162  * - fix capacity
163  *   Tell sd_mod to correct the last sector number reported by read_capacity.
164  *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
165  *   Don't use this with devices which don't have this bug.
166  *
167  * - override internal blacklist
168  *   Instead of adding to the built-in blacklist, use only the workarounds
169  *   specified in the module load parameter.
170  *   Useful if a blacklist entry interfered with a non-broken device.
171  */
172 static int sbp2_default_workarounds;
173 module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
174 MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
175         ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
176         ", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
177         ", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
178         ", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
179         ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
180         ", or a combination)");
181
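/*
 * Usage sketch (hypothetical command lines, not part of the driver): all of
 * the module parameters above can be set at load time, for example
 *
 *	modprobe sbp2 serialize_io=0 exclusive_login=0
 *
 * The numeric values of the workaround flags are embedded in the parameter
 * description via __stringify() above, so "modinfo sbp2" prints them;
 * several flags may be OR'ed together into a single workarounds value.
 */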
182 /*
183  * Export information about protocols/devices supported by this driver.
184  */
185 static struct ieee1394_device_id sbp2_id_table[] = {
186         {
187          .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
188          .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
189          .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
190         {}
191 };
192
193 MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
194
195 /*
196  * Debug levels, configured via kernel config, or enable here.
197  */
198
199 #define CONFIG_IEEE1394_SBP2_DEBUG 0
200 /* #define CONFIG_IEEE1394_SBP2_DEBUG_ORBS */
201 /* #define CONFIG_IEEE1394_SBP2_DEBUG_DMA */
202 /* #define CONFIG_IEEE1394_SBP2_DEBUG 1 */
203 /* #define CONFIG_IEEE1394_SBP2_DEBUG 2 */
204 /* #define CONFIG_IEEE1394_SBP2_PACKET_DUMP */
205
206 #ifdef CONFIG_IEEE1394_SBP2_DEBUG_ORBS
207 #define SBP2_ORB_DEBUG(fmt, args...)    HPSB_ERR("sbp2(%s): "fmt, __FUNCTION__, ## args)
208 static u32 global_outstanding_command_orbs = 0;
209 #define outstanding_orb_incr global_outstanding_command_orbs++
210 #define outstanding_orb_decr global_outstanding_command_orbs--
211 #else
212 #define SBP2_ORB_DEBUG(fmt, args...)    do {} while (0)
213 #define outstanding_orb_incr            do {} while (0)
214 #define outstanding_orb_decr            do {} while (0)
215 #endif
216
217 #ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA
218 #define SBP2_DMA_ALLOC(fmt, args...) \
219         HPSB_ERR("sbp2(%s)alloc(%d): "fmt, __FUNCTION__, \
220                  ++global_outstanding_dmas, ## args)
221 #define SBP2_DMA_FREE(fmt, args...) \
222         HPSB_ERR("sbp2(%s)free(%d): "fmt, __FUNCTION__, \
223                  --global_outstanding_dmas, ## args)
224 static u32 global_outstanding_dmas = 0;
225 #else
226 #define SBP2_DMA_ALLOC(fmt, args...)    do {} while (0)
227 #define SBP2_DMA_FREE(fmt, args...)     do {} while (0)
228 #endif
229
230 #if CONFIG_IEEE1394_SBP2_DEBUG >= 2
231 #define SBP2_DEBUG(fmt, args...)        HPSB_ERR("sbp2: "fmt, ## args)
232 #define SBP2_INFO(fmt, args...)         HPSB_ERR("sbp2: "fmt, ## args)
233 #define SBP2_NOTICE(fmt, args...)       HPSB_ERR("sbp2: "fmt, ## args)
234 #define SBP2_WARN(fmt, args...)         HPSB_ERR("sbp2: "fmt, ## args)
235 #elif CONFIG_IEEE1394_SBP2_DEBUG == 1
236 #define SBP2_DEBUG(fmt, args...)        HPSB_DEBUG("sbp2: "fmt, ## args)
237 #define SBP2_INFO(fmt, args...)         HPSB_INFO("sbp2: "fmt, ## args)
238 #define SBP2_NOTICE(fmt, args...)       HPSB_NOTICE("sbp2: "fmt, ## args)
239 #define SBP2_WARN(fmt, args...)         HPSB_WARN("sbp2: "fmt, ## args)
240 #else
241 #define SBP2_DEBUG(fmt, args...)        do {} while (0)
242 #define SBP2_INFO(fmt, args...)         HPSB_INFO("sbp2: "fmt, ## args)
243 #define SBP2_NOTICE(fmt, args...)       HPSB_NOTICE("sbp2: "fmt, ## args)
244 #define SBP2_WARN(fmt, args...)         HPSB_WARN("sbp2: "fmt, ## args)
245 #endif
246
247 #define SBP2_ERR(fmt, args...)          HPSB_ERR("sbp2: "fmt, ## args)
248 #define SBP2_DEBUG_ENTER()              SBP2_DEBUG("%s", __FUNCTION__)
249
250 /*
251  * Globals
252  */
253
254 static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
255                                            u32 status);
256
257 static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
258                                       u32 scsi_status, struct scsi_cmnd *SCpnt,
259                                       void (*done)(struct scsi_cmnd *));
260
261 static struct scsi_host_template scsi_driver_template;
262
263 static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
264
265 static void sbp2_host_reset(struct hpsb_host *host);
266
267 static int sbp2_probe(struct device *dev);
268 static int sbp2_remove(struct device *dev);
269 static int sbp2_update(struct unit_directory *ud);
270
271 static struct hpsb_highlevel sbp2_highlevel = {
272         .name =         SBP2_DEVICE_NAME,
273         .host_reset =   sbp2_host_reset,
274 };
275
276 static struct hpsb_address_ops sbp2_ops = {
277         .write = sbp2_handle_status_write
278 };
279
280 #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
281 static struct hpsb_address_ops sbp2_physdma_ops = {
282         .read = sbp2_handle_physdma_read,
283         .write = sbp2_handle_physdma_write,
284 };
285 #endif
286
287 static struct hpsb_protocol_driver sbp2_driver = {
288         .name           = "SBP2 Driver",
289         .id_table       = sbp2_id_table,
290         .update         = sbp2_update,
291         .driver         = {
292                 .name           = SBP2_DEVICE_NAME,
293                 .bus            = &ieee1394_bus_type,
294                 .probe          = sbp2_probe,
295                 .remove         = sbp2_remove,
296         },
297 };
298
299 /*
300  * List of devices with known bugs.
301  *
302  * The firmware_revision field, masked with 0xffff00, is the best indicator
303  * of the type of bridge chip in a device.  It yields a few false positives,
304  * but so far this has not broken any correctly behaving devices.
305  */
306 static const struct {
307         u32 firmware_revision;
308         u32 model_id;
309         unsigned workarounds;
310 } sbp2_workarounds_table[] = {
311         /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
312                 .firmware_revision      = 0x002800,
313                 .model_id               = 0x001010,
314                 .workarounds            = SBP2_WORKAROUND_INQUIRY_36 |
315                                           SBP2_WORKAROUND_MODE_SENSE_8,
316         },
317         /* Initio bridges, actually only needed for some older ones */ {
318                 .firmware_revision      = 0x000200,
319                 .workarounds            = SBP2_WORKAROUND_INQUIRY_36,
320         },
321         /* Symbios bridge */ {
322                 .firmware_revision      = 0xa0b800,
323                 .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
324         },
325         /*
326          * Note about the following Apple iPod blacklist entries:
327          *
328          * There are iPods (2nd gen, 3rd gen) with model_id==0.  Since our
329          * matching logic treats 0 as a wildcard, we cannot match this ID
330          * without rewriting the matching routine.  Fortunately these iPods
331          * do not feature the read_capacity bug according to one report.
332          * Read_capacity behaviour as well as model_id could change due to
333          * Apple-supplied firmware updates though.
334          */
335         /* iPod 4th generation */ {
336                 .firmware_revision      = 0x0a2700,
337                 .model_id               = 0x000021,
338                 .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
339         },
340         /* iPod mini */ {
341                 .firmware_revision      = 0x0a2700,
342                 .model_id               = 0x000023,
343                 .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
344         },
345         /* iPod Photo */ {
346                 .firmware_revision      = 0x0a2700,
347                 .model_id               = 0x00007e,
348                 .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
349         }
350 };
351
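/*
 * Illustrative sketch (hypothetical helper, not part of this driver): how a
 * lookup against sbp2_workarounds_table might be written, masking a device's
 * firmware_revision with 0xffff00 and treating a model_id of 0 in the table
 * as a wildcard, as described in the comments above.
 *
 *	static unsigned sbp2_lookup_workarounds(u32 firmware_revision,
 *						u32 model_id)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
 *			if (sbp2_workarounds_table[i].firmware_revision !=
 *			    (firmware_revision & 0xffff00))
 *				continue;
 *			if (sbp2_workarounds_table[i].model_id &&
 *			    sbp2_workarounds_table[i].model_id != model_id)
 *				continue;
 *			return sbp2_workarounds_table[i].workarounds;
 *		}
 *		return 0;
 *	}
 */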
352 /**************************************
353  * General utility functions
354  **************************************/
355
356 #ifndef __BIG_ENDIAN
357 /*
358  * Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
359  */
360 static inline void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
361 {
362         u32 *temp = buffer;
363
364         for (length = (length >> 2); length--; )
365                 temp[length] = be32_to_cpu(temp[length]);
366
367         return;
368 }
369
370 /*
371  * Converts a buffer from cpu to be32 byte ordering. Length is in bytes.
372  */
373 static inline void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
374 {
375         u32 *temp = buffer;
376
377         for (length = (length >> 2); length--; )
378                 temp[length] = cpu_to_be32(temp[length]);
379
380         return;
381 }
382 #else /* BIG_ENDIAN */
383 /* Why waste the cpu cycles? */
384 #define sbp2util_be32_to_cpu_buffer(x,y) do {} while (0)
385 #define sbp2util_cpu_to_be32_buffer(x,y) do {} while (0)
386 #endif
387
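/*
 * Example of how these helpers are used further below: an ORB pointer block
 * of two quadlets (8 bytes) is built in CPU byte order and converted in
 * place before it is written to the target, e.g.
 *
 *	quadlet_t data[2];
 *
 *	data[0] = ORB_SET_NODE_ID(hi->host->node_id);
 *	data[1] = scsi_id->query_logins_orb_dma;
 *	sbp2util_cpu_to_be32_buffer(data, 8);	(8 bytes == 2 quadlets)
 */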
388 #ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP
389 /*
390  * Debug packet dump routine. Length is in bytes.
391  */
392 static void sbp2util_packet_dump(void *buffer, int length, char *dump_name,
393                                  u32 dump_phys_addr)
394 {
395         int i;
396         unsigned char *dump = buffer;
397
398         if (!dump || !length || !dump_name)
399                 return;
400
401         if (dump_phys_addr)
402                 printk("[%s, 0x%x]", dump_name, dump_phys_addr);
403         else
404                 printk("[%s]", dump_name);
405         for (i = 0; i < length; i++) {
406                 if (i > 0x3f) {
407                         printk("\n   ...");
408                         break;
409                 }
410                 if ((i & 0x3) == 0)
411                         printk("  ");
412                 if ((i & 0xf) == 0)
413                         printk("\n   ");
414                 printk("%02x ", (int)dump[i]);
415         }
416         printk("\n");
417
418         return;
419 }
420 #else
421 #define sbp2util_packet_dump(w,x,y,z) do {} while (0)
422 #endif
423
424 static DECLARE_WAIT_QUEUE_HEAD(access_wq);
425
426 /*
427  * Waits for completion of an SBP-2 access request.
428  * Returns nonzero if timed out or prematurely interrupted.
429  */
430 static int sbp2util_access_timeout(struct scsi_id_instance_data *scsi_id,
431                                    int timeout)
432 {
433         long leftover = wait_event_interruptible_timeout(
434                                 access_wq, scsi_id->access_complete, timeout);
435
436         scsi_id->access_complete = 0;
437         return leftover <= 0;
438 }
439
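/*
 * Typical use (mirroring the login and query-logins paths below): write the
 * management ORB address to the target, then wait for its status write with
 * an HZ-based timeout, e.g.
 *
 *	if (sbp2util_access_timeout(scsi_id, 20*HZ)) {
 *		SBP2_ERR("Error logging into SBP-2 device - timed out");
 *		return -EIO;
 *	}
 */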
440 /* Frees an allocated packet */
441 static void sbp2_free_packet(struct hpsb_packet *packet)
442 {
443         hpsb_free_tlabel(packet);
444         hpsb_free_packet(packet);
445 }
446
447 /* This is much like hpsb_node_write(), except it ignores the response
448  * subaction and returns immediately. Can be used from interrupts.
449  */
450 static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
451                                        quadlet_t *buffer, size_t length)
452 {
453         struct hpsb_packet *packet;
454
455         packet = hpsb_make_writepacket(ne->host, ne->nodeid,
456                                        addr, buffer, length);
457         if (!packet)
458                 return -ENOMEM;
459
460         hpsb_set_packet_complete_task(packet,
461                                       (void (*)(void *))sbp2_free_packet,
462                                       packet);
463
464         hpsb_node_fill_packet(ne, packet);
465
466         if (hpsb_send_packet(packet) < 0) {
467                 sbp2_free_packet(packet);
468                 return -EIO;
469         }
470
471         return 0;
472 }
473
474 static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id,
475                                         u64 offset, quadlet_t *data, size_t len)
476 {
477         /*
478          * There is a small window after a bus reset within which the node
479          * entry's generation is current but the reconnect has not yet completed.
480          */
481         if (unlikely(atomic_read(&scsi_id->state) == SBP2LU_STATE_IN_RESET))
482                 return;
483
484         if (hpsb_node_write(scsi_id->ne,
485                             scsi_id->sbp2_command_block_agent_addr + offset,
486                             data, len))
487                 SBP2_ERR("sbp2util_notify_fetch_agent failed.");
488         /*
489          * Now accept new SCSI commands, unless a bus reset happened during
490          * hpsb_node_write.
491          */
492         if (likely(atomic_read(&scsi_id->state) != SBP2LU_STATE_IN_RESET))
493                 scsi_unblock_requests(scsi_id->scsi_host);
494 }
495
496 static void sbp2util_write_orb_pointer(struct work_struct *work)
497 {
498         struct scsi_id_instance_data *scsi_id =
499                 container_of(work, struct scsi_id_instance_data,
500                              protocol_work.work);
501         quadlet_t data[2];
502
503         data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
504         data[1] = scsi_id->last_orb_dma;
505         sbp2util_cpu_to_be32_buffer(data, 8);
506         sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
507 }
508
509 static void sbp2util_write_doorbell(struct work_struct *work)
510 {
511         struct scsi_id_instance_data *scsi_id =
512                 container_of(work, struct scsi_id_instance_data,
513                              protocol_work.work);
514         sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
515 }
516
517 /*
518  * This function is called to create a pool of command orbs used for
519  * command processing. It is called when a new sbp2 device is detected.
520  */
521 static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id)
522 {
523         struct sbp2scsi_host_info *hi = scsi_id->hi;
524         int i;
525         unsigned long flags, orbs;
526         struct sbp2_command_info *command;
527
528         orbs = serialize_io ? 2 : SBP2_MAX_CMDS;
529
530         spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
531         for (i = 0; i < orbs; i++) {
532                 command = kzalloc(sizeof(*command), GFP_ATOMIC);
533                 if (!command) {
534                         spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock,
535                                                flags);
536                         return -ENOMEM;
537                 }
538                 command->command_orb_dma =
539                     pci_map_single(hi->host->pdev, &command->command_orb,
540                                    sizeof(struct sbp2_command_orb),
541                                    PCI_DMA_TODEVICE);
542                 SBP2_DMA_ALLOC("single command orb DMA");
543                 command->sge_dma =
544                     pci_map_single(hi->host->pdev,
545                                    &command->scatter_gather_element,
546                                    sizeof(command->scatter_gather_element),
547                                    PCI_DMA_BIDIRECTIONAL);
548                 SBP2_DMA_ALLOC("scatter_gather_element");
549                 INIT_LIST_HEAD(&command->list);
550                 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
551         }
552         spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
553         return 0;
554 }
555
556 /*
557  * This function is called to delete a pool of command orbs.
558  */
559 static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id)
560 {
561         struct hpsb_host *host = scsi_id->hi->host;
562         struct list_head *lh, *next;
563         struct sbp2_command_info *command;
564         unsigned long flags;
565
566         spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
567         if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
568                 list_for_each_safe(lh, next, &scsi_id->sbp2_command_orb_completed) {
569                         command = list_entry(lh, struct sbp2_command_info, list);
570
571                         /* Release our generic DMA mappings */
572                         pci_unmap_single(host->pdev, command->command_orb_dma,
573                                          sizeof(struct sbp2_command_orb),
574                                          PCI_DMA_TODEVICE);
575                         SBP2_DMA_FREE("single command orb DMA");
576                         pci_unmap_single(host->pdev, command->sge_dma,
577                                          sizeof(command->scatter_gather_element),
578                                          PCI_DMA_BIDIRECTIONAL);
579                         SBP2_DMA_FREE("scatter_gather_element");
580
581                         kfree(command);
582                 }
583         }
584         spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
585         return;
586 }
587
588 /*
589  * This function finds the sbp2_command for a given outstanding command
590  * orb. Only looks at the inuse list.
591  */
592 static struct sbp2_command_info *sbp2util_find_command_for_orb(
593                 struct scsi_id_instance_data *scsi_id, dma_addr_t orb)
594 {
595         struct sbp2_command_info *command;
596         unsigned long flags;
597
598         spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
599         if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
600                 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
601                         if (command->command_orb_dma == orb) {
602                                 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
603                                 return command;
604                         }
605                 }
606         }
607         spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
608
609         SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
610
611         return NULL;
612 }
613
614 /*
615  * This function finds the sbp2_command for a given outstanding SCpnt.
616  * Only looks at the inuse list.
617  * Must be called with scsi_id->sbp2_command_orb_lock held.
618  */
619 static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(
620                 struct scsi_id_instance_data *scsi_id, void *SCpnt)
621 {
622         struct sbp2_command_info *command;
623
624         if (!list_empty(&scsi_id->sbp2_command_orb_inuse))
625                 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list)
626                         if (command->Current_SCpnt == SCpnt)
627                                 return command;
628         return NULL;
629 }
630
631 /*
632  * This function allocates a command orb used to send a scsi command.
633  */
634 static struct sbp2_command_info *sbp2util_allocate_command_orb(
635                 struct scsi_id_instance_data *scsi_id,
636                 struct scsi_cmnd *Current_SCpnt,
637                 void (*Current_done)(struct scsi_cmnd *))
638 {
639         struct list_head *lh;
640         struct sbp2_command_info *command = NULL;
641         unsigned long flags;
642
643         spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
644         if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
645                 lh = scsi_id->sbp2_command_orb_completed.next;
646                 list_del(lh);
647                 command = list_entry(lh, struct sbp2_command_info, list);
648                 command->Current_done = Current_done;
649                 command->Current_SCpnt = Current_SCpnt;
650                 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_inuse);
651         } else {
652                 SBP2_ERR("%s: no orbs available", __FUNCTION__);
653         }
654         spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
655         return command;
656 }
657
658 /* Free our DMA mappings */
659 static void sbp2util_free_command_dma(struct sbp2_command_info *command)
660 {
661         struct scsi_id_instance_data *scsi_id =
662                 (struct scsi_id_instance_data *)command->Current_SCpnt->device->host->hostdata[0];
663         struct hpsb_host *host;
664
665         if (!scsi_id) {
666                 SBP2_ERR("%s: scsi_id == NULL", __FUNCTION__);
667                 return;
668         }
669
670         host = scsi_id->ud->ne->host;
671
672         if (command->cmd_dma) {
673                 if (command->dma_type == CMD_DMA_SINGLE) {
674                         pci_unmap_single(host->pdev, command->cmd_dma,
675                                          command->dma_size, command->dma_dir);
676                         SBP2_DMA_FREE("single bulk");
677                 } else if (command->dma_type == CMD_DMA_PAGE) {
678                         pci_unmap_page(host->pdev, command->cmd_dma,
679                                        command->dma_size, command->dma_dir);
680                         SBP2_DMA_FREE("single page");
681                 } /* XXX: Check for CMD_DMA_NONE bug */
682                 command->dma_type = CMD_DMA_NONE;
683                 command->cmd_dma = 0;
684         }
685
686         if (command->sge_buffer) {
687                 pci_unmap_sg(host->pdev, command->sge_buffer,
688                              command->dma_size, command->dma_dir);
689                 SBP2_DMA_FREE("scatter list");
690                 command->sge_buffer = NULL;
691         }
692 }
693
694 /*
695  * This function moves a command to the completed orb list.
696  * Must be called with scsi_id->sbp2_command_orb_lock held.
697  */
698 static void sbp2util_mark_command_completed(
699                 struct scsi_id_instance_data *scsi_id,
700                 struct sbp2_command_info *command)
701 {
702         list_del(&command->list);
703         sbp2util_free_command_dma(command);
704         list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
705 }
706
707 /*
708  * Is scsi_id valid? Is the 1394 node still present?
709  */
710 static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_id)
711 {
712         return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo;
713 }
714
715 /*********************************************
716  * IEEE-1394 core driver stack related section
717  *********************************************/
718 static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud);
719
720 static int sbp2_probe(struct device *dev)
721 {
722         struct unit_directory *ud;
723         struct scsi_id_instance_data *scsi_id;
724
725         SBP2_DEBUG_ENTER();
726
727         ud = container_of(dev, struct unit_directory, device);
728
729         /* Don't probe UDs that have the LUN flag. We'll probe the LUN(s)
730          * instead. */
731         if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
732                 return -ENODEV;
733
734         scsi_id = sbp2_alloc_device(ud);
735
736         if (!scsi_id)
737                 return -ENOMEM;
738
739         sbp2_parse_unit_directory(scsi_id, ud);
740
741         return sbp2_start_device(scsi_id);
742 }
743
744 static int sbp2_remove(struct device *dev)
745 {
746         struct unit_directory *ud;
747         struct scsi_id_instance_data *scsi_id;
748         struct scsi_device *sdev;
749
750         SBP2_DEBUG_ENTER();
751
752         ud = container_of(dev, struct unit_directory, device);
753         scsi_id = ud->device.driver_data;
754         if (!scsi_id)
755                 return 0;
756
757         if (scsi_id->scsi_host) {
758                 /* Get rid of enqueued commands if there is no chance to
759                  * send them. */
760                 if (!sbp2util_node_is_available(scsi_id))
761                         sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT);
762                 /* scsi_remove_device() will trigger shutdown functions of SCSI
763                  * highlevel drivers which would deadlock if blocked. */
764                 atomic_set(&scsi_id->state, SBP2LU_STATE_IN_SHUTDOWN);
765                 scsi_unblock_requests(scsi_id->scsi_host);
766         }
767         sdev = scsi_id->sdev;
768         if (sdev) {
769                 scsi_id->sdev = NULL;
770                 scsi_remove_device(sdev);
771         }
772
773         sbp2_logout_device(scsi_id);
774         sbp2_remove_device(scsi_id);
775
776         return 0;
777 }
778
779 static int sbp2_update(struct unit_directory *ud)
780 {
781         struct scsi_id_instance_data *scsi_id = ud->device.driver_data;
782
783         SBP2_DEBUG_ENTER();
784
785         if (sbp2_reconnect_device(scsi_id)) {
786
787                 /*
788                  * Ok, reconnect has failed. Perhaps we didn't
789                  * reconnect fast enough. Try doing a regular login, but
790                  * first do a logout just in case of any weirdness.
791                  */
792                 sbp2_logout_device(scsi_id);
793
794                 if (sbp2_login_device(scsi_id)) {
795                         /* Login failed too, just fail, and the backend
796                          * will call our sbp2_remove for us */
797                         SBP2_ERR("Failed to reconnect to sbp2 device!");
798                         return -EBUSY;
799                 }
800         }
801
802         /* Set max retries to something large on the device. */
803         sbp2_set_busy_timeout(scsi_id);
804
805         /* Do a SBP-2 fetch agent reset. */
806         sbp2_agent_reset(scsi_id, 1);
807
808         /* Get the max speed and packet size that we can use. */
809         sbp2_max_speed_and_size(scsi_id);
810
811         /* Complete any pending commands with busy (so they get
812          * retried) and remove them from our queue
813          */
814         sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
815
816         /* Accept new commands unless there was another bus reset in the
817          * meantime. */
818         if (hpsb_node_entry_valid(scsi_id->ne)) {
819                 atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
820                 scsi_unblock_requests(scsi_id->scsi_host);
821         }
822         return 0;
823 }
824
825 /* This function is called by sbp2_probe for each new device. We now
826  * allocate one scsi host for each scsi_id (unit directory). */
827 static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud)
828 {
829         struct sbp2scsi_host_info *hi;
830         struct Scsi_Host *scsi_host = NULL;
831         struct scsi_id_instance_data *scsi_id = NULL;
832
833         SBP2_DEBUG_ENTER();
834
835         scsi_id = kzalloc(sizeof(*scsi_id), GFP_KERNEL);
836         if (!scsi_id) {
837                 SBP2_ERR("failed to create scsi_id");
838                 goto failed_alloc;
839         }
840
841         scsi_id->ne = ud->ne;
842         scsi_id->ud = ud;
843         scsi_id->speed_code = IEEE1394_SPEED_100;
844         scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
845         scsi_id->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE;
846         INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse);
847         INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
848         INIT_LIST_HEAD(&scsi_id->scsi_list);
849         spin_lock_init(&scsi_id->sbp2_command_orb_lock);
850         atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
851         INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);
852
853         ud->device.driver_data = scsi_id;
854
855         hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
856         if (!hi) {
857                 hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host, sizeof(*hi));
858                 if (!hi) {
859                         SBP2_ERR("failed to allocate hostinfo");
860                         goto failed_alloc;
861                 }
862                 SBP2_DEBUG("sbp2_alloc_device: allocated hostinfo");
863                 hi->host = ud->ne->host;
864                 INIT_LIST_HEAD(&hi->scsi_ids);
865
866 #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
867                 /* Handle data movement if physical dma is not
868                  * enabled or not supported on host controller */
869                 if (!hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host,
870                                              &sbp2_physdma_ops,
871                                              0x0ULL, 0xfffffffcULL)) {
872                         SBP2_ERR("failed to register lower 4GB address range");
873                         goto failed_alloc;
874                 }
875 #endif
876         }
877
878         /* Prevent unloading of the 1394 host */
879         if (!try_module_get(hi->host->driver->owner)) {
880                 SBP2_ERR("failed to get a reference on 1394 host driver");
881                 goto failed_alloc;
882         }
883
884         scsi_id->hi = hi;
885
886         list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids);
887
888         /* Register the status FIFO address range. We could use the same FIFO
889          * for targets at different nodes. However we need different FIFOs per
890          * target in order to support multi-unit devices.
891          * The FIFO is located outside the local host controller's physical range
892          * but, if possible, within the posted write area. Status writes will
893          * then be performed as unified transactions. This slightly reduces
894          * bandwidth usage, and some Prolific based devices seem to require it.
895          */
896         scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
897                         &sbp2_highlevel, ud->ne->host, &sbp2_ops,
898                         sizeof(struct sbp2_status_block), sizeof(quadlet_t),
899                         ud->ne->host->low_addr_space, CSR1212_ALL_SPACE_END);
900         if (scsi_id->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
901                 SBP2_ERR("failed to allocate status FIFO address range");
902                 goto failed_alloc;
903         }
904
905         /* Register our host with the SCSI stack. */
906         scsi_host = scsi_host_alloc(&scsi_driver_template,
907                                     sizeof(unsigned long));
908         if (!scsi_host) {
909                 SBP2_ERR("failed to register scsi host");
910                 goto failed_alloc;
911         }
912
913         scsi_host->hostdata[0] = (unsigned long)scsi_id;
914
915         if (!scsi_add_host(scsi_host, &ud->device)) {
916                 scsi_id->scsi_host = scsi_host;
917                 return scsi_id;
918         }
919
920         SBP2_ERR("failed to add scsi host");
921         scsi_host_put(scsi_host);
922
923 failed_alloc:
924         sbp2_remove_device(scsi_id);
925         return NULL;
926 }
927
928 static void sbp2_host_reset(struct hpsb_host *host)
929 {
930         struct sbp2scsi_host_info *hi;
931         struct scsi_id_instance_data *scsi_id;
932
933         hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
934         if (!hi)
935                 return;
936         list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list)
937                 if (likely(atomic_read(&scsi_id->state) !=
938                            SBP2LU_STATE_IN_SHUTDOWN)) {
939                         atomic_set(&scsi_id->state, SBP2LU_STATE_IN_RESET);
940                         scsi_block_requests(scsi_id->scsi_host);
941                 }
942 }
943
944 /*
945  * This function is where we first pull the node unique ids, and then
946  * allocate memory and register an SBP-2 device.
947  */
948 static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
949 {
950         struct sbp2scsi_host_info *hi = scsi_id->hi;
951         int error;
952
953         SBP2_DEBUG_ENTER();
954
955         /* Login FIFO DMA */
956         scsi_id->login_response =
957                 pci_alloc_consistent(hi->host->pdev,
958                                      sizeof(struct sbp2_login_response),
959                                      &scsi_id->login_response_dma);
960         if (!scsi_id->login_response)
961                 goto alloc_fail;
962         SBP2_DMA_ALLOC("consistent DMA region for login FIFO");
963
964         /* Query logins ORB DMA */
965         scsi_id->query_logins_orb =
966                 pci_alloc_consistent(hi->host->pdev,
967                                      sizeof(struct sbp2_query_logins_orb),
968                                      &scsi_id->query_logins_orb_dma);
969         if (!scsi_id->query_logins_orb)
970                 goto alloc_fail;
971         SBP2_DMA_ALLOC("consistent DMA region for query logins ORB");
972
973         /* Query logins response DMA */
974         scsi_id->query_logins_response =
975                 pci_alloc_consistent(hi->host->pdev,
976                                      sizeof(struct sbp2_query_logins_response),
977                                      &scsi_id->query_logins_response_dma);
978         if (!scsi_id->query_logins_response)
979                 goto alloc_fail;
980         SBP2_DMA_ALLOC("consistent DMA region for query logins response");
981
982         /* Reconnect ORB DMA */
983         scsi_id->reconnect_orb =
984                 pci_alloc_consistent(hi->host->pdev,
985                                      sizeof(struct sbp2_reconnect_orb),
986                                      &scsi_id->reconnect_orb_dma);
987         if (!scsi_id->reconnect_orb)
988                 goto alloc_fail;
989         SBP2_DMA_ALLOC("consistent DMA region for reconnect ORB");
990
991         /* Logout ORB DMA */
992         scsi_id->logout_orb =
993                 pci_alloc_consistent(hi->host->pdev,
994                                      sizeof(struct sbp2_logout_orb),
995                                      &scsi_id->logout_orb_dma);
996         if (!scsi_id->logout_orb)
997                 goto alloc_fail;
998         SBP2_DMA_ALLOC("consistent DMA region for logout ORB");
999
1000         /* Login ORB DMA */
1001         scsi_id->login_orb =
1002                 pci_alloc_consistent(hi->host->pdev,
1003                                      sizeof(struct sbp2_login_orb),
1004                                      &scsi_id->login_orb_dma);
1005         if (!scsi_id->login_orb)
1006                 goto alloc_fail;
1007         SBP2_DMA_ALLOC("consistent DMA region for login ORB");
1008
1009         SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id);
1010
1011         /*
1012          * Create our command orb pool
1013          */
1014         if (sbp2util_create_command_orb_pool(scsi_id)) {
1015                 SBP2_ERR("sbp2util_create_command_orb_pool failed!");
1016                 sbp2_remove_device(scsi_id);
1017                 return -ENOMEM;
1018         }
1019
1020         /* Schedule a timeout here. The reason is that we may be so close
1021          * to a bus reset that the device is not available for logins.
1022          * This can happen when the bus reset is caused by the host
1023          * connected to the sbp2 device being removed. That host would
1024          * have a certain amount of time to relogin before the sbp2 device
1025          * allows someone else to login instead. One second makes sense. */
1026         if (msleep_interruptible(1000)) {
1027                 sbp2_remove_device(scsi_id);
1028                 return -EINTR;
1029         }
1030
1031         /*
1032          * Login to the sbp-2 device
1033          */
1034         if (sbp2_login_device(scsi_id)) {
1035                 /* Login failed, just remove the device. */
1036                 sbp2_remove_device(scsi_id);
1037                 return -EBUSY;
1038         }
1039
1040         /*
1041          * Set max retries to something large on the device
1042          */
1043         sbp2_set_busy_timeout(scsi_id);
1044
1045         /*
1046          * Do a SBP-2 fetch agent reset
1047          */
1048         sbp2_agent_reset(scsi_id, 1);
1049
1050         /*
1051          * Get the max speed and packet size that we can use
1052          */
1053         sbp2_max_speed_and_size(scsi_id);
1054
1055         /* Add this device to the scsi layer now */
1056         error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
1057         if (error) {
1058                 SBP2_ERR("scsi_add_device failed");
1059                 sbp2_logout_device(scsi_id);
1060                 sbp2_remove_device(scsi_id);
1061                 return error;
1062         }
1063
1064         return 0;
1065
1066 alloc_fail:
1067         SBP2_ERR("Could not allocate memory for scsi_id");
1068         sbp2_remove_device(scsi_id);
1069         return -ENOMEM;
1070 }
1071
1072 /*
1073  * This function removes an sbp2 device from the sbp2scsi_host_info struct.
1074  */
1075 static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
1076 {
1077         struct sbp2scsi_host_info *hi;
1078
1079         SBP2_DEBUG_ENTER();
1080
1081         if (!scsi_id)
1082                 return;
1083
1084         hi = scsi_id->hi;
1085
1086         /* This will remove our scsi device as well */
1087         if (scsi_id->scsi_host) {
1088                 scsi_remove_host(scsi_id->scsi_host);
1089                 scsi_host_put(scsi_id->scsi_host);
1090         }
1091         flush_scheduled_work();
1092         sbp2util_remove_command_orb_pool(scsi_id);
1093
1094         list_del(&scsi_id->scsi_list);
1095
1096         if (scsi_id->login_response) {
1097                 pci_free_consistent(hi->host->pdev,
1098                                     sizeof(struct sbp2_login_response),
1099                                     scsi_id->login_response,
1100                                     scsi_id->login_response_dma);
1101                 SBP2_DMA_FREE("single login FIFO");
1102         }
1103
1104         if (scsi_id->login_orb) {
1105                 pci_free_consistent(hi->host->pdev,
1106                                     sizeof(struct sbp2_login_orb),
1107                                     scsi_id->login_orb,
1108                                     scsi_id->login_orb_dma);
1109                 SBP2_DMA_FREE("single login ORB");
1110         }
1111
1112         if (scsi_id->reconnect_orb) {
1113                 pci_free_consistent(hi->host->pdev,
1114                                     sizeof(struct sbp2_reconnect_orb),
1115                                     scsi_id->reconnect_orb,
1116                                     scsi_id->reconnect_orb_dma);
1117                 SBP2_DMA_FREE("single reconnect orb");
1118         }
1119
1120         if (scsi_id->logout_orb) {
1121                 pci_free_consistent(hi->host->pdev,
1122                                     sizeof(struct sbp2_logout_orb),
1123                                     scsi_id->logout_orb,
1124                                     scsi_id->logout_orb_dma);
1125                 SBP2_DMA_FREE("single logout orb");
1126         }
1127
1128         if (scsi_id->query_logins_orb) {
1129                 pci_free_consistent(hi->host->pdev,
1130                                     sizeof(struct sbp2_query_logins_orb),
1131                                     scsi_id->query_logins_orb,
1132                                     scsi_id->query_logins_orb_dma);
1133                 SBP2_DMA_FREE("single query logins orb");
1134         }
1135
1136         if (scsi_id->query_logins_response) {
1137                 pci_free_consistent(hi->host->pdev,
1138                                     sizeof(struct sbp2_query_logins_response),
1139                                     scsi_id->query_logins_response,
1140                                     scsi_id->query_logins_response_dma);
1141                 SBP2_DMA_FREE("single query logins data");
1142         }
1143
1144         if (scsi_id->status_fifo_addr != CSR1212_INVALID_ADDR_SPACE)
1145                 hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
1146                                           scsi_id->status_fifo_addr);
1147
1148         scsi_id->ud->device.driver_data = NULL;
1149
1150         if (hi)
1151                 module_put(hi->host->driver->owner);
1152
1153         SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id);
1154
1155         kfree(scsi_id);
1156 }
1157
1158 #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
1159 /*
1160  * This function deals with physical dma write requests (for adapters that do not support
1161  * physical dma in hardware). Mostly just here for debugging...
1162  */
1163 static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid,
1164                                      int destid, quadlet_t *data, u64 addr,
1165                                      size_t length, u16 flags)
1166 {
1167
1168         /*
1169          * Manually put the data in the right place.
1170          */
1171         memcpy(bus_to_virt((u32) addr), data, length);
1172         sbp2util_packet_dump(data, length, "sbp2 phys dma write by device",
1173                              (u32) addr);
1174         return RCODE_COMPLETE;
1175 }
1176
1177 /*
1178  * This function deals with physical dma read requests (for adapters that do not support
1179  * physical dma in hardware). Mostly just here for debugging...
1180  */
1181 static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
1182                                     quadlet_t *data, u64 addr, size_t length,
1183                                     u16 flags)
1184 {
1185
1186         /*
1187          * Grab data from memory and send a read response.
1188          */
1189         memcpy(data, bus_to_virt((u32) addr), length);
1190         sbp2util_packet_dump(data, length, "sbp2 phys dma read by device",
1191                              (u32) addr);
1192         return RCODE_COMPLETE;
1193 }
1194 #endif
1195
1196 /**************************************
1197  * SBP-2 protocol related section
1198  **************************************/
1199
1200 /*
1201  * This function queries the device for the maximum concurrent logins it
1202  * supports.
1203  */
1204 static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1205 {
1206         struct sbp2scsi_host_info *hi = scsi_id->hi;
1207         quadlet_t data[2];
1208         int max_logins;
1209         int active_logins;
1210
1211         SBP2_DEBUG_ENTER();
1212
1213         scsi_id->query_logins_orb->reserved1 = 0x0;
1214         scsi_id->query_logins_orb->reserved2 = 0x0;
1215
1216         scsi_id->query_logins_orb->query_response_lo = scsi_id->query_logins_response_dma;
1217         scsi_id->query_logins_orb->query_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
1218
1219         scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
1220         scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
1221         scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
1222
1223         scsi_id->query_logins_orb->reserved_resp_length =
1224                 ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response));
1225
1226         scsi_id->query_logins_orb->status_fifo_hi =
1227                 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
1228         scsi_id->query_logins_orb->status_fifo_lo =
1229                 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
1230
1231         sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb));
1232
1233         sbp2util_packet_dump(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb),
1234                              "sbp2 query logins orb", scsi_id->query_logins_orb_dma);
1235
1236         memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response));
1237
1238         data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1239         data[1] = scsi_id->query_logins_orb_dma;
1240         sbp2util_cpu_to_be32_buffer(data, 8);
1241
1242         hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
1243
1244         if (sbp2util_access_timeout(scsi_id, 2*HZ)) {
1245                 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1246                 return -EIO;
1247         }
1248
1249         if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) {
1250                 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1251                 return -EIO;
1252         }
1253
1254         if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) {
1255                 SBP2_INFO("Error querying logins to SBP-2 device - failed");
1256                 return -EIO;
1257         }
1258
1259         sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response));
1260
1261         SBP2_DEBUG("length_max_logins = %x",
1262                    (unsigned int)scsi_id->query_logins_response->length_max_logins);
1263
1264         max_logins = RESPONSE_GET_MAX_LOGINS(scsi_id->query_logins_response->length_max_logins);
1265         SBP2_INFO("Maximum concurrent logins supported: %d", max_logins);
1266
1267         active_logins = RESPONSE_GET_ACTIVE_LOGINS(scsi_id->query_logins_response->length_max_logins);
1268         SBP2_INFO("Number of active logins: %d", active_logins);
1269
1270         if (active_logins >= max_logins) {
1271                 return -EIO;
1272         }
1273
1274         return 0;
1275 }
1276
1277 /*
1278  * This function is called in order to login to a particular SBP-2 device,
1279  * after a bus reset.
1280  */
1281 static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1282 {
1283         struct sbp2scsi_host_info *hi = scsi_id->hi;
1284         quadlet_t data[2];
1285
1286         SBP2_DEBUG_ENTER();
1287
1288         if (!scsi_id->login_orb) {
1289                 SBP2_DEBUG("%s: login_orb not alloc'd!", __FUNCTION__);
1290                 return -EIO;
1291         }
1292
1293         if (!exclusive_login) {
1294                 if (sbp2_query_logins(scsi_id)) {
1295                         SBP2_INFO("Device does not support any more concurrent logins");
1296                         return -EIO;
1297                 }
1298         }
1299
1300         /* Set-up login ORB, assume no password */
1301         scsi_id->login_orb->password_hi = 0;
1302         scsi_id->login_orb->password_lo = 0;
1303
1304         scsi_id->login_orb->login_response_lo = scsi_id->login_response_dma;
1305         scsi_id->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
1306
1307         scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
1308         scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0);   /* One second reconnect time */
1309         scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login);     /* Exclusive access to device */
1310         scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1);      /* Notify us of login complete */
1311         scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
1312
1313         scsi_id->login_orb->passwd_resp_lengths =
1314                 ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
1315
1316         scsi_id->login_orb->status_fifo_hi =
1317                 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
1318         scsi_id->login_orb->status_fifo_lo =
1319                 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
1320
1321         sbp2util_cpu_to_be32_buffer(scsi_id->login_orb, sizeof(struct sbp2_login_orb));
1322
1323         sbp2util_packet_dump(scsi_id->login_orb, sizeof(struct sbp2_login_orb),
1324                              "sbp2 login orb", scsi_id->login_orb_dma);
1325
1326         memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response));
1327
1328         data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1329         data[1] = scsi_id->login_orb_dma;
1330         sbp2util_cpu_to_be32_buffer(data, 8);
1331
1332         hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
1333
1334         /*
1335          * Wait for login status (up to 20 seconds)...
1336          */
1337         if (sbp2util_access_timeout(scsi_id, 20*HZ)) {
1338                 SBP2_ERR("Error logging into SBP-2 device - timed out");
1339                 return -EIO;
1340         }
1341
1342         /*
1343          * Sanity. Make sure status returned matches login orb.
1344          */
1345         if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
1346                 SBP2_ERR("Error logging into SBP-2 device - timed out");
1347                 return -EIO;
1348         }
1349
1350         if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) {
1351                 SBP2_ERR("Error logging into SBP-2 device - failed");
1352                 return -EIO;
1353         }
1354
1355         /*
1356          * Byte swap the login response, for use when reconnecting or
1357          * logging out.
1358          */
1359         sbp2util_cpu_to_be32_buffer(scsi_id->login_response, sizeof(struct sbp2_login_response));
1360
1361         /*
1362          * Grab our command block agent address from the login response.
1363          */
1364         SBP2_DEBUG("command_block_agent_hi = %x",
1365                    (unsigned int)scsi_id->login_response->command_block_agent_hi);
1366         SBP2_DEBUG("command_block_agent_lo = %x",
1367                    (unsigned int)scsi_id->login_response->command_block_agent_lo);
1368
1369         scsi_id->sbp2_command_block_agent_addr =
1370                 ((u64)scsi_id->login_response->command_block_agent_hi) << 32;
1371         scsi_id->sbp2_command_block_agent_addr |= ((u64)scsi_id->login_response->command_block_agent_lo);
1372         scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL;
1373
1374         SBP2_INFO("Logged into SBP-2 device");
1375         return 0;
1376 }
1377
1378 /*
1379  * This function is called in order to log out from a particular SBP-2
1380  * device, usually during driver unload.
1381  */
1382 static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
1383 {
1384         struct sbp2scsi_host_info *hi = scsi_id->hi;
1385         quadlet_t data[2];
1386         int error;
1387
1388         SBP2_DEBUG_ENTER();
1389
1390         /*
1391          * Set-up logout ORB
1392          */
1393         scsi_id->logout_orb->reserved1 = 0x0;
1394         scsi_id->logout_orb->reserved2 = 0x0;
1395         scsi_id->logout_orb->reserved3 = 0x0;
1396         scsi_id->logout_orb->reserved4 = 0x0;
1397
1398         scsi_id->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
1399         scsi_id->logout_orb->login_ID_misc |= ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
1400
1401         /* Notify us when complete */
1402         scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1403
1404         scsi_id->logout_orb->reserved5 = 0x0;
1405         scsi_id->logout_orb->status_fifo_hi =
1406                 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
1407         scsi_id->logout_orb->status_fifo_lo =
1408                 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
1409
1410         /*
1411          * Byte swap ORB if necessary
1412          */
1413         sbp2util_cpu_to_be32_buffer(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb));
1414
1415         sbp2util_packet_dump(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb),
1416                              "sbp2 logout orb", scsi_id->logout_orb_dma);
1417
1418         /*
1419          * Ok, let's write to the target's management agent register
1420          */
1421         data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1422         data[1] = scsi_id->logout_orb_dma;
1423         sbp2util_cpu_to_be32_buffer(data, 8);
1424
1425         error = hpsb_node_write(scsi_id->ne,
1426                                 scsi_id->sbp2_management_agent_addr, data, 8);
1427         if (error)
1428                 return error;
1429
1430         /* Wait for device to logout...1 second. */
1431         if (sbp2util_access_timeout(scsi_id, HZ))
1432                 return -EIO;
1433
1434         SBP2_INFO("Logged out of SBP-2 device");
1435         return 0;
1436 }
1437
1438 /*
1439  * This function is called in order to reconnect to a particular SBP-2
1440  * device, after a bus reset.
1441  */
1442 static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1443 {
1444         struct sbp2scsi_host_info *hi = scsi_id->hi;
1445         quadlet_t data[2];
1446         int error;
1447
1448         SBP2_DEBUG_ENTER();
1449
1450         /*
1451          * Set-up reconnect ORB
1452          */
1453         scsi_id->reconnect_orb->reserved1 = 0x0;
1454         scsi_id->reconnect_orb->reserved2 = 0x0;
1455         scsi_id->reconnect_orb->reserved3 = 0x0;
1456         scsi_id->reconnect_orb->reserved4 = 0x0;
1457
1458         scsi_id->reconnect_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
1459         scsi_id->reconnect_orb->login_ID_misc |=
1460                 ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
1461
1462         /* Notify us when complete */
1463         scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1464
1465         scsi_id->reconnect_orb->reserved5 = 0x0;
1466         scsi_id->reconnect_orb->status_fifo_hi =
1467                 ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
1468         scsi_id->reconnect_orb->status_fifo_lo =
1469                 ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
1470
1471         /*
1472          * Byte swap ORB if necessary
1473          */
1474         sbp2util_cpu_to_be32_buffer(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb));
1475
1476         sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb),
1477                              "sbp2 reconnect orb", scsi_id->reconnect_orb_dma);
1478
1479         data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1480         data[1] = scsi_id->reconnect_orb_dma;
1481         sbp2util_cpu_to_be32_buffer(data, 8);
1482
1483         error = hpsb_node_write(scsi_id->ne,
1484                                 scsi_id->sbp2_management_agent_addr, data, 8);
1485         if (error)
1486                 return error;
1487
1488         /*
1489          * Wait for reconnect status (up to 1 second)...
1490          */
1491         if (sbp2util_access_timeout(scsi_id, HZ)) {
1492                 SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
1493                 return -EIO;
1494         }
1495
1496         /*
1497          * Sanity. Make sure status returned matches reconnect orb.
1498          */
1499         if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
1500                 SBP2_ERR("Error reconnecting to SBP-2 device - status does not match reconnect ORB");
1501                 return -EIO;
1502         }
1503
1504         if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) {
1505                 SBP2_ERR("Error reconnecting to SBP-2 device - failed");
1506                 return -EIO;
1507         }
1508
1509         HPSB_DEBUG("Reconnected to SBP-2 device");
1510         return 0;
1511 }
1512
1513 /*
1514  * This function is called in order to set the busy timeout (number of
1515  * retries to attempt) on the sbp2 device.
1516  */
1517 static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
1518 {
1519         quadlet_t data;
1520
1521         SBP2_DEBUG_ENTER();
1522
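        /*
         * BUSY_TIMEOUT is the standard Serial Bus CSR at offset 0x210 of
         * the target's register space; it controls how persistently the
         * target retries request packets that were acknowledged with
         * ack_busy. The write is best-effort, so a failure is only logged.
         */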
1523         data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
1524         if (hpsb_node_write(scsi_id->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
1525                 SBP2_ERR("%s error", __FUNCTION__);
1526         return 0;
1527 }
1528
1529 /*
1530  * This function is called to parse sbp2 device's config rom unit
1531  * directory. Used to determine things like sbp2 management agent offset,
1532  * and command set used (SCSI or RBC).
1533  */
1534 static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1535                                       struct unit_directory *ud)
1536 {
1537         struct csr1212_keyval *kv;
1538         struct csr1212_dentry *dentry;
1539         u64 management_agent_addr;
1540         u32 command_set_spec_id, command_set, unit_characteristics,
1541             firmware_revision;
1542         unsigned workarounds;
1543         int i;
1544
1545         SBP2_DEBUG_ENTER();
1546
1547         management_agent_addr = 0x0;
1548         command_set_spec_id = 0x0;
1549         command_set = 0x0;
1550         unit_characteristics = 0x0;
1551         firmware_revision = 0x0;
1552
1553         /* Handle different fields in the unit directory, based on keys */
1554         csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) {
1555                 switch (kv->key.id) {
1556                 case CSR1212_KV_ID_DEPENDENT_INFO:
1557                         if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) {
1558                                 /* Save off the management agent address */
1559                                 management_agent_addr =
1560                                     CSR1212_REGISTER_SPACE_BASE +
1561                                     (kv->value.csr_offset << 2);
1562
1563                                 SBP2_DEBUG("sbp2_management_agent_addr = %x",
1564                                            (unsigned int)management_agent_addr);
1565                         } else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
1566                                 scsi_id->sbp2_lun =
1567                                     ORB_SET_LUN(kv->value.immediate);
1568                         }
1569                         break;
1570
1571                 case SBP2_COMMAND_SET_SPEC_ID_KEY:
1572                         /* Command spec organization */
1573                         command_set_spec_id = kv->value.immediate;
1574                         SBP2_DEBUG("sbp2_command_set_spec_id = %x",
1575                                    (unsigned int)command_set_spec_id);
1576                         break;
1577
1578                 case SBP2_COMMAND_SET_KEY:
1579                         /* Command set used by sbp2 device */
1580                         command_set = kv->value.immediate;
1581                         SBP2_DEBUG("sbp2_command_set = %x",
1582                                    (unsigned int)command_set);
1583                         break;
1584
1585                 case SBP2_UNIT_CHARACTERISTICS_KEY:
1586                         /*
1587                          * Unit characteristics (ORB-related parameters
1588                          * that this driver does not yet use)
1589                          */
1590                         unit_characteristics = kv->value.immediate;
1591                         SBP2_DEBUG("sbp2_unit_characteristics = %x",
1592                                    (unsigned int)unit_characteristics);
1593                         break;
1594
1595                 case SBP2_FIRMWARE_REVISION_KEY:
1596                         /* Firmware revision */
1597                         firmware_revision = kv->value.immediate;
1598                         SBP2_DEBUG("sbp2_firmware_revision = %x",
1599                                    (unsigned int)firmware_revision);
1600                         break;
1601
1602                 default:
1603                         break;
1604                 }
1605         }
1606
1607         workarounds = sbp2_default_workarounds;
1608
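        /*
         * In the table lookup below, a zero firmware_revision or model_id
         * acts as a wildcard; firmware revisions are compared with the
         * lowest byte masked off, so one entry covers all minor releases
         * of a given firmware.
         */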
1609         if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
1610                 for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
1611                         if (sbp2_workarounds_table[i].firmware_revision &&
1612                             sbp2_workarounds_table[i].firmware_revision !=
1613                             (firmware_revision & 0xffff00))
1614                                 continue;
1615                         if (sbp2_workarounds_table[i].model_id &&
1616                             sbp2_workarounds_table[i].model_id != ud->model_id)
1617                                 continue;
1618                         workarounds |= sbp2_workarounds_table[i].workarounds;
1619                         break;
1620                 }
1621
1622         if (workarounds)
1623                 SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
1624                           "(firmware_revision 0x%06x, vendor_id 0x%06x,"
1625                           " model_id 0x%06x)",
1626                           NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1627                           workarounds, firmware_revision,
1628                           ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
1629                           ud->model_id);
1630
1631         /* We would need one SCSI host template for each target to adjust
1632          * max_sectors on the fly, therefore warn only. */
1633         if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
1634             (max_sectors * 512) > (128 * 1024))
1635                 SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
1636                           "max transfer size. WARNING: Current max_sectors "
1637                           "setting is larger than 128KB (%d sectors)",
1638                           NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1639                           max_sectors);
1640
1641         /* If this is a logical unit directory entry, process the parent
1642          * to get the values. */
1643         if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) {
1644                 struct unit_directory *parent_ud =
1645                         container_of(ud->device.parent, struct unit_directory, device);
1646                 sbp2_parse_unit_directory(scsi_id, parent_ud);
1647         } else {
1648                 scsi_id->sbp2_management_agent_addr = management_agent_addr;
1649                 scsi_id->sbp2_command_set_spec_id = command_set_spec_id;
1650                 scsi_id->sbp2_command_set = command_set;
1651                 scsi_id->sbp2_unit_characteristics = unit_characteristics;
1652                 scsi_id->sbp2_firmware_revision = firmware_revision;
1653                 scsi_id->workarounds = workarounds;
1654                 if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
1655                         scsi_id->sbp2_lun = ORB_SET_LUN(ud->lun);
1656         }
1657 }
1658
1659 #define SBP2_PAYLOAD_TO_BYTES(p) (1 << ((p) + 2))
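/*
 * The ORB max_payload field is a power-of-two code: a value of p means
 * 2^(p+2) bytes. For example, SBP2_PAYLOAD_TO_BYTES(7) == 512 (the S100
 * async payload limit), 8 == 1024 (S200) and 9 == 2048 (S400).
 */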
1660
1661 /*
1662  * This function is called in order to determine the max speed and packet
1663  * size we can use in our ORBs. Note that we (the driver and host) only
1664  * initiate the transaction. The SBP-2 device actually transfers the data
1665  * (by reading from the DMA area we point it at). This means that the SBP-2
1666  * device decides the actual maximum data it can transfer. We just tell it
1667  * the speed that it needs to use, and the max_rec the host supports, and
1668  * it takes care of the rest.
1669  */
1670 static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
1671 {
1672         struct sbp2scsi_host_info *hi = scsi_id->hi;
1673         u8 payload;
1674
1675         SBP2_DEBUG_ENTER();
1676
1677         scsi_id->speed_code =
1678             hi->host->speed[NODEID_TO_NODE(scsi_id->ne->nodeid)];
1679
1680         /* Bump down our speed if the user requested it */
1681         if (scsi_id->speed_code > max_speed) {
1682                 scsi_id->speed_code = max_speed;
1683                 SBP2_ERR("Forcing SBP-2 max speed down to %s",
1684                          hpsb_speedto_str[scsi_id->speed_code]);
1685         }
1686
1687         /* Payload size is the lesser of what our speed supports and what
1688          * our host supports.  */
1689         payload = min(sbp2_speedto_max_payload[scsi_id->speed_code],
1690                       (u8) (hi->host->csr.max_rec - 1));
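        /*
         * Example: a host whose config ROM advertises max_rec = 9 accepts
         * 2^(9+1) = 1024-byte data blocks, so max_rec - 1 = 8 caps the
         * payload at 1024 bytes regardless of what the speed code would
         * otherwise allow.
         */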
1691
1692         /* If physical DMA is off, work around limitation in ohci1394:
1693          * packet size must not exceed PAGE_SIZE */
1694         if (scsi_id->ne->host->low_addr_space < (1ULL << 32))
1695                 while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > PAGE_SIZE &&
1696                        payload)
1697                         payload--;
1698
1699         HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
1700                    NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
1701                    hpsb_speedto_str[scsi_id->speed_code],
1702                    SBP2_PAYLOAD_TO_BYTES(payload));
1703
1704         scsi_id->max_payload_size = payload;
1705         return 0;
1706 }
1707
1708 /*
1709  * This function is called in order to perform a SBP-2 agent reset.
1710  */
1711 static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
1712 {
1713         quadlet_t data;
1714         u64 addr;
1715         int retval;
1716         unsigned long flags;
1717
1718         SBP2_DEBUG_ENTER();
1719
1720         cancel_delayed_work(&scsi_id->protocol_work);
1721         if (wait)
1722                 flush_scheduled_work();
1723
1724         data = ntohl(SBP2_AGENT_RESET_DATA);
1725         addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
1726
1727         if (wait)
1728                 retval = hpsb_node_write(scsi_id->ne, addr, &data, 4);
1729         else
1730                 retval = sbp2util_node_write_no_wait(scsi_id->ne, addr, &data, 4);
1731
1732         if (retval < 0) {
1733                 SBP2_ERR("hpsb_node_write failed");
1734                 return -EIO;
1735         }
1736
1737         /*
1738          * Need to make sure orb pointer is written on next command
1739          */
1740         spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
1741         scsi_id->last_orb = NULL;
1742         spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
1743
1744         return 0;
1745 }
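
/*
 * For reference: SBP-2 lays out the command block agent's register file
 * (relative to sbp2_command_block_agent_addr) as AGENT_STATE at 0x00,
 * AGENT_RESET at 0x04, ORB_POINTER at 0x08 (8 bytes), DOORBELL at 0x10
 * and UNSOLICITED_STATUS_ENABLE at 0x14. sbp2_agent_reset() above writes
 * AGENT_RESET; sbp2_link_orb_command() below uses ORB_POINTER and
 * DOORBELL.
 */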
1746
1747 static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1748                                      struct sbp2scsi_host_info *hi,
1749                                      struct sbp2_command_info *command,
1750                                      unsigned int scsi_use_sg,
1751                                      struct scatterlist *sgpnt,
1752                                      u32 orb_direction,
1753                                      enum dma_data_direction dma_dir)
1754 {
1755         command->dma_dir = dma_dir;
1756         orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1757         orb->misc |= ORB_SET_DIRECTION(orb_direction);
1758
1759         /* Special case if only one element (and less than 64KB in size) */
1760         if ((scsi_use_sg == 1) &&
1761             (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
1762
1763                 SBP2_DEBUG("Only one s/g element");
1764                 command->dma_size = sgpnt[0].length;
1765                 command->dma_type = CMD_DMA_PAGE;
1766                 command->cmd_dma = pci_map_page(hi->host->pdev,
1767                                                 sgpnt[0].page,
1768                                                 sgpnt[0].offset,
1769                                                 command->dma_size,
1770                                                 command->dma_dir);
1771                 SBP2_DMA_ALLOC("single page scatter element");
1772
1773                 orb->data_descriptor_lo = command->cmd_dma;
1774                 orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
1775
1776         } else {
1777                 struct sbp2_unrestricted_page_table *sg_element =
1778                                         &command->scatter_gather_element[0];
1779                 u32 sg_count, sg_len;
1780                 dma_addr_t sg_addr;
1781                 int i, count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg,
1782                                           dma_dir);
1783
1784                 SBP2_DMA_ALLOC("scatter list");
1785
1786                 command->dma_size = scsi_use_sg;
1787                 command->sge_buffer = sgpnt;
1788
1789                 /* use page tables (s/g) */
1790                 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1791                 orb->data_descriptor_lo = command->sge_dma;
1792
1793                 /*
1794                  * Loop through and fill out our sbp-2 page tables
1795                  * (and split up anything too large)
1796                  */
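                /*
                 * Each "unrestricted" page table element is 8 bytes: the
                 * high quadlet packs a 16-bit segment length above the
                 * upper 16 bits of the segment base address, and the low
                 * quadlet holds the lower 32 bits of the base. Anything
                 * longer than SBP2_MAX_SG_ELEMENT_LENGTH is split across
                 * several elements here.
                 */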
1797                 for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
1798                         sg_len = sg_dma_len(sgpnt);
1799                         sg_addr = sg_dma_address(sgpnt);
1800                         while (sg_len) {
1801                                 sg_element[sg_count].segment_base_lo = sg_addr;
1802                                 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1803                                         sg_element[sg_count].length_segment_base_hi =
1804                                                 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1805                                         sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1806                                         sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1807                                 } else {
1808                                         sg_element[sg_count].length_segment_base_hi =
1809                                                 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1810                                         sg_len = 0;
1811                                 }
1812                                 sg_count++;
1813                         }
1814                 }
1815
1816                 /* Number of page table (s/g) elements */
1817                 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1818
1819                 sbp2util_packet_dump(sg_element,
1820                                      (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1821                                      "sbp2 s/g list", command->sge_dma);
1822
1823                 /* Byte swap page tables if necessary */
1824                 sbp2util_cpu_to_be32_buffer(sg_element,
1825                                             (sizeof(struct sbp2_unrestricted_page_table)) *
1826                                             sg_count);
1827         }
1828 }
1829
1830 static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
1831                                         struct sbp2scsi_host_info *hi,
1832                                         struct sbp2_command_info *command,
1833                                         struct scatterlist *sgpnt,
1834                                         u32 orb_direction,
1835                                         unsigned int scsi_request_bufflen,
1836                                         void *scsi_request_buffer,
1837                                         enum dma_data_direction dma_dir)
1838 {
1839         command->dma_dir = dma_dir;
1840         command->dma_size = scsi_request_bufflen;
1841         command->dma_type = CMD_DMA_SINGLE;
1842         command->cmd_dma = pci_map_single(hi->host->pdev, scsi_request_buffer,
1843                                           command->dma_size, command->dma_dir);
1844         orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1845         orb->misc |= ORB_SET_DIRECTION(orb_direction);
1846
1847         SBP2_DMA_ALLOC("single bulk");
1848
1849         /*
1850          * Handle case where we get a command w/o s/g enabled (but
1851          * check for transfers larger than 64K)
1852          */
1853         if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
1854
1855                 orb->data_descriptor_lo = command->cmd_dma;
1856                 orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
1857
1858         } else {
1859                 struct sbp2_unrestricted_page_table *sg_element =
1860                         &command->scatter_gather_element[0];
1861                 u32 sg_count, sg_len;
1862                 dma_addr_t sg_addr;
1863
1864                 /*
1865                  * Need to turn this into page tables, since the
1866                  * buffer is too large.
1867                  */
1868                 orb->data_descriptor_lo = command->sge_dma;
1869
1870                 /* Use page tables (s/g) */
1871                 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1872
1873                 /*
1874                  * fill out our sbp-2 page tables (and split up
1875                  * the large buffer)
1876                  */
1877                 sg_count = 0;
1878                 sg_len = scsi_request_bufflen;
1879                 sg_addr = command->cmd_dma;
1880                 while (sg_len) {
1881                         sg_element[sg_count].segment_base_lo = sg_addr;
1882                         if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1883                                 sg_element[sg_count].length_segment_base_hi =
1884                                         PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1885                                 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1886                                 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1887                         } else {
1888                                 sg_element[sg_count].length_segment_base_hi =
1889                                         PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1890                                 sg_len = 0;
1891                         }
1892                         sg_count++;
1893                 }
1894
1895                 /* Number of page table (s/g) elements */
1896                 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1897
1898                 sbp2util_packet_dump(sg_element,
1899                                      (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1900                                      "sbp2 s/g list", command->sge_dma);
1901
1902                 /* Byte swap page tables if necessary */
1903                 sbp2util_cpu_to_be32_buffer(sg_element,
1904                                             (sizeof(struct sbp2_unrestricted_page_table)) *
1905                                              sg_count);
1906         }
1907 }
1908
1909 /*
1910  * This function is called to create the actual command orb and s/g list
1911  * out of the scsi command itself.
1912  */
1913 static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
1914                                     struct sbp2_command_info *command,
1915                                     unchar *scsi_cmd,
1916                                     unsigned int scsi_use_sg,
1917                                     unsigned int scsi_request_bufflen,
1918                                     void *scsi_request_buffer,
1919                                     enum dma_data_direction dma_dir)
1920 {
1921         struct sbp2scsi_host_info *hi = scsi_id->hi;
1922         struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer;
1923         struct sbp2_command_orb *command_orb = &command->command_orb;
1924         u32 orb_direction;
1925
1926         /*
1927          * Set-up our command ORB..
1928          *
1929          * NOTE: We're doing unrestricted page tables (s/g), as this gives
1930          * the best performance (at least with the devices I have). This means
1931          * that data_size becomes the number of s/g elements, and
1932          * page_size should be zero (for unrestricted).
1933          */
1934         command_orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
1935         command_orb->next_ORB_lo = 0x0;
1936         command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size);
1937         command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code);
1938         command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
1939
1940         if (dma_dir == DMA_NONE)
1941                 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
1942         else if (dma_dir == DMA_TO_DEVICE && scsi_request_bufflen)
1943                 orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
1944         else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen)
1945                 orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
1946         else {
1947                 SBP2_WARN("Falling back to DMA_NONE");
1948                 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
1949         }
1950
1951         /* Set-up our pagetable stuff */
1952         if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
1953                 SBP2_DEBUG("No data transfer");
1954                 command_orb->data_descriptor_hi = 0x0;
1955                 command_orb->data_descriptor_lo = 0x0;
1956                 command_orb->misc |= ORB_SET_DIRECTION(1);
1957         } else if (scsi_use_sg) {
1958                 SBP2_DEBUG("Use scatter/gather");
1959                 sbp2_prep_command_orb_sg(command_orb, hi, command, scsi_use_sg,
1960                                          sgpnt, orb_direction, dma_dir);
1961         } else {
1962                 SBP2_DEBUG("No scatter/gather");
1963                 sbp2_prep_command_orb_no_sg(command_orb, hi, command, sgpnt,
1964                                             orb_direction, scsi_request_bufflen,
1965                                             scsi_request_buffer, dma_dir);
1966         }
1967
1968         /* Byte swap command ORB if necessary */
1969         sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb));
1970
1971         /* Put our scsi command in the command ORB */
1972         memset(command_orb->cdb, 0, 12);
1973         memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
1974 }
1975
1976 /*
1977  * This function is called in order to begin a regular SBP-2 command.
1978  */
1979 static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
1980                                  struct sbp2_command_info *command)
1981 {
1982         struct sbp2scsi_host_info *hi = scsi_id->hi;
1983         struct sbp2_command_orb *command_orb = &command->command_orb;
1984         struct sbp2_command_orb *last_orb;
1985         dma_addr_t last_orb_dma;
1986         u64 addr = scsi_id->sbp2_command_block_agent_addr;
1987         quadlet_t data[2];
1988         size_t length;
1989         unsigned long flags;
1990
1991         outstanding_orb_incr;
1992         SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x",
1993                        command_orb, global_outstanding_command_orbs);
1994
1995         pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma,
1996                                        sizeof(struct sbp2_command_orb),
1997                                        PCI_DMA_TODEVICE);
1998         pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma,
1999                                        sizeof(command->scatter_gather_element),
2000                                        PCI_DMA_BIDIRECTIONAL);
2001         /*
2002          * Check to see if there are any previous orbs to use
2003          */
2004         spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
2005         last_orb = scsi_id->last_orb;
2006         last_orb_dma = scsi_id->last_orb_dma;
2007         if (!last_orb) {
2008                 /*
2009                  * last_orb == NULL means: We know that the target's fetch agent
2010                  * is not active right now.
2011                  */
2012                 addr += SBP2_ORB_POINTER_OFFSET;
2013                 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
2014                 data[1] = command->command_orb_dma;
2015                 sbp2util_cpu_to_be32_buffer(data, 8);
2016                 length = 8;
2017         } else {
2018                 /*
2019                  * last_orb != NULL means: We know that the target's fetch agent
2020                  * is (very probably) not dead or in reset state right now.
2021                  * We have an ORB already sent that we can append a new one to.
2022                  * The target's fetch agent may or may not have read this
2023                  * previous ORB yet.
2024                  */
2025                 pci_dma_sync_single_for_cpu(hi->host->pdev, last_orb_dma,
2026                                             sizeof(struct sbp2_command_orb),
2027                                             PCI_DMA_TODEVICE);
2028                 last_orb->next_ORB_lo = cpu_to_be32(command->command_orb_dma);
2029                 wmb();
2030                 /* Tells hardware that this pointer is valid */
2031                 last_orb->next_ORB_hi = 0;
2032                 pci_dma_sync_single_for_device(hi->host->pdev, last_orb_dma,
2033                                                sizeof(struct sbp2_command_orb),
2034                                                PCI_DMA_TODEVICE);
2035                 addr += SBP2_DOORBELL_OFFSET;
2036                 data[0] = 0;
2037                 length = 4;
2038         }
2039         scsi_id->last_orb = command_orb;
2040         scsi_id->last_orb_dma = command->command_orb_dma;
2041         spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
2042
2043         SBP2_ORB_DEBUG("write to %s register, command orb %p",
2044                         last_orb ? "DOORBELL" : "ORB_POINTER", command_orb);
2045         if (sbp2util_node_write_no_wait(scsi_id->ne, addr, data, length)) {
2046                 /*
2047                  * sbp2util_node_write_no_wait failed. We certainly ran out
2048                  * of transaction labels, perhaps just because there were no
2049                  * context switches which gave khpsbpkt a chance to collect
2050                  * free tlabels. Try again in non-atomic context. If necessary,
2051                  * the workqueue job will sleep until it is guaranteed a tlabel.
2052                  * We do not accept new commands until the job is over.
2053                  */
2054                 scsi_block_requests(scsi_id->scsi_host);
2055                 PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
2056                              last_orb ? sbp2util_write_doorbell :
2057                                         sbp2util_write_orb_pointer);
2058                 schedule_delayed_work(&scsi_id->protocol_work, 0);
2059         }
2060 }
2061
2062 /*
2063  * This function is called in order to begin a regular SBP-2 command.
2064  */
2065 static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
2066                              struct scsi_cmnd *SCpnt,
2067                              void (*done)(struct scsi_cmnd *))
2068 {
2069         unchar *cmd = (unchar *) SCpnt->cmnd;
2070         unsigned int request_bufflen = SCpnt->request_bufflen;
2071         struct sbp2_command_info *command;
2072
2073         SBP2_DEBUG_ENTER();
2074         SBP2_DEBUG("SCSI transfer size = %x", request_bufflen);
2075         SBP2_DEBUG("SCSI s/g elements = %x", (unsigned int)SCpnt->use_sg);
2076
2077         /*
2078          * Allocate a command orb and s/g structure
2079          */
2080         command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done);
2081         if (!command) {
2082                 return -EIO;
2083         }
2084
2085         /*
2086          * Now actually fill in the command orb and sbp2 s/g list
2087          */
2088         sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
2089                                 request_bufflen, SCpnt->request_buffer,
2090                                 SCpnt->sc_data_direction);
2091
2092         sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
2093                              "sbp2 command orb", command->command_orb_dma);
2094
2095         /*
2096          * Link up the orb, and ring the doorbell if needed
2097          */
2098         sbp2_link_orb_command(scsi_id, command);
2099
2100         return 0;
2101 }
2102
2103 /*
2104  * Translates SBP-2 status into SCSI sense data for check conditions
2105  */
2106 static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data)
2107 {
2108         SBP2_DEBUG_ENTER();
2109
2110         /*
2111          * Ok, it's pretty ugly...   ;-)
2112          */
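        /*
         * Build a fixed-format sense buffer (response code 0x70) from the
         * SBP-2 status block: status byte 9 carries the sense key, bytes
         * 10-11 the ASC/ASCQ, bytes 12-15 the information field, bytes
         * 16-19 the command-specific information, and bytes 20-21 the FRU
         * and sense-key-specific data. The SCSI status itself is in byte 8.
         */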
2113         sense_data[0] = 0x70;
2114         sense_data[1] = 0x0;
2115         sense_data[2] = sbp2_status[9];
2116         sense_data[3] = sbp2_status[12];
2117         sense_data[4] = sbp2_status[13];
2118         sense_data[5] = sbp2_status[14];
2119         sense_data[6] = sbp2_status[15];
2120         sense_data[7] = 10;
2121         sense_data[8] = sbp2_status[16];
2122         sense_data[9] = sbp2_status[17];
2123         sense_data[10] = sbp2_status[18];
2124         sense_data[11] = sbp2_status[19];
2125         sense_data[12] = sbp2_status[10];
2126         sense_data[13] = sbp2_status[11];
2127         sense_data[14] = sbp2_status[20];
2128         sense_data[15] = sbp2_status[21];
2129
2130         return sbp2_status[8] & 0x3f;   /* return scsi status */
2131 }
2132
2133 /*
2134  * This function deals with status writes from the SBP-2 device
2135  */
2136 static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
2137                                     int destid, quadlet_t *data, u64 addr,
2138                                     size_t length, u16 fl)
2139 {
2140         struct sbp2scsi_host_info *hi;
2141         struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp;
2142         struct scsi_cmnd *SCpnt = NULL;
2143         struct sbp2_status_block *sb;
2144         u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
2145         struct sbp2_command_info *command;
2146         unsigned long flags;
2147
2148         SBP2_DEBUG_ENTER();
2149
2150         sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr);
2151
2152         if (unlikely(length < 8 || length > sizeof(struct sbp2_status_block))) {
2153                 SBP2_ERR("Wrong size of status block");
2154                 return RCODE_ADDRESS_ERROR;
2155         }
2156         if (unlikely(!host)) {
2157                 SBP2_ERR("host is NULL - this is bad!");
2158                 return RCODE_ADDRESS_ERROR;
2159         }
2160         hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
2161         if (unlikely(!hi)) {
2162                 SBP2_ERR("host info is NULL - this is bad!");
2163                 return RCODE_ADDRESS_ERROR;
2164         }
2165         /*
2166          * Find our scsi_id structure by looking at the status fifo address
2167          * written to by the sbp2 device.
2168          */
2169         list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) {
2170                 if (scsi_id_tmp->ne->nodeid == nodeid &&
2171                     scsi_id_tmp->status_fifo_addr == addr) {
2172                         scsi_id = scsi_id_tmp;
2173                         break;
2174                 }
2175         }
2176         if (unlikely(!scsi_id)) {
2177                 SBP2_ERR("scsi_id is NULL - device is gone?");
2178                 return RCODE_ADDRESS_ERROR;
2179         }
2180
2181         /*
2182          * Put response into scsi_id status fifo buffer. The first two
2183          * quadlets arrive in big-endian byte order. Often the target writes only a
2184          * truncated status block, minimally the first two quadlets. The rest
2185          * is implied to be zeros.
2186          */
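        /*
         * Per SBP-2, the first quadlet of the status block packs the src,
         * resp, dead and len fields plus the sbp_status byte together with
         * the upper 16 bits of the ORB offset (ORB_offset_hi_misc); the
         * second quadlet holds the lower 32 bits of the ORB offset, which
         * is what gets matched against our ORB DMA addresses below.
         */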
2187         sb = &scsi_id->status_block;
2188         memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent));
2189         memcpy(sb, data, length);
2190         sbp2util_be32_to_cpu_buffer(sb, 8);
2191
2192         /*
2193          * Ignore unsolicited status. Handle command ORB status.
2194          */
2195         if (unlikely(STATUS_GET_SRC(sb->ORB_offset_hi_misc) == 2))
2196                 command = NULL;
2197         else
2198                 command = sbp2util_find_command_for_orb(scsi_id,
2199                                                         sb->ORB_offset_lo);
2200         if (command) {
2201                 SBP2_DEBUG("Found status for command ORB");
2202                 pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
2203                                             sizeof(struct sbp2_command_orb),
2204                                             PCI_DMA_TODEVICE);
2205                 pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
2206                                             sizeof(command->scatter_gather_element),
2207                                             PCI_DMA_BIDIRECTIONAL);
2208
2209                 SBP2_ORB_DEBUG("matched command orb %p", &command->command_orb);
2210                 outstanding_orb_decr;
2211
2212                 /*
2213                  * Matched status with command, now grab scsi command pointers
2214                  * and check status.
2215                  */
2216                 /*
2217                  * FIXME: If the src field in the status is 1, the ORB DMA must
2218                  * not be reused until status for a subsequent ORB is received.
2219                  */
2220                 SCpnt = command->Current_SCpnt;
2221                 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
2222                 sbp2util_mark_command_completed(scsi_id, command);
2223                 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
2224
2225                 if (SCpnt) {
2226                         u32 h = sb->ORB_offset_hi_misc;
2227                         u32 r = STATUS_GET_RESP(h);
2228
2229                         if (r != RESP_STATUS_REQUEST_COMPLETE) {
2230                                 SBP2_WARN("resp 0x%x, sbp_status 0x%x",
2231                                           r, STATUS_GET_SBP_STATUS(h));
2232                                 scsi_status =
2233                                         r == RESP_STATUS_TRANSPORT_FAILURE ?
2234                                         SBP2_SCSI_STATUS_BUSY :
2235                                         SBP2_SCSI_STATUS_COMMAND_TERMINATED;
2236                         }
2237                         /*
2238                          * See if the target stored any scsi status information.
2239                          */
2240                         if (STATUS_GET_LEN(h) > 1) {
2241                                 SBP2_DEBUG("CHECK CONDITION");
2242                                 scsi_status = sbp2_status_to_sense_data(
2243                                         (unchar *)sb, SCpnt->sense_buffer);
2244                         }
2245                         /*
2246                          * Check to see if the dead bit is set. If so, we'll
2247                          * have to initiate a fetch agent reset.
2248                          */
2249                         if (STATUS_TEST_DEAD(h)) {
2250                                 SBP2_DEBUG("Dead bit set - "
2251                                            "initiating fetch agent reset");
2252                                 sbp2_agent_reset(scsi_id, 0);
2253                         }
2254                         SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb);
2255                 }
2256
2257                 /*
2258                  * Check here to see if there are no commands in-use. If there
2259                  * are none, we know that the fetch agent left the active state
2260                  * _and_ that we did not reactivate it yet. Therefore clear
2261                  * last_orb so that next time we write directly to the
2262                  * ORB_POINTER register. That way the fetch agent does not need
2263                  * to refetch the next_ORB.
2264                  */
2265                 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
2266                 if (list_empty(&scsi_id->sbp2_command_orb_inuse))
2267                         scsi_id->last_orb = NULL;
2268                 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
2269
2270         } else {
2271                 /*
2272                  * It's probably a login/logout/reconnect status.
2273                  */
2274                 if ((sb->ORB_offset_lo == scsi_id->reconnect_orb_dma) ||
2275                     (sb->ORB_offset_lo == scsi_id->login_orb_dma) ||
2276                     (sb->ORB_offset_lo == scsi_id->query_logins_orb_dma) ||
2277                     (sb->ORB_offset_lo == scsi_id->logout_orb_dma)) {
2278                         scsi_id->access_complete = 1;
2279                         wake_up_interruptible(&access_wq);
2280                 }
2281         }
2282
2283         if (SCpnt) {
2284                 SBP2_DEBUG("Completing SCSI command");
2285                 sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt,
2286                                           command->Current_done);
2287                 SBP2_ORB_DEBUG("command orb completed");
2288         }
2289
2290         return RCODE_COMPLETE;
2291 }
2292
2293 /**************************************
2294  * SCSI interface related section
2295  **************************************/
2296
2297 /*
2298  * This routine is the main entry point for doing I/O. It is
2299  * called from the scsi stack directly.
2300  */
2301 static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2302                                  void (*done)(struct scsi_cmnd *))
2303 {
2304         struct scsi_id_instance_data *scsi_id =
2305                 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2306         struct sbp2scsi_host_info *hi;
2307         int result = DID_NO_CONNECT << 16;
2308
2309         SBP2_DEBUG_ENTER();
2310 #if (CONFIG_IEEE1394_SBP2_DEBUG >= 2) || defined(CONFIG_IEEE1394_SBP2_PACKET_DUMP)
2311         scsi_print_command(SCpnt);
2312 #endif
2313
2314         if (!sbp2util_node_is_available(scsi_id))
2315                 goto done;
2316
2317         hi = scsi_id->hi;
2318
2319         if (!hi) {
2320                 SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!");
2321                 goto done;
2322         }
2323
2324         /*
2325          * Until we handle multiple luns, just return selection time-out
2326          * to any IO directed at non-zero LUNs
2327          */
2328         if (SCpnt->device->lun)
2329                 goto done;
2330
2331         /*
2332          * Check for request sense command, and handle it here
2333          * (autorequest sense)
2334          */
2335         if (SCpnt->cmnd[0] == REQUEST_SENSE) {
2336                 SBP2_DEBUG("REQUEST_SENSE");
2337                 memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen);
2338                 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
2339                 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done);
2340                 return 0;
2341         }
2342
2343         /*
2344          * Check to see if we are in the middle of a bus reset.
2345          */
2346         if (!hpsb_node_entry_valid(scsi_id->ne)) {
2347                 SBP2_ERR("Bus reset in progress - rejecting command");
2348                 result = DID_BUS_BUSY << 16;
2349                 goto done;
2350         }
2351
2352         /*
2353          * Bidirectional commands are not yet implemented,
2354          * and unknown transfer directions are not handled.
2355          */
2356         if (SCpnt->sc_data_direction == DMA_BIDIRECTIONAL) {
2357                 SBP2_ERR("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
2358                 result = DID_ERROR << 16;
2359                 goto done;
2360         }
2361
2362         /*
2363          * Try and send our SCSI command
2364          */
2365         if (sbp2_send_command(scsi_id, SCpnt, done)) {
2366                 SBP2_ERR("Error sending SCSI command");
2367                 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
2368                                           SCpnt, done);
2369         }
2370         return 0;
2371
2372 done:
2373         SCpnt->result = result;
2374         done(SCpnt);
2375         return 0;
2376 }
2377
2378 /*
2379  * This function is called in order to complete all outstanding SBP-2
2380  * commands (in case of resets, etc.).
2381  */
2382 static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
2383                                            u32 status)
2384 {
2385         struct sbp2scsi_host_info *hi = scsi_id->hi;
2386         struct list_head *lh;
2387         struct sbp2_command_info *command;
2388         unsigned long flags;
2389
2390         SBP2_DEBUG_ENTER();
2391
2392         spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
2393         while (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
2394                 SBP2_DEBUG("Found pending command to complete");
2395                 lh = scsi_id->sbp2_command_orb_inuse.next;
2396                 command = list_entry(lh, struct sbp2_command_info, list);
2397                 pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
2398                                             sizeof(struct sbp2_command_orb),
2399                                             PCI_DMA_TODEVICE);
2400                 pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
2401                                             sizeof(command->scatter_gather_element),
2402                                             PCI_DMA_BIDIRECTIONAL);
2403                 sbp2util_mark_command_completed(scsi_id, command);
2404                 if (command->Current_SCpnt) {
2405                         command->Current_SCpnt->result = status << 16;
2406                         command->Current_done(command->Current_SCpnt);
2407                 }
2408         }
2409         spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
2410
2411         return;
2412 }
2413
2414 /*
2415  * This function is called in order to complete a regular SBP-2 command.
2416  *
2417  * This can be called in interrupt context.
2418  */
2419 static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2420                                       u32 scsi_status, struct scsi_cmnd *SCpnt,
2421                                       void (*done)(struct scsi_cmnd *))
2422 {
2423         SBP2_DEBUG_ENTER();
2424
2425         /*
2426          * Sanity
2427          */
2428         if (!SCpnt) {
2429                 SBP2_ERR("SCpnt is NULL");
2430                 return;
2431         }
2432
2433         /*
2434          * If a bus reset is in progress and there was an error, don't
2435          * complete the command, just let it get retried at the end of the
2436          * bus reset.
2437          */
2438         if (!hpsb_node_entry_valid(scsi_id->ne)
2439             && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2440                 SBP2_ERR("Bus reset in progress - retry command later");
2441                 return;
2442         }
2443
2444         /*
2445          * Switch on scsi status
2446          */
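        /*
         * SCpnt->result packs the SCSI status byte into bits 0-7 and the
         * host byte (the DID_* codes) into bits 16-23, hence the << 16
         * shifts below.
         */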
2447         switch (scsi_status) {
2448         case SBP2_SCSI_STATUS_GOOD:
2449                 SCpnt->result = DID_OK << 16;
2450                 break;
2451
2452         case SBP2_SCSI_STATUS_BUSY:
2453                 SBP2_ERR("SBP2_SCSI_STATUS_BUSY");
2454                 SCpnt->result = DID_BUS_BUSY << 16;
2455                 break;
2456
2457         case SBP2_SCSI_STATUS_CHECK_CONDITION:
2458                 SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
2459                 SCpnt->result = CHECK_CONDITION << 1 | DID_OK << 16;
2460 #if CONFIG_IEEE1394_SBP2_DEBUG >= 1
2461                 scsi_print_command(SCpnt);
2462                 scsi_print_sense(SBP2_DEVICE_NAME, SCpnt);
2463 #endif
2464                 break;
2465
2466         case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
2467                 SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
2468                 SCpnt->result = DID_NO_CONNECT << 16;
2469                 scsi_print_command(SCpnt);
2470                 break;
2471
2472         case SBP2_SCSI_STATUS_CONDITION_MET:
2473         case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
2474         case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
2475                 SBP2_ERR("Bad SCSI status = %x", scsi_status);
2476                 SCpnt->result = DID_ERROR << 16;
2477                 scsi_print_command(SCpnt);
2478                 break;
2479
2480         default:
2481                 SBP2_ERR("Unsupported SCSI status = %x", scsi_status);
2482                 SCpnt->result = DID_ERROR << 16;
2483         }
2484
2485         /*
2486          * If a bus reset is in progress and there was an error, complete
2487          * the command as busy so that it will get retried.
2488          */
2489         if (!hpsb_node_entry_valid(scsi_id->ne)
2490             && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2491                 SBP2_ERR("Completing command with busy (bus reset)");
2492                 SCpnt->result = DID_BUS_BUSY << 16;
2493         }
2494
2495         /*
2496          * If a unit attention occurs, return busy status so it gets
2497          * retried... it could have happened because of a 1394 bus reset
2498          * or hot-plug...
2499          * XXX  DID_BUS_BUSY is actually a bad idea because it will defeat
2500          * the scsi layer's retry logic.
2501          */
2502 #if 0
2503         if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) &&
2504             (SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
2505                 SBP2_DEBUG("UNIT ATTENTION - return busy");
2506                 SCpnt->result = DID_BUS_BUSY << 16;
2507         }
2508 #endif
2509
2510         /*
2511          * Tell scsi stack that we're done with this command
2512          */
2513         done(SCpnt);
2514 }
2515
2516 static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
2517 {
2518         struct scsi_id_instance_data *scsi_id =
2519                 (struct scsi_id_instance_data *)sdev->host->hostdata[0];
2520
2521         scsi_id->sdev = sdev;
2522         sdev->allow_restart = 1;
2523
2524         if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
2525                 sdev->inquiry_len = 36;
2526         return 0;
2527 }
2528
2529 static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2530 {
2531         struct scsi_id_instance_data *scsi_id =
2532                 (struct scsi_id_instance_data *)sdev->host->hostdata[0];
2533
2534         blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2535         sdev->use_10_for_rw = 1;
2536
2537         if (sdev->type == TYPE_DISK &&
2538             scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
2539                 sdev->skip_ms_page_8 = 1;
2540         if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
2541                 sdev->fix_capacity = 1;
2542         return 0;
2543 }
2544
2545 static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
2546 {
2547         ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL;
2548         return;
2549 }
2550
2551 /*
2552  * Called by scsi stack when something has really gone wrong.  Usually
2553  * called when a command has timed-out for some reason.
2554  */
2555 static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2556 {
2557         struct scsi_id_instance_data *scsi_id =
2558                 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2559         struct sbp2scsi_host_info *hi = scsi_id->hi;
2560         struct sbp2_command_info *command;
2561         unsigned long flags;
2562
2563         SBP2_ERR("aborting sbp2 command");
2564         scsi_print_command(SCpnt);
2565
2566         if (sbp2util_node_is_available(scsi_id)) {
2567                 sbp2_agent_reset(scsi_id, 1);
2568
2569                 /* Return a matching command structure to the free pool. */
2570                 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
2571                 command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
2572                 if (command) {
2573                         SBP2_DEBUG("Found command to abort");
2574                         pci_dma_sync_single_for_cpu(hi->host->pdev,
2575                                                     command->command_orb_dma,
2576                                                     sizeof(struct sbp2_command_orb),
2577                                                     PCI_DMA_TODEVICE);
2578                         pci_dma_sync_single_for_cpu(hi->host->pdev,
2579                                                     command->sge_dma,
2580                                                     sizeof(command->scatter_gather_element),
2581                                                     PCI_DMA_BIDIRECTIONAL);
2582                         sbp2util_mark_command_completed(scsi_id, command);
2583                         if (command->Current_SCpnt) {
2584                                 command->Current_SCpnt->result = DID_ABORT << 16;
2585                                 command->Current_done(command->Current_SCpnt);
2586                         }
2587                 }
2588                 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
2589
2590                 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
2591         }
2592
2593         return SUCCESS;
2594 }
2595
2596 /*
2597  * Called by scsi stack when something has really gone wrong.
2598  */
2599 static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2600 {
2601         struct scsi_id_instance_data *scsi_id =
2602                 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2603
2604         SBP2_ERR("reset requested");
2605
2606         if (sbp2util_node_is_available(scsi_id)) {
2607                 SBP2_ERR("Generating sbp2 fetch agent reset");
2608                 sbp2_agent_reset(scsi_id, 1);
2609         }
2610
2611         return SUCCESS;
2612 }
2613
2614 static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
2615                                            struct device_attribute *attr,
2616                                            char *buf)
2617 {
2618         struct scsi_device *sdev;
2619         struct scsi_id_instance_data *scsi_id;
2620         int lun;
2621
2622         if (!(sdev = to_scsi_device(dev)))
2623                 return 0;
2624
2625         if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0]))
2626                 return 0;
2627
2628         lun = ORB_SET_LUN(scsi_id->sbp2_lun);
2629
2630         return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid,
2631                        scsi_id->ud->id, lun);
2632 }
2633 static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
2634
2635 static struct device_attribute *sbp2_sysfs_sdev_attrs[] = {
2636         &dev_attr_ieee1394_id,
2637         NULL
2638 };
2639
2640 MODULE_AUTHOR("Ben Collins <bcollins@debian.org>");
2641 MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
2642 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
2643 MODULE_LICENSE("GPL");
2644
2645 /* SCSI host template */
2646 static struct scsi_host_template scsi_driver_template = {
2647         .module =                       THIS_MODULE,
2648         .name =                         "SBP-2 IEEE-1394",
2649         .proc_name =                    SBP2_DEVICE_NAME,
2650         .queuecommand =                 sbp2scsi_queuecommand,
2651         .eh_abort_handler =             sbp2scsi_abort,
2652         .eh_device_reset_handler =      sbp2scsi_reset,
2653         .slave_alloc =                  sbp2scsi_slave_alloc,
2654         .slave_configure =              sbp2scsi_slave_configure,
2655         .slave_destroy =                sbp2scsi_slave_destroy,
2656         .this_id =                      -1,
2657         .sg_tablesize =                 SG_ALL,
2658         .use_clustering =               ENABLE_CLUSTERING,
2659         .cmd_per_lun =                  SBP2_MAX_CMDS,
2660         .can_queue =                    SBP2_MAX_CMDS,
2661         .emulated =                     1,
2662         .sdev_attrs =                   sbp2_sysfs_sdev_attrs,
2663 };
2664
2665 static int sbp2_module_init(void)
2666 {
2667         int ret;
2668
2669         SBP2_DEBUG_ENTER();
2670
2671         /* Module load debug option to force one command at a time (serializing I/O) */
2672         if (serialize_io) {
2673                 scsi_driver_template.can_queue = 1;
2674                 scsi_driver_template.cmd_per_lun = 1;
2675         }
2676
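        /* 128 KiB at 512 bytes per sector corresponds to 256 sectors. */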
2677         if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
2678             (max_sectors * 512) > (128 * 1024))
2679                 max_sectors = 128 * 1024 / 512;
2680         scsi_driver_template.max_sectors = max_sectors;
2681
2682         /* Register our high level driver with 1394 stack */
2683         hpsb_register_highlevel(&sbp2_highlevel);
2684
2685         ret = hpsb_register_protocol(&sbp2_driver);
2686         if (ret) {
2687                 SBP2_ERR("Failed to register protocol");
2688                 hpsb_unregister_highlevel(&sbp2_highlevel);
2689                 return ret;
2690         }
2691
2692         return 0;
2693 }
2694
2695 static void __exit sbp2_module_exit(void)
2696 {
2697         SBP2_DEBUG_ENTER();
2698
2699         hpsb_unregister_protocol(&sbp2_driver);
2700
2701         hpsb_unregister_highlevel(&sbp2_highlevel);
2702 }
2703
2704 module_init(sbp2_module_init);
2705 module_exit(sbp2_module_exit);