1 /*
2  *      Adaptec AAC series RAID controller driver
3  *      (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4  *
5  * based on the old aacraid driver that is..
6  * Adaptec aacraid device driver for Linux.
7  *
8  * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2, or (at your option)
13  * any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; see the file COPYING.  If not, write to
22  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23  *
24  */
25
26 #include <linux/kernel.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/sched.h>
30 #include <linux/pci.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/completion.h>
34 #include <linux/blkdev.h>
35 #include <linux/dma-mapping.h>
36 #include <asm/semaphore.h>
37 #include <asm/uaccess.h>
38
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_cmnd.h>
41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_host.h>
43
44 #include "aacraid.h"
45
46 /* values for inqd_pdt: Peripheral device type in plain English */
47 #define INQD_PDT_DA     0x00    /* Direct-access (DISK) device */
48 #define INQD_PDT_PROC   0x03    /* Processor device */
49 #define INQD_PDT_CHNGR  0x08    /* Changer (jukebox, scsi2) */
50 #define INQD_PDT_COMM   0x09    /* Communication device (scsi2) */
51 #define INQD_PDT_NOLUN2 0x1f    /* Unknown Device (scsi2) */
52 #define INQD_PDT_NOLUN  0x7f    /* Logical Unit Not Present */
53
54 #define INQD_PDT_DMASK  0x1F    /* Peripheral Device Type Mask */
55 #define INQD_PDT_QMASK  0xE0    /* Peripheral Device Qualifier Mask */
56
57 /*
58  *      Sense codes
59  */
60  
61 #define SENCODE_NO_SENSE                        0x00
62 #define SENCODE_END_OF_DATA                     0x00
63 #define SENCODE_BECOMING_READY                  0x04
64 #define SENCODE_INIT_CMD_REQUIRED               0x04
65 #define SENCODE_PARAM_LIST_LENGTH_ERROR         0x1A
66 #define SENCODE_INVALID_COMMAND                 0x20
67 #define SENCODE_LBA_OUT_OF_RANGE                0x21
68 #define SENCODE_INVALID_CDB_FIELD               0x24
69 #define SENCODE_LUN_NOT_SUPPORTED               0x25
70 #define SENCODE_INVALID_PARAM_FIELD             0x26
71 #define SENCODE_PARAM_NOT_SUPPORTED             0x26
72 #define SENCODE_PARAM_VALUE_INVALID             0x26
73 #define SENCODE_RESET_OCCURRED                  0x29
74 #define SENCODE_LUN_NOT_SELF_CONFIGURED_YET     0x3E
75 #define SENCODE_INQUIRY_DATA_CHANGED            0x3F
76 #define SENCODE_SAVING_PARAMS_NOT_SUPPORTED     0x39
77 #define SENCODE_DIAGNOSTIC_FAILURE              0x40
78 #define SENCODE_INTERNAL_TARGET_FAILURE         0x44
79 #define SENCODE_INVALID_MESSAGE_ERROR           0x49
80 #define SENCODE_LUN_FAILED_SELF_CONFIG          0x4c
81 #define SENCODE_OVERLAPPED_COMMAND              0x4E
82
83 /*
84  *      Additional sense codes
85  */
86  
87 #define ASENCODE_NO_SENSE                       0x00
88 #define ASENCODE_END_OF_DATA                    0x05
89 #define ASENCODE_BECOMING_READY                 0x01
90 #define ASENCODE_INIT_CMD_REQUIRED              0x02
91 #define ASENCODE_PARAM_LIST_LENGTH_ERROR        0x00
92 #define ASENCODE_INVALID_COMMAND                0x00
93 #define ASENCODE_LBA_OUT_OF_RANGE               0x00
94 #define ASENCODE_INVALID_CDB_FIELD              0x00
95 #define ASENCODE_LUN_NOT_SUPPORTED              0x00
96 #define ASENCODE_INVALID_PARAM_FIELD            0x00
97 #define ASENCODE_PARAM_NOT_SUPPORTED            0x01
98 #define ASENCODE_PARAM_VALUE_INVALID            0x02
99 #define ASENCODE_RESET_OCCURRED                 0x00
100 #define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET    0x00
101 #define ASENCODE_INQUIRY_DATA_CHANGED           0x03
102 #define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED    0x00
103 #define ASENCODE_DIAGNOSTIC_FAILURE             0x80
104 #define ASENCODE_INTERNAL_TARGET_FAILURE        0x00
105 #define ASENCODE_INVALID_MESSAGE_ERROR          0x00
106 #define ASENCODE_LUN_FAILED_SELF_CONFIG         0x00
107 #define ASENCODE_OVERLAPPED_COMMAND             0x00
108
109 #define BYTE0(x) (unsigned char)(x)
110 #define BYTE1(x) (unsigned char)((x) >> 8)
111 #define BYTE2(x) (unsigned char)((x) >> 16)
112 #define BYTE3(x) (unsigned char)((x) >> 24)
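/*
 * Note: BYTE0..BYTE3 extract the four bytes of a 32-bit value, BYTE0 being
 * the least significant.  For example, with residue = 0x11223344, BYTE3()
 * yields 0x11 and BYTE0() yields 0x44; set_sense() below stores the residue
 * most-significant-byte first in sense bytes 3..6 this way.
 */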
113
114 /*------------------------------------------------------------------------------
115  *              S T R U C T S / T Y P E D E F S
116  *----------------------------------------------------------------------------*/
117 /* SCSI inquiry data */
118 struct inquiry_data {
119         u8 inqd_pdt;    /* Peripheral qualifier | Peripheral Device Type  */
120         u8 inqd_dtq;    /* RMB | Device Type Qualifier  */
121         u8 inqd_ver;    /* ISO version | ECMA version | ANSI-approved version */
122         u8 inqd_rdf;    /* AENC | TrmIOP | Response data format */
123         u8 inqd_len;    /* Additional length (n-4) */
124         u8 inqd_pad1[2];/* Reserved - must be zero */
125         u8 inqd_pad2;   /* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
126         u8 inqd_vid[8]; /* Vendor ID */
127         u8 inqd_pid[16];/* Product ID */
128         u8 inqd_prl[4]; /* Product Revision Level */
129 };
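/*
 * Note: the fields above add up to the standard 36-byte SCSI INQUIRY
 * response.  get_container_name_callback() overwrites the inqd_pid field
 * with the container name via aac_internal_transfer().
 */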
130
131 /*
132  *              M O D U L E   G L O B A L S
133  */
134  
135 static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
136 static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
137 static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
138 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
139 #ifdef AAC_DETAILED_STATUS_INFO
140 static char *aac_get_status_string(u32 status);
141 #endif
142
143 /*
144  *      Non-DASD selection is handled entirely in aachba now
145  */     
146  
147 static int nondasd = -1;
148 static int dacmode = -1;
149
150 static int commit = -1;
151 int startup_timeout = 180;
152 int aif_timeout = 120;
153
154 module_param(nondasd, int, S_IRUGO|S_IWUSR);
155 MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
156 module_param(dacmode, int, S_IRUGO|S_IWUSR);
157 MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
158 module_param(commit, int, S_IRUGO|S_IWUSR);
159 MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");
160 module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
161 MODULE_PARM_DESC(startup_timeout, "The duration in seconds to wait for the adapter to have its kernel up and\nrunning. This is typically adjusted for large systems that do not have a BIOS.");
162 module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
163 MODULE_PARM_DESC(aif_timeout, "The duration in seconds to wait for applications to pick up AIFs before\nderegistering them. This is typically adjusted for heavily burdened systems.");
164
165 int numacb = -1;
166 module_param(numacb, int, S_IRUGO|S_IWUSR);
167 MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid values are 512 and down. Default is to use suggestion from Firmware.");
168
169 int acbsize = -1;
170 module_param(acbsize, int, S_IRUGO|S_IWUSR);
171 MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
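/*
 * Illustrative only: these are ordinary module parameters, so they can be
 * set at load time (e.g. "modprobe aacraid nondasd=1 commit=1") or, for the
 * writable ones, adjusted later under /sys/module/aacraid/parameters/.
 */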
172 /**
173  *      aac_get_config_status   -       check the adapter configuration
174  *      @dev: adapter to query
175  *
176  *      Query config status, and commit the configuration if needed.
177  */
178 int aac_get_config_status(struct aac_dev *dev)
179 {
180         int status = 0;
181         struct fib * fibptr;
182
183         if (!(fibptr = aac_fib_alloc(dev)))
184                 return -ENOMEM;
185
186         aac_fib_init(fibptr);
187         {
188                 struct aac_get_config_status *dinfo;
189                 dinfo = (struct aac_get_config_status *) fib_data(fibptr);
190
191                 dinfo->command = cpu_to_le32(VM_ContainerConfig);
192                 dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
193                 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
194         }
195
196         status = aac_fib_send(ContainerCommand,
197                             fibptr,
198                             sizeof (struct aac_get_config_status),
199                             FsaNormal,
200                             1, 1,
201                             NULL, NULL);
202         if (status < 0 ) {
203                 printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
204         } else {
205                 struct aac_get_config_status_resp *reply
206                   = (struct aac_get_config_status_resp *) fib_data(fibptr);
207                 dprintk((KERN_WARNING
208                   "aac_get_config_status: response=%d status=%d action=%d\n",
209                   le32_to_cpu(reply->response),
210                   le32_to_cpu(reply->status),
211                   le32_to_cpu(reply->data.action)));
212                 if ((le32_to_cpu(reply->response) != ST_OK) ||
213                      (le32_to_cpu(reply->status) != CT_OK) ||
214                      (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
215                         printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
216                         status = -EINVAL;
217                 }
218         }
219         aac_fib_complete(fibptr);
220         /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
221         if (status >= 0) {
222                 if (commit == 1) {
223                         struct aac_commit_config * dinfo;
224                         aac_fib_init(fibptr);
225                         dinfo = (struct aac_commit_config *) fib_data(fibptr);
226         
227                         dinfo->command = cpu_to_le32(VM_ContainerConfig);
228                         dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
229         
230                         status = aac_fib_send(ContainerCommand,
231                                     fibptr,
232                                     sizeof (struct aac_commit_config),
233                                     FsaNormal,
234                                     1, 1,
235                                     NULL, NULL);
236                         aac_fib_complete(fibptr);
237                 } else if (commit == 0) {
238                         printk(KERN_WARNING
239                           "aac_get_config_status: Foreign device configurations are being ignored\n");
240                 }
241         }
242         aac_fib_free(fibptr);
243         return status;
244 }
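/*
 * Note: CT_COMMIT_CONFIG is only issued above when the "commit" module
 * parameter is 1 and the adapter reported a configuration action no more
 * drastic than CFACT_PAUSE; otherwise any foreign configuration is left
 * untouched.
 */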
245
246 /**
247  *      aac_get_containers      -       list containers
248  *      @dev: adapter to probe
249  *
250  *      Make a list of all containers on this controller
251  */
252 int aac_get_containers(struct aac_dev *dev)
253 {
254         struct fsa_dev_info *fsa_dev_ptr;
255         u32 index; 
256         int status = 0;
257         struct fib * fibptr;
258         unsigned instance;
259         struct aac_get_container_count *dinfo;
260         struct aac_get_container_count_resp *dresp;
261         int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
262
263         instance = dev->scsi_host_ptr->unique_id;
264
265         if (!(fibptr = aac_fib_alloc(dev)))
266                 return -ENOMEM;
267
268         aac_fib_init(fibptr);
269         dinfo = (struct aac_get_container_count *) fib_data(fibptr);
270         dinfo->command = cpu_to_le32(VM_ContainerConfig);
271         dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
272
273         status = aac_fib_send(ContainerCommand,
274                     fibptr,
275                     sizeof (struct aac_get_container_count),
276                     FsaNormal,
277                     1, 1,
278                     NULL, NULL);
279         if (status >= 0) {
280                 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
281                 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
282                 aac_fib_complete(fibptr);
283         }
284
285         if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
286                 maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
287         fsa_dev_ptr = (struct fsa_dev_info *) kmalloc(
288           sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
289         if (!fsa_dev_ptr) {
290                 aac_fib_free(fibptr);
291                 return -ENOMEM;
292         }
293         memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
294
295         dev->fsa_dev = fsa_dev_ptr;
296         dev->maximum_num_containers = maximum_num_containers;
297
298         for (index = 0; index < dev->maximum_num_containers; index++) {
299                 struct aac_query_mount *dinfo;
300                 struct aac_mount *dresp;
301
302                 fsa_dev_ptr[index].devname[0] = '\0';
303
304                 aac_fib_init(fibptr);
305                 dinfo = (struct aac_query_mount *) fib_data(fibptr);
306
307                 dinfo->command = cpu_to_le32(VM_NameServe);
308                 dinfo->count = cpu_to_le32(index);
309                 dinfo->type = cpu_to_le32(FT_FILESYS);
310
311                 status = aac_fib_send(ContainerCommand,
312                                     fibptr,
313                                     sizeof (struct aac_query_mount),
314                                     FsaNormal,
315                                     1, 1,
316                                     NULL, NULL);
317                 if (status < 0 ) {
318                         printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
319                         break;
320                 }
321                 dresp = (struct aac_mount *)fib_data(fibptr);
322
323                 if ((le32_to_cpu(dresp->status) == ST_OK) &&
324                     (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
325                         dinfo->command = cpu_to_le32(VM_NameServe64);
326                         dinfo->count = cpu_to_le32(index);
327                         dinfo->type = cpu_to_le32(FT_FILESYS);
328
329                         if (aac_fib_send(ContainerCommand,
330                                     fibptr,
331                                     sizeof(struct aac_query_mount),
332                                     FsaNormal,
333                                     1, 1,
334                                     NULL, NULL) < 0)
335                                 continue;
336                 } else
337                         dresp->mnt[0].capacityhigh = 0;
338
339                 dprintk ((KERN_DEBUG
340                   "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
341                   (int)index, (int)le32_to_cpu(dresp->status),
342                   (int)le32_to_cpu(dresp->mnt[0].vol),
343                   (int)le32_to_cpu(dresp->mnt[0].state),
344                   ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
345                     (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
346                 if ((le32_to_cpu(dresp->status) == ST_OK) &&
347                     (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
348                     (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
349                         fsa_dev_ptr[index].valid = 1;
350                         fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
351                         fsa_dev_ptr[index].size
352                           = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
353                             (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
354                         if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
355                                     fsa_dev_ptr[index].ro = 1;
356                 }
357                 aac_fib_complete(fibptr);
358                 /*
359                  *      If there are no more containers, then stop asking.
360                  */
361                 if ((index + 1) >= le32_to_cpu(dresp->count)){
362                         break;
363                 }
364         }
365         aac_fib_free(fibptr);
366         return status;
367 }
368
369 static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len)
370 {
371         void *buf;
372         unsigned int transfer_len;
373         struct scatterlist *sg = scsicmd->request_buffer;
374
375         if (scsicmd->use_sg) {
376                 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
377                 transfer_len = min(sg->length, len + offset);
378         } else {
379                 buf = scsicmd->request_buffer;
380                 transfer_len = min(scsicmd->request_bufflen, len + offset);
381         }
382
383         memcpy(buf + offset, data, transfer_len - offset);
384
385         if (scsicmd->use_sg) 
386                 kunmap_atomic(buf - sg->offset, KM_IRQ0);
387
388 }
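/*
 * Note: aac_internal_transfer() copies "len" bytes of driver-generated data
 * into the midlayer buffer at "offset", taking an atomic kmap of the first
 * scatter-gather element when the command uses SG; the copy is clamped to
 * the space actually available in that buffer.
 */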
389
390 static void get_container_name_callback(void *context, struct fib * fibptr)
391 {
392         struct aac_get_name_resp * get_name_reply;
393         struct scsi_cmnd * scsicmd;
394
395         scsicmd = (struct scsi_cmnd *) context;
396         scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
397
398         dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
399         if (fibptr == NULL)
400                 BUG();
401
402         get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
403         /* Failure is irrelevant, using default value instead */
404         if ((le32_to_cpu(get_name_reply->status) == CT_OK)
405          && (get_name_reply->data[0] != '\0')) {
406                 char *sp = get_name_reply->data;
407                 sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
408                 while (*sp == ' ')
409                         ++sp;
410                 if (*sp) {
411                         char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
412                         int count = sizeof(d);
413                         char *dp = d;
414                         do {
415                                 *dp++ = (*sp) ? *sp++ : ' ';
416                         } while (--count > 0);
417                         aac_internal_transfer(scsicmd, d, 
418                           offsetof(struct inquiry_data, inqd_pid), sizeof(d));
419                 }
420         }
421
422         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
423
424         aac_fib_complete(fibptr);
425         aac_fib_free(fibptr);
426         scsicmd->scsi_done(scsicmd);
427 }
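/*
 * Note: on success the container name replaces the inqd_pid field of the
 * INQUIRY data, space-padded to the width of that field; on any failure the
 * default product string is simply left in place.
 */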
428
429 /**
430  *      aac_get_container_name  -       get container name, non-blocking.
431  */
432 static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
433 {
434         int status;
435         struct aac_get_name *dinfo;
436         struct fib * cmd_fibcontext;
437         struct aac_dev * dev;
438
439         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
440
441         if (!(cmd_fibcontext = aac_fib_alloc(dev)))
442                 return -ENOMEM;
443
444         aac_fib_init(cmd_fibcontext);
445         dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
446
447         dinfo->command = cpu_to_le32(VM_ContainerConfig);
448         dinfo->type = cpu_to_le32(CT_READ_NAME);
449         dinfo->cid = cpu_to_le32(cid);
450         dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
451
452         status = aac_fib_send(ContainerCommand,
453                   cmd_fibcontext, 
454                   sizeof (struct aac_get_name),
455                   FsaNormal, 
456                   0, 1, 
457                   (fib_callback) get_container_name_callback, 
458                   (void *) scsicmd);
459         
460         /*
461          *      Check that the command was queued to the controller
462          */
463         if (status == -EINPROGRESS) {
464                 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
465                 return 0;
466         }
467                 
468         printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
469         aac_fib_complete(cmd_fibcontext);
470         aac_fib_free(cmd_fibcontext);
471         return -1;
472 }
473
474 /**
475  *      aac_probe_container             -       query a logical volume
476  *      @dev: device to query
477  *      @cid: container identifier
478  *
479  *      Queries the controller about the given volume. The volume information
480  *      is updated in the struct fsa_dev_info structure rather than returned.
481  */
482  
483 int aac_probe_container(struct aac_dev *dev, int cid)
484 {
485         struct fsa_dev_info *fsa_dev_ptr;
486         int status;
487         struct aac_query_mount *dinfo;
488         struct aac_mount *dresp;
489         struct fib * fibptr;
490         unsigned instance;
491
492         fsa_dev_ptr = dev->fsa_dev;
493         instance = dev->scsi_host_ptr->unique_id;
494
495         if (!(fibptr = aac_fib_alloc(dev)))
496                 return -ENOMEM;
497
498         aac_fib_init(fibptr);
499
500         dinfo = (struct aac_query_mount *)fib_data(fibptr);
501
502         dinfo->command = cpu_to_le32(VM_NameServe);
503         dinfo->count = cpu_to_le32(cid);
504         dinfo->type = cpu_to_le32(FT_FILESYS);
505
506         status = aac_fib_send(ContainerCommand,
507                             fibptr,
508                             sizeof(struct aac_query_mount),
509                             FsaNormal,
510                             1, 1,
511                             NULL, NULL);
512         if (status < 0) {
513                 printk(KERN_WARNING "aacraid: aac_probe_container query failed.\n");
514                 goto error;
515         }
516
517         dresp = (struct aac_mount *) fib_data(fibptr);
518
519         if ((le32_to_cpu(dresp->status) == ST_OK) &&
520             (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
521                 dinfo->command = cpu_to_le32(VM_NameServe64);
522                 dinfo->count = cpu_to_le32(cid);
523                 dinfo->type = cpu_to_le32(FT_FILESYS);
524
525                 if (aac_fib_send(ContainerCommand,
526                             fibptr,
527                             sizeof(struct aac_query_mount),
528                             FsaNormal,
529                             1, 1,
530                             NULL, NULL) < 0)
531                         goto error;
532         } else
533                 dresp->mnt[0].capacityhigh = 0;
534
535         if ((le32_to_cpu(dresp->status) == ST_OK) &&
536             (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
537             (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
538                 fsa_dev_ptr[cid].valid = 1;
539                 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
540                 fsa_dev_ptr[cid].size
541                   = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
542                     (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
543                 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
544                         fsa_dev_ptr[cid].ro = 1;
545         }
546
547 error:
548         aac_fib_complete(fibptr);
549         aac_fib_free(fibptr);
550
551         return status;
552 }
553
554 /* Local Structure to set SCSI inquiry data strings */
555 struct scsi_inq {
556         char vid[8];         /* Vendor ID */
557         char pid[16];        /* Product ID */
558         char prl[4];         /* Product Revision Level */
559 };
560
561 /**
562  *      InqStrCopy      -       string merge
563  *      @a:     string to copy from
564  *      @b:     string to copy to
565  *
566  *      Copy a String from one location to another
567  *      without copying \0
568  */
569
570 static void inqstrcpy(char *a, char *b)
571 {
572
573         while(*a != (char)0) 
574                 *b++ = *a++;
575 }
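/*
 * Note: inqstrcpy() writes no NUL terminator and does no bounds checking,
 * so callers such as setinqstr() pre-fill the destination with spaces and
 * truncate the source to the field width before copying.
 */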
576
577 static char *container_types[] = {
578         "None",
579         "Volume",
580         "Mirror",
581         "Stripe",
582         "RAID5",
583         "SSRW",
584         "SSRO",
585         "Morph",
586         "Legacy",
587         "RAID4",
588         "RAID10",             
589         "RAID00",             
590         "V-MIRRORS",          
591         "PSEUDO R4",          
592         "RAID50",
593         "RAID5D",
594         "RAID5D0",
595         "RAID1E",
596         "RAID6",
597         "RAID60",
598         "Unknown"
599 };
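/*
 * Note: setinqstr() appends container_types[tindex] to the INQUIRY product
 * ID when tindex is in range; the volume type reported by the firmware
 * (dresp->mnt[0].vol, cached in fsa_dev[].type) is what ends up being used
 * as that index, with "Unknown" as the catch-all final entry.
 */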
600
601
602
603 /* Function: setinqstr
604  *
605  * Arguments: adapter device, pointer to inquiry data buffer, container type index
606  *
607  * Purpose: Sets SCSI inquiry data strings for vendor, product
608  * and revision level. Allows strings to be set in platform dependent
609  * files instead of in OS dependent driver source.
610  */
611
612 static void setinqstr(struct aac_dev *dev, void *data, int tindex)
613 {
614         struct scsi_inq *str;
615
616         str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
617         memset(str, ' ', sizeof(*str));
618
619         if (dev->supplement_adapter_info.AdapterTypeText[0]) {
620                 char * cp = dev->supplement_adapter_info.AdapterTypeText;
621                 int c = sizeof(str->vid);
622                 while (*cp && *cp != ' ' && --c)
623                         ++cp;
624                 c = *cp;
625                 *cp = '\0';
626                 inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
627                   str->vid); 
628                 *cp = c;
629                 while (*cp && *cp != ' ')
630                         ++cp;
631                 while (*cp == ' ')
632                         ++cp;
633                 /* last six chars reserved for vol type */
634                 c = 0;
635                 if (strlen(cp) > sizeof(str->pid)) {
636                         c = cp[sizeof(str->pid)];
637                         cp[sizeof(str->pid)] = '\0';
638                 }
639                 inqstrcpy (cp, str->pid);
640                 if (c)
641                         cp[sizeof(str->pid)] = c;
642         } else {
643                 struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
644
645                 inqstrcpy (mp->vname, str->vid);
646                 /* last six chars reserved for vol type */
647                 inqstrcpy (mp->model, str->pid);
648         }
649
650         if (tindex < ARRAY_SIZE(container_types)){
651                 char *findit = str->pid;
652
653                 for ( ; *findit != ' '; findit++); /* walk till we find a space */
654                 /* RAID is superfluous in the context of a RAID device */
655                 if (memcmp(findit-4, "RAID", 4) == 0)
656                         *(findit -= 4) = ' ';
657                 if (((findit - str->pid) + strlen(container_types[tindex]))
658                  < (sizeof(str->pid) + sizeof(str->prl)))
659                         inqstrcpy (container_types[tindex], findit + 1);
660         }
661         inqstrcpy ("V1.0", str->prl);
662 }
663
664 static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
665                       u8 a_sense_code, u8 incorrect_length,
666                       u8 bit_pointer, u16 field_pointer,
667                       u32 residue)
668 {
669         sense_buf[0] = 0xF0;    /* Sense data valid, err code 70h (current error) */
670         sense_buf[1] = 0;       /* Segment number, always zero */
671
672         if (incorrect_length) {
673                 sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
674                 sense_buf[3] = BYTE3(residue);
675                 sense_buf[4] = BYTE2(residue);
676                 sense_buf[5] = BYTE1(residue);
677                 sense_buf[6] = BYTE0(residue);
678         } else
679                 sense_buf[2] = sense_key;       /* Sense key */
680
681         if (sense_key == ILLEGAL_REQUEST)
682                 sense_buf[7] = 10;      /* Additional sense length */
683         else
684                 sense_buf[7] = 6;       /* Additional sense length */
685
686         sense_buf[12] = sense_code;     /* Additional sense code */
687         sense_buf[13] = a_sense_code;   /* Additional sense code qualifier */
688         if (sense_key == ILLEGAL_REQUEST) {
689                 sense_buf[15] = 0;
690
691                 if (sense_code == SENCODE_INVALID_PARAM_FIELD)
692                         sense_buf[15] = 0x80;/* Std sense key specific field */
693                 /* Illegal parameter is in the parameter block */
694
695                 if (sense_code == SENCODE_INVALID_CDB_FIELD)
696                         sense_buf[15] = 0xc0;/* Std sense key specific field */
697                 /* Illegal parameter is in the CDB block */
698                 sense_buf[15] |= bit_pointer;
699                 sense_buf[16] = field_pointer >> 8;     /* MSB */
700                 sense_buf[17] = field_pointer;          /* LSB */
701         }
702 }
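/*
 * Illustrative call, mirroring the error paths below: build fixed-format
 * (0x70, with the valid bit set) sense data for an internal target failure
 * reported by the firmware:
 *
 *      set_sense((u8 *)&dev->fsa_dev[cid].sense_data, HARDWARE_ERROR,
 *                SENCODE_INTERNAL_TARGET_FAILURE,
 *                ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, 0, 0);
 */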
703
704 int aac_get_adapter_info(struct aac_dev* dev)
705 {
706         struct fib* fibptr;
707         int rcode;
708         u32 tmp;
709         struct aac_adapter_info *info;
710         struct aac_bus_info *command;
711         struct aac_bus_info_response *bus_info;
712
713         if (!(fibptr = aac_fib_alloc(dev)))
714                 return -ENOMEM;
715
716         aac_fib_init(fibptr);
717         info = (struct aac_adapter_info *) fib_data(fibptr);
718         memset(info,0,sizeof(*info));
719
720         rcode = aac_fib_send(RequestAdapterInfo,
721                          fibptr, 
722                          sizeof(*info),
723                          FsaNormal, 
724                          -1, 1, /* First `interrupt' command uses special wait */
725                          NULL, 
726                          NULL);
727
728         if (rcode < 0) {
729                 aac_fib_complete(fibptr);
730                 aac_fib_free(fibptr);
731                 return rcode;
732         }
733         memcpy(&dev->adapter_info, info, sizeof(*info));
734
735         if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
736                 struct aac_supplement_adapter_info * info;
737
738                 aac_fib_init(fibptr);
739
740                 info = (struct aac_supplement_adapter_info *) fib_data(fibptr);
741
742                 memset(info,0,sizeof(*info));
743
744                 rcode = aac_fib_send(RequestSupplementAdapterInfo,
745                                  fibptr,
746                                  sizeof(*info),
747                                  FsaNormal,
748                                  1, 1,
749                                  NULL,
750                                  NULL);
751
752                 if (rcode >= 0)
753                         memcpy(&dev->supplement_adapter_info, info, sizeof(*info));
754         }
755
756
757         /* 
758          * GetBusInfo 
759          */
760
761         aac_fib_init(fibptr);
762
763         bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
764
765         memset(bus_info, 0, sizeof(*bus_info));
766
767         command = (struct aac_bus_info *)bus_info;
768
769         command->Command = cpu_to_le32(VM_Ioctl);
770         command->ObjType = cpu_to_le32(FT_DRIVE);
771         command->MethodId = cpu_to_le32(1);
772         command->CtlCmd = cpu_to_le32(GetBusInfo);
773
774         rcode = aac_fib_send(ContainerCommand,
775                          fibptr,
776                          sizeof (*bus_info),
777                          FsaNormal,
778                          1, 1,
779                          NULL, NULL);
780
781         if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
782                 dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
783                 dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
784         }
785
786         tmp = le32_to_cpu(dev->adapter_info.kernelrev);
787         printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n", 
788                         dev->name, 
789                         dev->id,
790                         tmp>>24,
791                         (tmp>>16)&0xff,
792                         tmp&0xff,
793                         le32_to_cpu(dev->adapter_info.kernelbuild),
794                         (int)sizeof(dev->supplement_adapter_info.BuildDate),
795                         dev->supplement_adapter_info.BuildDate);
796         tmp = le32_to_cpu(dev->adapter_info.monitorrev);
797         printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", 
798                         dev->name, dev->id,
799                         tmp>>24,(tmp>>16)&0xff,tmp&0xff,
800                         le32_to_cpu(dev->adapter_info.monitorbuild));
801         tmp = le32_to_cpu(dev->adapter_info.biosrev);
802         printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n", 
803                         dev->name, dev->id,
804                         tmp>>24,(tmp>>16)&0xff,tmp&0xff,
805                         le32_to_cpu(dev->adapter_info.biosbuild));
806         if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
807                 printk(KERN_INFO "%s%d: serial %x\n",
808                         dev->name, dev->id,
809                         le32_to_cpu(dev->adapter_info.serial[0]));
810
811         dev->nondasd_support = 0;
812         dev->raid_scsi_mode = 0;
813         if(dev->adapter_info.options & AAC_OPT_NONDASD){
814                 dev->nondasd_support = 1;
815         }
816
817         /*
818          * If the firmware supports ROMB RAID/SCSI mode and we are currently
819          * in RAID/SCSI mode, set the flag. For now if in this mode we will
820          * force nondasd support on. If we decide to allow the non-dasd flag
821          * additional changes changes will have to be made to support
822          * additional changes will have to be made to support
823          * RAID/SCSI.  The function aac_scsi_cmd in this module will have to be
824          * changed to support the new dev->raid_scsi_mode flag instead of
825          * leeching off the dev->nondasd_support flag. Also in linit.c the
826          * max number of channels based on the aac->nondasd_support flag only.
827          */
828         if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
829             (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
830                 dev->nondasd_support = 1;
831                 dev->raid_scsi_mode = 1;
832         }
833         if (dev->raid_scsi_mode != 0)
834                 printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
835                                 dev->name, dev->id);
836                 
837         if(nondasd != -1) {  
838                 dev->nondasd_support = (nondasd!=0);
839         }
840         if(dev->nondasd_support != 0){
841                 printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
842         }
843
844         dev->dac_support = 0;
845         if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
846                 printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id);
847                 dev->dac_support = 1;
848         }
849
850         if(dacmode != -1) {
851                 dev->dac_support = (dacmode!=0);
852         }
853         if(dev->dac_support != 0) {
854                 if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) &&
855                         !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) {
856                         printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
857                                 dev->name, dev->id);
858                 } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) &&
859                         !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) {
860                         printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
861                                 dev->name, dev->id);
862                         dev->dac_support = 0;
863                 } else {
864                         printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
865                                 dev->name, dev->id);
866                         rcode = -ENOMEM;
867                 }
868         }
869         /* 
870          * 57 scatter gather elements 
871          */
872         if (!(dev->raw_io_interface)) {
873                 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
874                         sizeof(struct aac_fibhdr) -
875                         sizeof(struct aac_write) + sizeof(struct sgentry)) /
876                                 sizeof(struct sgentry);
877                 if (dev->dac_support) {
878                         /* 
879                          * 38 scatter gather elements 
880                          */
881                         dev->scsi_host_ptr->sg_tablesize =
882                                 (dev->max_fib_size -
883                                 sizeof(struct aac_fibhdr) -
884                                 sizeof(struct aac_write64) +
885                                 sizeof(struct sgentry64)) /
886                                         sizeof(struct sgentry64);
887                 }
888                 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
889                 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
890                         /*
891                          * Worst case size that could cause sg overflow when
892                          * we break up SG elements that are larger than 64KB.
893                          * Would be nice if we could tell the SCSI layer what
894                          * the maximum SG element size can be. Worst case is
895                          * (sg_tablesize-1) 4KB elements with one 64KB
896                          * element.
897                          *      32bit -> 468 or 238KB   64bit -> 424 or 212KB
898                          */
899                         dev->scsi_host_ptr->max_sectors =
900                           (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
901                 }
902         }
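        /*
         * Note: sg_tablesize above is simply how many sgentry (or sgentry64
         * when 64-bit DAC is active) elements fit in one FIB alongside the
         * write command header; max_sectors is then capped so that breaking
         * up large SG elements cannot overflow that table.
         */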
903
904         aac_fib_complete(fibptr);
905         aac_fib_free(fibptr);
906
907         return rcode;
908 }
909
910
911 static void io_callback(void *context, struct fib * fibptr)
912 {
913         struct aac_dev *dev;
914         struct aac_read_reply *readreply;
915         struct scsi_cmnd *scsicmd;
916         u32 cid;
917
918         scsicmd = (struct scsi_cmnd *) context;
919         scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
920
921         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
922         cid = scmd_id(scsicmd);
923
924         if (nblank(dprintk(x))) {
925                 u64 lba;
926                 switch (scsicmd->cmnd[0]) {
927                 case WRITE_6:
928                 case READ_6:
929                         lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
930                             (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
931                         break;
932                 case WRITE_16:
933                 case READ_16:
934                         lba = ((u64)scsicmd->cmnd[2] << 56) |
935                               ((u64)scsicmd->cmnd[3] << 48) |
936                               ((u64)scsicmd->cmnd[4] << 40) |
937                               ((u64)scsicmd->cmnd[5] << 32) |
938                               ((u64)scsicmd->cmnd[6] << 24) |
939                               (scsicmd->cmnd[7] << 16) |
940                               (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
941                         break;
942                 case WRITE_12:
943                 case READ_12:
944                         lba = ((u64)scsicmd->cmnd[2] << 24) |
945                               (scsicmd->cmnd[3] << 16) |
946                               (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
947                         break;
948                 default:
949                         lba = ((u64)scsicmd->cmnd[2] << 24) |
950                                (scsicmd->cmnd[3] << 16) |
951                                (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
952                         break;
953                 }
954                 printk(KERN_DEBUG
955                   "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
956                   smp_processor_id(), (unsigned long long)lba, jiffies);
957         }
958
959         if (fibptr == NULL)
960                 BUG();
961                 
962         if(scsicmd->use_sg)
963                 pci_unmap_sg(dev->pdev, 
964                         (struct scatterlist *)scsicmd->request_buffer,
965                         scsicmd->use_sg,
966                         scsicmd->sc_data_direction);
967         else if(scsicmd->request_bufflen)
968                 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
969                                  scsicmd->request_bufflen,
970                                  scsicmd->sc_data_direction);
971         readreply = (struct aac_read_reply *)fib_data(fibptr);
972         if (le32_to_cpu(readreply->status) == ST_OK)
973                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
974         else {
975 #ifdef AAC_DETAILED_STATUS_INFO
976                 printk(KERN_WARNING "io_callback: io failed, status = %d\n",
977                   le32_to_cpu(readreply->status));
978 #endif
979                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
980                 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
981                                     HARDWARE_ERROR,
982                                     SENCODE_INTERNAL_TARGET_FAILURE,
983                                     ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
984                                     0, 0);
985                 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
986                   (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
987                     ? sizeof(scsicmd->sense_buffer)
988                     : sizeof(dev->fsa_dev[cid].sense_data));
989         }
990         aac_fib_complete(fibptr);
991         aac_fib_free(fibptr);
992
993         scsicmd->scsi_done(scsicmd);
994 }
995
996 static int aac_read(struct scsi_cmnd * scsicmd, int cid)
997 {
998         u64 lba;
999         u32 count;
1000         int status;
1001
1002         u16 fibsize;
1003         struct aac_dev *dev;
1004         struct fib * cmd_fibcontext;
1005
1006         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1007         /*
1008          *      Get block address and transfer length
1009          */
1010         switch (scsicmd->cmnd[0]) {
1011         case READ_6:
1012                 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
1013
1014                 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | 
1015                         (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
1016                 count = scsicmd->cmnd[4];
1017
1018                 if (count == 0)
1019                         count = 256;
1020                 break;
1021         case READ_16:
1022                 dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid));
1023
1024                 lba =   ((u64)scsicmd->cmnd[2] << 56) |
1025                         ((u64)scsicmd->cmnd[3] << 48) |
1026                         ((u64)scsicmd->cmnd[4] << 40) |
1027                         ((u64)scsicmd->cmnd[5] << 32) |
1028                         ((u64)scsicmd->cmnd[6] << 24) | 
1029                         (scsicmd->cmnd[7] << 16) |
1030                         (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1031                 count = (scsicmd->cmnd[10] << 24) | 
1032                         (scsicmd->cmnd[11] << 16) |
1033                         (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1034                 break;
1035         case READ_12:
1036                 dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid));
1037
1038                 lba = ((u64)scsicmd->cmnd[2] << 24) | 
1039                         (scsicmd->cmnd[3] << 16) |
1040                         (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1041                 count = (scsicmd->cmnd[6] << 24) | 
1042                         (scsicmd->cmnd[7] << 16) |
1043                         (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1044                 break;
1045         default:
1046                 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
1047
1048                 lba = ((u64)scsicmd->cmnd[2] << 24) | 
1049                         (scsicmd->cmnd[3] << 16) | 
1050                         (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1051                 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1052                 break;
1053         }
1054         dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
1055           smp_processor_id(), (unsigned long long)lba, jiffies));
1056         if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) &&
1057                 (lba & 0xffffffff00000000LL)) {
1058                 dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
1059                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 
1060                         SAM_STAT_CHECK_CONDITION;
1061                 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1062                             HARDWARE_ERROR,
1063                             SENCODE_INTERNAL_TARGET_FAILURE,
1064                             ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1065                             0, 0);
1066                 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1067                   (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1068                     ? sizeof(scsicmd->sense_buffer)
1069                     : sizeof(dev->fsa_dev[cid].sense_data));
1070                 scsicmd->scsi_done(scsicmd);
1071                 return 0;
1072         }
1073         /*
1074          *      Allocate and initialize a Fib
1075          */
1076         if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1077                 return -1;
1078         }
1079
1080         aac_fib_init(cmd_fibcontext);
1081
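        /*
         * Note: three on-the-wire formats are used below, selected by
         * adapter capability: aac_raw_io (64-bit LBA) when the raw I/O
         * interface is available, aac_read64 with 64-bit SG entries when
         * only DAC is enabled, and the plain 32-bit aac_read otherwise.
         */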
1082         if (dev->raw_io_interface) {
1083                 struct aac_raw_io *readcmd;
1084                 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1085                 readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1086                 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1087                 readcmd->count = cpu_to_le32(count<<9);
1088                 readcmd->cid = cpu_to_le16(cid);
1089                 readcmd->flags = cpu_to_le16(1);
1090                 readcmd->bpTotal = 0;
1091                 readcmd->bpComplete = 0;
1092                 
1093                 aac_build_sgraw(scsicmd, &readcmd->sg);
1094                 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
1095                 if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
1096                         BUG();
1097                 /*
1098                  *      Now send the Fib to the adapter
1099                  */
1100                 status = aac_fib_send(ContainerRawIo,
1101                           cmd_fibcontext, 
1102                           fibsize, 
1103                           FsaNormal, 
1104                           0, 1, 
1105                           (fib_callback) io_callback, 
1106                           (void *) scsicmd);
1107         } else if (dev->dac_support == 1) {
1108                 struct aac_read64 *readcmd;
1109                 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
1110                 readcmd->command = cpu_to_le32(VM_CtHostRead64);
1111                 readcmd->cid = cpu_to_le16(cid);
1112                 readcmd->sector_count = cpu_to_le16(count);
1113                 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1114                 readcmd->pad   = 0;
1115                 readcmd->flags = 0; 
1116
1117                 aac_build_sg64(scsicmd, &readcmd->sg);
1118                 fibsize = sizeof(struct aac_read64) + 
1119                         ((le32_to_cpu(readcmd->sg.count) - 1) * 
1120                          sizeof (struct sgentry64));
1121                 BUG_ON (fibsize > (dev->max_fib_size - 
1122                                         sizeof(struct aac_fibhdr)));
1123                 /*
1124                  *      Now send the Fib to the adapter
1125                  */
1126                 status = aac_fib_send(ContainerCommand64,
1127                           cmd_fibcontext, 
1128                           fibsize, 
1129                           FsaNormal, 
1130                           0, 1, 
1131                           (fib_callback) io_callback, 
1132                           (void *) scsicmd);
1133         } else {
1134                 struct aac_read *readcmd;
1135                 readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
1136                 readcmd->command = cpu_to_le32(VM_CtBlockRead);
1137                 readcmd->cid = cpu_to_le32(cid);
1138                 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1139                 readcmd->count = cpu_to_le32(count * 512);
1140
1141                 aac_build_sg(scsicmd, &readcmd->sg);
1142                 fibsize = sizeof(struct aac_read) + 
1143                         ((le32_to_cpu(readcmd->sg.count) - 1) * 
1144                          sizeof (struct sgentry));
1145                 BUG_ON (fibsize > (dev->max_fib_size -
1146                                         sizeof(struct aac_fibhdr)));
1147                 /*
1148                  *      Now send the Fib to the adapter
1149                  */
1150                 status = aac_fib_send(ContainerCommand,
1151                           cmd_fibcontext, 
1152                           fibsize, 
1153                           FsaNormal, 
1154                           0, 1, 
1155                           (fib_callback) io_callback, 
1156                           (void *) scsicmd);
1157         }
1158
1159         
1160
1161         /*
1162          *      Check that the command was queued to the controller
1163          */
1164         if (status == -EINPROGRESS) {
1165                 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
1166                 return 0;
1167         }
1168                 
1169         printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
1170         /*
1171          *      For some reason, the Fib didn't queue, return QUEUE_FULL
1172          */
1173         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1174         scsicmd->scsi_done(scsicmd);
1175         aac_fib_complete(cmd_fibcontext);
1176         aac_fib_free(cmd_fibcontext);
1177         return 0;
1178 }
1179
1180 static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1181 {
1182         u64 lba;
1183         u32 count;
1184         int status;
1185         u16 fibsize;
1186         struct aac_dev *dev;
1187         struct fib * cmd_fibcontext;
1188
1189         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1190         /*
1191          *      Get block address and transfer length
1192          */
1193         if (scsicmd->cmnd[0] == WRITE_6)        /* 6 byte command */
1194         {
1195                 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
1196                 count = scsicmd->cmnd[4];
1197                 if (count == 0)
1198                         count = 256;
1199         } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
1200                 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid));
1201
1202                 lba =   ((u64)scsicmd->cmnd[2] << 56) |
1203                         ((u64)scsicmd->cmnd[3] << 48) |
1204                         ((u64)scsicmd->cmnd[4] << 40) |
1205                         ((u64)scsicmd->cmnd[5] << 32) |
1206                         ((u64)scsicmd->cmnd[6] << 24) | 
1207                         (scsicmd->cmnd[7] << 16) |
1208                         (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1209                 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
1210                         (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1211         } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
1212                 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid));
1213
1214                 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
1215                     | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1216                 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
1217                       | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1218         } else {
1219                 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
1220                 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1221                 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1222         }
1223         dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
1224           smp_processor_id(), (unsigned long long)lba, jiffies));
1225         if ((!(dev->raw_io_interface) || !(dev->raw_io_64))
1226          && (lba & 0xffffffff00000000LL)) {
1227                 dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
1228                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1229                 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1230                             HARDWARE_ERROR,
1231                             SENCODE_INTERNAL_TARGET_FAILURE,
1232                             ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1233                             0, 0);
1234                 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1235                   (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1236                     ? sizeof(scsicmd->sense_buffer)
1237                     : sizeof(dev->fsa_dev[cid].sense_data));
1238                 scsicmd->scsi_done(scsicmd);
1239                 return 0;
1240         }
1241         /*
1242          *      Allocate and initialize a Fib then setup a BlockWrite command
1243          */
1244         if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1245                 scsicmd->result = DID_ERROR << 16;
1246                 scsicmd->scsi_done(scsicmd);
1247                 return 0;
1248         }
1249         aac_fib_init(cmd_fibcontext);
1250
1251         if (dev->raw_io_interface) {
1252                 struct aac_raw_io *writecmd;
1253                 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1254                 writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1255                 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1256                 writecmd->count = cpu_to_le32(count<<9);
1257                 writecmd->cid = cpu_to_le16(cid);
1258                 writecmd->flags = 0; 
1259                 writecmd->bpTotal = 0;
1260                 writecmd->bpComplete = 0;
1261                 
1262                 aac_build_sgraw(scsicmd, &writecmd->sg);
1263                 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
1264                 if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
1265                         BUG();
1266                 /*
1267                  *      Now send the Fib to the adapter
1268                  */
1269                 status = aac_fib_send(ContainerRawIo,
1270                           cmd_fibcontext, 
1271                           fibsize, 
1272                           FsaNormal, 
1273                           0, 1, 
1274                           (fib_callback) io_callback, 
1275                           (void *) scsicmd);
1276         } else if (dev->dac_support == 1) {
1277                 struct aac_write64 *writecmd;
1278                 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
1279                 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
1280                 writecmd->cid = cpu_to_le16(cid);
1281                 writecmd->sector_count = cpu_to_le16(count); 
1282                 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1283                 writecmd->pad   = 0;
1284                 writecmd->flags = 0;
1285
1286                 aac_build_sg64(scsicmd, &writecmd->sg);
1287                 fibsize = sizeof(struct aac_write64) + 
1288                         ((le32_to_cpu(writecmd->sg.count) - 1) * 
1289                          sizeof (struct sgentry64));
1290                 BUG_ON (fibsize > (dev->max_fib_size -
1291                                         sizeof(struct aac_fibhdr)));
1292                 /*
1293                  *      Now send the Fib to the adapter
1294                  */
1295                 status = aac_fib_send(ContainerCommand64,
1296                           cmd_fibcontext, 
1297                           fibsize, 
1298                           FsaNormal, 
1299                           0, 1, 
1300                           (fib_callback) io_callback, 
1301                           (void *) scsicmd);
1302         } else {
1303                 struct aac_write *writecmd;
1304                 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
1305                 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
1306                 writecmd->cid = cpu_to_le32(cid);
1307                 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1308                 writecmd->count = cpu_to_le32(count * 512);
1309                 writecmd->sg.count = cpu_to_le32(1);
1310                 /* ->stable is not used - it used to select the type of write */
1311
1312                 aac_build_sg(scsicmd, &writecmd->sg);
1313                 fibsize = sizeof(struct aac_write) + 
1314                         ((le32_to_cpu(writecmd->sg.count) - 1) * 
1315                          sizeof (struct sgentry));
1316                 BUG_ON (fibsize > (dev->max_fib_size -
1317                                         sizeof(struct aac_fibhdr)));
1318                 /*
1319                  *      Now send the Fib to the adapter
1320                  */
1321                 status = aac_fib_send(ContainerCommand,
1322                           cmd_fibcontext, 
1323                           fibsize, 
1324                           FsaNormal, 
1325                           0, 1, 
1326                           (fib_callback) io_callback, 
1327                           (void *) scsicmd);
1328         }
1329
1330         /*
1331          *      Check that the command was queued to the controller
1332          */
1333         if (status == -EINPROGRESS) {
1334                 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
1335                 return 0;
1336         }
1337
1338         printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
1339         /*
1340          *      For some reason the Fib didn't queue; return TASK SET FULL (QUEUE FULL)
1341          */
1342         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1343         scsicmd->scsi_done(scsicmd);
1344
1345         aac_fib_complete(cmd_fibcontext);
1346         aac_fib_free(cmd_fibcontext);
1347         return 0;
1348 }
1349
1350 static void synchronize_callback(void *context, struct fib *fibptr)
1351 {
1352         struct aac_synchronize_reply *synchronizereply;
1353         struct scsi_cmnd *cmd;
1354
1355         cmd = context;
1356         cmd->SCp.phase = AAC_OWNER_MIDLEVEL;
1357
1358         dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n", 
1359                                 smp_processor_id(), jiffies));
1360         BUG_ON(fibptr == NULL);
1361
1362
1363         synchronizereply = fib_data(fibptr);
1364         if (le32_to_cpu(synchronizereply->status) == CT_OK)
1365                 cmd->result = DID_OK << 16 | 
1366                         COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1367         else {
1368                 struct scsi_device *sdev = cmd->device;
1369                 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
1370                 u32 cid = sdev_id(sdev);
1371                 printk(KERN_WARNING 
1372                      "synchronize_callback: synchronize failed, status = %d\n",
1373                      le32_to_cpu(synchronizereply->status));
1374                 cmd->result = DID_OK << 16 | 
1375                         COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1376                 set_sense((u8 *)&dev->fsa_dev[cid].sense_data,
1377                                     HARDWARE_ERROR,
1378                                     SENCODE_INTERNAL_TARGET_FAILURE,
1379                                     ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1380                                     0, 0);
1381                 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1382                   min(sizeof(dev->fsa_dev[cid].sense_data), 
1383                           sizeof(cmd->sense_buffer)));
1384         }
1385
1386         aac_fib_complete(fibptr);
1387         aac_fib_free(fibptr);
1388         cmd->scsi_done(cmd);
1389 }
1390
1391 static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1392 {
1393         int status;
1394         struct fib *cmd_fibcontext;
1395         struct aac_synchronize *synchronizecmd;
1396         struct scsi_cmnd *cmd;
1397         struct scsi_device *sdev = scsicmd->device;
1398         int active = 0;
1399         unsigned long flags;
1400
1401         /*
1402          * Do not flush while other commands to this specific target
1403          * (block) are still outstanding; requeue and retry later instead.
1404          */
1405         spin_lock_irqsave(&sdev->list_lock, flags);
1406         list_for_each_entry(cmd, &sdev->cmd_list, list)
1407                 if (cmd != scsicmd && cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
1408                         ++active;
1409                         break;
1410                 }
1411
1412         spin_unlock_irqrestore(&sdev->list_lock, flags);
1413
1414         /*
1415          *      Yield the processor (requeue for later)
1416          */
1417         if (active)
1418                 return SCSI_MLQUEUE_DEVICE_BUSY;
1419
1420         /*
1421          *      Allocate and initialize a Fib
1422          */
1423         if (!(cmd_fibcontext = 
1424             aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
1425                 return SCSI_MLQUEUE_HOST_BUSY;
1426
1427         aac_fib_init(cmd_fibcontext);
1428
1429         synchronizecmd = fib_data(cmd_fibcontext);
1430         synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
1431         synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
1432         synchronizecmd->cid = cpu_to_le32(cid);
1433         synchronizecmd->count = 
1434              cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
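        /* sizeof() does not evaluate its operand, so the NULL dereference above is compile-time only */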
1435
1436         /*
1437          *      Now send the Fib to the adapter
1438          */
1439         status = aac_fib_send(ContainerCommand,
1440                   cmd_fibcontext,
1441                   sizeof(struct aac_synchronize),
1442                   FsaNormal,
1443                   0, 1,
1444                   (fib_callback)synchronize_callback,
1445                   (void *)scsicmd);
1446
1447         /*
1448          *      Check that the command was queued to the controller
1449          */
1450         if (status == -EINPROGRESS) {
1451                 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
1452                 return 0;
1453         }
1454
1455         printk(KERN_WARNING 
1456                 "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
1457         aac_fib_complete(cmd_fibcontext);
1458         aac_fib_free(cmd_fibcontext);
1459         return SCSI_MLQUEUE_HOST_BUSY;
1460 }
1461
1462 /**
1463  *      aac_scsi_cmd()          -       Process SCSI command
1464  *      @scsicmd:               SCSI command block
1465  *
1466  *      Emulate a SCSI command and queue the required request for the
1467  *      aacraid firmware.
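 *
 *      Commands that can be answered locally (INQUIRY, MODE SENSE, REQUEST
 *      SENSE, capacity queries and the no-op commands) are completed inline;
 *      reads, writes and cache flushes are translated into FIBs for the
 *      adapter.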
1468  */
1469  
1470 int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1471 {
1472         u32 cid = 0;
1473         struct Scsi_Host *host = scsicmd->device->host;
1474         struct aac_dev *dev = (struct aac_dev *)host->hostdata;
1475         struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
1476         
1477         /*
1478          *      If the bus, id or lun is out of range, return fail
1479          *      Test does not apply to ID 16, the pseudo id for the controller
1480          *      itself.
1481          */
1482         if (scmd_id(scsicmd) != host->this_id) {
1483                 if ((scmd_channel(scsicmd) == CONTAINER_CHANNEL)) {
1484                         if((scmd_id(scsicmd) >= dev->maximum_num_containers) ||
1485                                         (scsicmd->device->lun != 0)) {
1486                                 scsicmd->result = DID_NO_CONNECT << 16;
1487                                 scsicmd->scsi_done(scsicmd);
1488                                 return 0;
1489                         }
1490                         cid = scmd_id(scsicmd);
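                        /* on the container channel the target id maps directly to the container number */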
1491
1492                         /*
1493                          *      If the target container doesn't exist, it may have
1494                          *      been newly created
1495                          */
1496                         if ((fsa_dev_ptr[cid].valid & 1) == 0) {
1497                                 switch (scsicmd->cmnd[0]) {
1498                                 case SERVICE_ACTION_IN:
1499                                         if (!(dev->raw_io_interface) ||
1500                                             !(dev->raw_io_64) ||
1501                                             ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
1502                                                 break;
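                                        /* fall through for READ CAPACITY(16) on raw-I/O-64 capable controllers */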
1503                                 case INQUIRY:
1504                                 case READ_CAPACITY:
1505                                 case TEST_UNIT_READY:
1506                                         spin_unlock_irq(host->host_lock);
1507                                         aac_probe_container(dev, cid);
1508                                         if ((fsa_dev_ptr[cid].valid & 1) == 0)
1509                                                 fsa_dev_ptr[cid].valid = 0;
1510                                         spin_lock_irq(host->host_lock);
1511                                         if (fsa_dev_ptr[cid].valid == 0) {
1512                                                 scsicmd->result = DID_NO_CONNECT << 16;
1513                                                 scsicmd->scsi_done(scsicmd);
1514                                                 return 0;
1515                                         }
1516                                 default:
1517                                         break;
1518                                 }
1519                         }
1520                         /*
1521                          *      If the target container still doesn't exist, 
1522                          *      return failure
1523                          */
1524                         if (fsa_dev_ptr[cid].valid == 0) {
1525                                 scsicmd->result = DID_BAD_TARGET << 16;
1526                                 scsicmd->scsi_done(scsicmd);
1527                                 return 0;
1528                         }
1529                 } else {  /* check for physical non-dasd devices */
1530                         if(dev->nondasd_support == 1){
1531                                 return aac_send_srb_fib(scsicmd);
1532                         } else {
1533                                 scsicmd->result = DID_NO_CONNECT << 16;
1534                                 scsicmd->scsi_done(scsicmd);
1535                                 return 0;
1536                         }
1537                 }
1538         }
1539         /*
1540          * else Command for the controller itself
1541          */
1542         else if ((scsicmd->cmnd[0] != INQUIRY) &&       /* only INQUIRY & TUR cmnd supported for controller */
1543                 (scsicmd->cmnd[0] != TEST_UNIT_READY)) 
1544         {
1545                 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
1546                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1547                 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1548                             ILLEGAL_REQUEST,
1549                             SENCODE_INVALID_COMMAND,
1550                             ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1551                 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1552                   (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1553                     ? sizeof(scsicmd->sense_buffer)
1554                     : sizeof(dev->fsa_dev[cid].sense_data));
1555                 scsicmd->scsi_done(scsicmd);
1556                 return 0;
1557         }
1558
1559
1560         /* Handle commands here that don't really require going out to the adapter */
1561         switch (scsicmd->cmnd[0]) {
1562         case INQUIRY:
1563         {
1564                 struct inquiry_data inq_data;
1565
1566                 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scmd_id(scsicmd)));
1567                 memset(&inq_data, 0, sizeof (struct inquiry_data));
1568
1569                 inq_data.inqd_ver = 2;  /* claim compliance to SCSI-2 */
1570                 inq_data.inqd_rdf = 2;  /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
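                /* ADDITIONAL LENGTH: 36 bytes of standard INQUIRY data minus the 5-byte header */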
1571                 inq_data.inqd_len = 31;
1572                 /*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
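                /* 0x32 = WBus16 (0x20) | Sync (0x10) | CmdQue (0x02) */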
1573                 inq_data.inqd_pad2 = 0x32;      /* WBus16 | Sync | CmdQue */
1574                 /*
1575                  *      Set the Vendor, Product, and Revision Level
1576                  *      see: <vendor>.c i.e. aac.c
1577                  */
1578                 if (scmd_id(scsicmd) == host->this_id) {
1579                         setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
1580                         inq_data.inqd_pdt = INQD_PDT_PROC;      /* Processor device */
1581                         aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1582                         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1583                         scsicmd->scsi_done(scsicmd);
1584                         return 0;
1585                 }
1586                 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
1587                 inq_data.inqd_pdt = INQD_PDT_DA;        /* Direct/random access device */
1588                 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1589                 return aac_get_container_name(scsicmd, cid);
1590         }
1591         case SERVICE_ACTION_IN:
1592                 if (!(dev->raw_io_interface) ||
1593                     !(dev->raw_io_64) ||
1594                     ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
1595                         break;
1596         {
1597                 u64 capacity;
1598                 char cp[13];
1599
1600                 dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
1601                 capacity = fsa_dev_ptr[cid].size - 1;
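                /*
                 *      READ CAPACITY(16) data: bytes 0-7 hold the last LBA
                 *      (big-endian), bytes 8-11 the block length (0x00000200,
                 *      i.e. 512 bytes); the rest of the requested allocation
                 *      length is zero-filled below.
                 */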
1602                 cp[0] = (capacity >> 56) & 0xff;
1603                 cp[1] = (capacity >> 48) & 0xff;
1604                 cp[2] = (capacity >> 40) & 0xff;
1605                 cp[3] = (capacity >> 32) & 0xff;
1606                 cp[4] = (capacity >> 24) & 0xff;
1607                 cp[5] = (capacity >> 16) & 0xff;
1608                 cp[6] = (capacity >> 8) & 0xff;
1609                 cp[7] = (capacity >> 0) & 0xff;
1610                 cp[8] = 0;
1611                 cp[9] = 0;
1612                 cp[10] = 2;
1613                 cp[11] = 0;
1614                 cp[12] = 0;
1615                 aac_internal_transfer(scsicmd, cp, 0,
1616                   min_t(size_t, scsicmd->cmnd[13], sizeof(cp)));
1617                 if (sizeof(cp) < scsicmd->cmnd[13]) {
1618                         unsigned int len, offset = sizeof(cp);
1619
1620                         memset(cp, 0, offset);
1621                         do {
1622                                 len = min_t(size_t, scsicmd->cmnd[13] - offset,
1623                                                 sizeof(cp));
1624                                 aac_internal_transfer(scsicmd, cp, offset, len);
1625                         } while ((offset += len) < scsicmd->cmnd[13]);
1626                 }
1627
1628                 /* Do not cache partition table for arrays */
1629                 scsicmd->device->removable = 1;
1630
1631                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1632                 scsicmd->scsi_done(scsicmd);
1633
1634                 return 0;
1635         }
1636
1637         case READ_CAPACITY:
1638         {
1639                 u32 capacity;
1640                 char cp[8];
1641
1642                 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
1643                 if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
1644                         capacity = fsa_dev_ptr[cid].size - 1;
1645                 else
1646                         capacity = (u32)-1;
1647
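                /*
                 *      READ CAPACITY(10) data: last LBA (big-endian) followed
                 *      by the 512-byte block length.  Containers too large for
                 *      a 32-bit LBA report 0xffffffff and need READ CAPACITY(16).
                 */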
1648                 cp[0] = (capacity >> 24) & 0xff;
1649                 cp[1] = (capacity >> 16) & 0xff;
1650                 cp[2] = (capacity >> 8) & 0xff;
1651                 cp[3] = (capacity >> 0) & 0xff;
1652                 cp[4] = 0;
1653                 cp[5] = 0;
1654                 cp[6] = 2;
1655                 cp[7] = 0;
1656                 aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
1657                 /* Do not cache partition table for arrays */
1658                 scsicmd->device->removable = 1;
1659
1660                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1661                 scsicmd->scsi_done(scsicmd);
1662
1663                 return 0;
1664         }
1665
1666         case MODE_SENSE:
1667         {
1668                 char mode_buf[4];
1669
1670                 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
1671                 mode_buf[0] = 3;        /* Mode data length */
1672                 mode_buf[1] = 0;        /* Medium type - default */
1673                 mode_buf[2] = 0;        /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1674                 mode_buf[3] = 0;        /* Block descriptor length */
1675
1676                 aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
1677                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1678                 scsicmd->scsi_done(scsicmd);
1679
1680                 return 0;
1681         }
1682         case MODE_SENSE_10:
1683         {
1684                 char mode_buf[8];
1685
1686                 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
1687                 mode_buf[0] = 0;        /* Mode data length (MSB) */
1688                 mode_buf[1] = 6;        /* Mode data length (LSB) */
1689                 mode_buf[2] = 0;        /* Medium type - default */
1690                 mode_buf[3] = 0;        /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1691                 mode_buf[4] = 0;        /* reserved */
1692                 mode_buf[5] = 0;        /* reserved */
1693                 mode_buf[6] = 0;        /* Block descriptor length (MSB) */
1694                 mode_buf[7] = 0;        /* Block descriptor length (LSB) */
1695                 aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
1696
1697                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1698                 scsicmd->scsi_done(scsicmd);
1699
1700                 return 0;
1701         }
1702         case REQUEST_SENSE:
1703                 dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
1704                 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
1705                 memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
1706                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1707                 scsicmd->scsi_done(scsicmd);
1708                 return 0;
1709
1710         case ALLOW_MEDIUM_REMOVAL:
1711                 dprintk((KERN_DEBUG "LOCK command.\n"));
1712                 if (scsicmd->cmnd[4])
1713                         fsa_dev_ptr[cid].locked = 1;
1714                 else
1715                         fsa_dev_ptr[cid].locked = 0;
1716
1717                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1718                 scsicmd->scsi_done(scsicmd);
1719                 return 0;
1720         /*
1721          *      These commands are all No-Ops
1722          */
1723         case TEST_UNIT_READY:
1724         case RESERVE:
1725         case RELEASE:
1726         case REZERO_UNIT:
1727         case REASSIGN_BLOCKS:
1728         case SEEK_10:
1729         case START_STOP:
1730                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1731                 scsicmd->scsi_done(scsicmd);
1732                 return 0;
1733         }
1734
1735         switch (scsicmd->cmnd[0]) 
1736         {
1737                 case READ_6:
1738                 case READ_10:
1739                 case READ_12:
1740                 case READ_16:
1741                         /*
1742                          *      Hack to keep track of ordinal number of the device that
1743                          *      corresponds to a container. Needed to convert
1744                          *      containers to /dev/sd device names
1745                          */
1746                          
1747                         if (scsicmd->request->rq_disk)
1748                                 strlcpy(fsa_dev_ptr[cid].devname,
1749                                 scsicmd->request->rq_disk->disk_name,
1750                                 min(sizeof(fsa_dev_ptr[cid].devname),
1751                                 sizeof(scsicmd->request->rq_disk->disk_name) + 1));
1752
1753                         return aac_read(scsicmd, cid);
1754
1755                 case WRITE_6:
1756                 case WRITE_10:
1757                 case WRITE_12:
1758                 case WRITE_16:
1759                         return aac_write(scsicmd, cid);
1760
1761                 case SYNCHRONIZE_CACHE:
1762                         /* Issue FIB to tell Firmware to flush its cache */
1763                         return aac_synchronize(scsicmd, cid);
1764                         
1765                 default:
1766                         /*
1767                          *      Unhandled commands
1768                          */
1769                         dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
1770                         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1771                         set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1772                                 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
1773                                 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1774                         memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1775                           (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1776                             ? sizeof(scsicmd->sense_buffer)
1777                             : sizeof(dev->fsa_dev[cid].sense_data));
1778                         scsicmd->scsi_done(scsicmd);
1779                         return 0;
1780         }
1781 }
1782
1783 static int query_disk(struct aac_dev *dev, void __user *arg)
1784 {
1785         struct aac_query_disk qd;
1786         struct fsa_dev_info *fsa_dev_ptr;
1787
1788         fsa_dev_ptr = dev->fsa_dev;
1789         if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
1790                 return -EFAULT;
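        /*
         *      The caller may identify the container either by cnum or by
         *      bus/id/lun; fill in whichever half was passed as -1.
         */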
1791         if (qd.cnum == -1)
1792                 qd.cnum = qd.id;
1793         else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) 
1794         {
1795                 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
1796                         return -EINVAL;
1797                 qd.instance = dev->scsi_host_ptr->host_no;
1798                 qd.bus = 0;
1799                 qd.id = CONTAINER_TO_ID(qd.cnum);
1800                 qd.lun = CONTAINER_TO_LUN(qd.cnum);
1801         }
1802         else return -EINVAL;
1803
1804         qd.valid = fsa_dev_ptr[qd.cnum].valid;
1805         qd.locked = fsa_dev_ptr[qd.cnum].locked;
1806         qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
1807
1808         if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
1809                 qd.unmapped = 1;
1810         else
1811                 qd.unmapped = 0;
1812
1813         strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
1814           min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
1815
1816         if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
1817                 return -EFAULT;
1818         return 0;
1819 }
1820
1821 static int force_delete_disk(struct aac_dev *dev, void __user *arg)
1822 {
1823         struct aac_delete_disk dd;
1824         struct fsa_dev_info *fsa_dev_ptr;
1825
1826         fsa_dev_ptr = dev->fsa_dev;
1827
1828         if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1829                 return -EFAULT;
1830
1831         if (dd.cnum >= dev->maximum_num_containers)
1832                 return -EINVAL;
1833         /*
1834          *      Mark this container as being deleted.
1835          */
1836         fsa_dev_ptr[dd.cnum].deleted = 1;
1837         /*
1838          *      Mark the container as no longer valid
1839          */
1840         fsa_dev_ptr[dd.cnum].valid = 0;
1841         return 0;
1842 }
1843
1844 static int delete_disk(struct aac_dev *dev, void __user *arg)
1845 {
1846         struct aac_delete_disk dd;
1847         struct fsa_dev_info *fsa_dev_ptr;
1848
1849         fsa_dev_ptr = dev->fsa_dev;
1850
1851         if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1852                 return -EFAULT;
1853
1854         if (dd.cnum >= dev->maximum_num_containers)
1855                 return -EINVAL;
1856         /*
1857          *      If the container is locked, it can not be deleted by the API.
1858          */
1859         if (fsa_dev_ptr[dd.cnum].locked)
1860                 return -EBUSY;
1861         else {
1862                 /*
1863                  *      Mark the container as no longer being valid.
1864                  */
1865                 fsa_dev_ptr[dd.cnum].valid = 0;
1866                 fsa_dev_ptr[dd.cnum].devname[0] = '\0';
1867                 return 0;
1868         }
1869 }
1870
1871 int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
1872 {
1873         switch (cmd) {
1874         case FSACTL_QUERY_DISK:
1875                 return query_disk(dev, arg);
1876         case FSACTL_DELETE_DISK:
1877                 return delete_disk(dev, arg);
1878         case FSACTL_FORCE_DELETE_DISK:
1879                 return force_delete_disk(dev, arg);
1880         case FSACTL_GET_CONTAINERS:
1881                 return aac_get_containers(dev);
1882         default:
1883                 return -ENOTTY;
1884         }
1885 }
1886
1887 /**
1888  *
1889  * aac_srb_callback
1890  * @context: the context set in the fib - here it is the scsi cmd
1891  * @fibptr: pointer to the fib
1892  *
1893  * Handles the completion of a scsi command to a non-dasd device
1894  *
1895  */
1896
1897 static void aac_srb_callback(void *context, struct fib * fibptr)
1898 {
1899         struct aac_dev *dev;
1900         struct aac_srb_reply *srbreply;
1901         struct scsi_cmnd *scsicmd;
1902
1903         scsicmd = (struct scsi_cmnd *) context;
1904         scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
1905         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1906
1907         if (fibptr == NULL)
1908                 BUG();
1909
1910         srbreply = (struct aac_srb_reply *) fib_data(fibptr);
1911
1912         scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
1913         /*
1914          *      Calculate resid for sg 
1915          */
1916          
1917         scsicmd->resid = scsicmd->request_bufflen - 
1918                 le32_to_cpu(srbreply->data_xfer_length);
1919
1920         if(scsicmd->use_sg)
1921                 pci_unmap_sg(dev->pdev, 
1922                         (struct scatterlist *)scsicmd->request_buffer,
1923                         scsicmd->use_sg,
1924                         scsicmd->sc_data_direction);
1925         else if(scsicmd->request_bufflen)
1926                 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
1927                         scsicmd->sc_data_direction);
1928
1929         /*
1930          * First check the fib status
1931          */
1932
1933         if (le32_to_cpu(srbreply->status) != ST_OK){
1934                 int len;
1935                 printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
1936                 len = (le32_to_cpu(srbreply->sense_data_size) > 
1937                                 sizeof(scsicmd->sense_buffer)) ?
1938                                 sizeof(scsicmd->sense_buffer) : 
1939                                 le32_to_cpu(srbreply->sense_data_size);
1940                 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1941                 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1942         }
1943
1944         /*
1945          * Next check the srb status
1946          */
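        /* only the low 6 bits encode the SRB status value; the upper bits are flag bits and are masked off */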
1947         switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
1948         case SRB_STATUS_ERROR_RECOVERY:
1949         case SRB_STATUS_PENDING:
1950         case SRB_STATUS_SUCCESS:
1951                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1952                 break;
1953         case SRB_STATUS_DATA_OVERRUN:
1954                 switch(scsicmd->cmnd[0]){
1955                 case  READ_6:
1956                 case  WRITE_6:
1957                 case  READ_10:
1958                 case  WRITE_10:
1959                 case  READ_12:
1960                 case  WRITE_12:
1961                 case  READ_16:
1962                 case  WRITE_16:
1963                         if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
1964                                 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
1965                         } else {
1966                                 printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
1967                         }
1968                         scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1969                         break;
1970                 case INQUIRY: {
1971                         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1972                         break;
1973                 }
1974                 default:
1975                         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1976                         break;
1977                 }
1978                 break;
1979         case SRB_STATUS_ABORTED:
1980                 scsicmd->result = DID_ABORT << 16 | ABORT << 8;
1981                 break;
1982         case SRB_STATUS_ABORT_FAILED:
1983                 // Not sure about this one - but assuming the hba was trying to abort for some reason
1984                 scsicmd->result = DID_ERROR << 16 | ABORT << 8;
1985                 break;
1986         case SRB_STATUS_PARITY_ERROR:
1987                 scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
1988                 break;
1989         case SRB_STATUS_NO_DEVICE:
1990         case SRB_STATUS_INVALID_PATH_ID:
1991         case SRB_STATUS_INVALID_TARGET_ID:
1992         case SRB_STATUS_INVALID_LUN:
1993         case SRB_STATUS_SELECTION_TIMEOUT:
1994                 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1995                 break;
1996
1997         case SRB_STATUS_COMMAND_TIMEOUT:
1998         case SRB_STATUS_TIMEOUT:
1999                 scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
2000                 break;
2001
2002         case SRB_STATUS_BUSY:
2003                 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
2004                 break;
2005
2006         case SRB_STATUS_BUS_RESET:
2007                 scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
2008                 break;
2009
2010         case SRB_STATUS_MESSAGE_REJECTED:
2011                 scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
2012                 break;
2013         case SRB_STATUS_REQUEST_FLUSHED:
2014         case SRB_STATUS_ERROR:
2015         case SRB_STATUS_INVALID_REQUEST:
2016         case SRB_STATUS_REQUEST_SENSE_FAILED:
2017         case SRB_STATUS_NO_HBA:
2018         case SRB_STATUS_UNEXPECTED_BUS_FREE:
2019         case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
2020         case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
2021         case SRB_STATUS_DELAYED_RETRY:
2022         case SRB_STATUS_BAD_FUNCTION:
2023         case SRB_STATUS_NOT_STARTED:
2024         case SRB_STATUS_NOT_IN_USE:
2025         case SRB_STATUS_FORCE_ABORT:
2026         case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
2027         default:
2028 #ifdef AAC_DETAILED_STATUS_INFO
2029                 printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
2030                         le32_to_cpu(srbreply->srb_status) & 0x3F,
2031                         aac_get_status_string(
2032                                 le32_to_cpu(srbreply->srb_status) & 0x3F), 
2033                         scsicmd->cmnd[0], 
2034                         le32_to_cpu(srbreply->scsi_status));
2035 #endif
2036                 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
2037                 break;
2038         }
2039         if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){  // Check Condition
2040                 int len;
2041                 scsicmd->result |= SAM_STAT_CHECK_CONDITION;
2042                 len = (le32_to_cpu(srbreply->sense_data_size) > 
2043                                 sizeof(scsicmd->sense_buffer)) ?
2044                                 sizeof(scsicmd->sense_buffer) :
2045                                 le32_to_cpu(srbreply->sense_data_size);
2046 #ifdef AAC_DETAILED_STATUS_INFO
2047                 printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
2048                                         le32_to_cpu(srbreply->status), len);
2049 #endif
2050                 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
2051                 
2052         }
2053         /*
2054          * OR in the scsi status (already shifted up a bit)
2055          */
2056         scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
2057
2058         aac_fib_complete(fibptr);
2059         aac_fib_free(fibptr);
2060         scsicmd->scsi_done(scsicmd);
2061 }
2062
2063 /**
2064  *
2065  * aac_send_srb_fib
2066  * @scsicmd: the scsi command block
2067  *
2068  * This routine will form a FIB and fill in the aac_srb from the 
2069  * scsicmd passed in.
2070  */
2071
2072 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2073 {
2074         struct fib* cmd_fibcontext;
2075         struct aac_dev* dev;
2076         int status;
2077         struct aac_srb *srbcmd;
2078         u16 fibsize;
2079         u32 flag;
2080         u32 timeout;
2081
2082         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2083         if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
2084                         scsicmd->device->lun > 7) {
2085                 scsicmd->result = DID_NO_CONNECT << 16;
2086                 scsicmd->scsi_done(scsicmd);
2087                 return 0;
2088         }
2089
2090         switch(scsicmd->sc_data_direction){
2091         case DMA_TO_DEVICE:
2092                 flag = SRB_DataOut;
2093                 break;
2094         case DMA_BIDIRECTIONAL:
2095                 flag = SRB_DataIn | SRB_DataOut;
2096                 break;
2097         case DMA_FROM_DEVICE:
2098                 flag = SRB_DataIn;
2099                 break;
2100         case DMA_NONE:
2101         default:        /* shuts up some versions of gcc */
2102                 flag = SRB_NoDataXfer;
2103                 break;
2104         }
2105
2106
2107         /*
2108          *      Allocate and initialize a Fib then setup a BlockWrite command
2109          */
2110         if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
2111                 return -1;
2112         }
2113         aac_fib_init(cmd_fibcontext);
2114
2115         srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
2116         srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
2117         srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scmd_channel(scsicmd)));
2118         srbcmd->id   = cpu_to_le32(scmd_id(scsicmd));
2119         srbcmd->lun      = cpu_to_le32(scsicmd->device->lun);
2120         srbcmd->flags    = cpu_to_le32(flag);
2121         timeout = scsicmd->timeout_per_command/HZ;
2122         if(timeout == 0){
2123                 timeout = 1;
2124         }
2125         srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
2126         srbcmd->retry_limit = 0; /* Obsolete parameter */
2127         srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
2128         
2129         if( dev->dac_support == 1 ) {
2130                 aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
2131                 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
2132
2133                 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2134                 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
2135                 /*
2136                  *      Build Scatter/Gather list
2137                  */
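                /*
                 *      struct aac_srb embeds one 32-bit sgentry; subtract it
                 *      and add one 64-bit entry per mapped segment.
                 */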
2138                 fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
2139                         ((le32_to_cpu(srbcmd->sg.count) & 0xff) * 
2140                          sizeof (struct sgentry64));
2141                 BUG_ON (fibsize > (dev->max_fib_size -
2142                                         sizeof(struct aac_fibhdr)));
2143
2144                 /*
2145                  *      Now send the Fib to the adapter
2146                  */
2147                 status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext,
2148                                 fibsize, FsaNormal, 0, 1,
2149                                   (fib_callback) aac_srb_callback, 
2150                                   (void *) scsicmd);
2151         } else {
2152                 aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
2153                 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
2154
2155                 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2156                 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
2157                 /*
2158                  *      Build Scatter/Gather list
2159                  */
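                /*
                 *      The embedded sgentry covers the first segment, so only
                 *      count - 1 additional entries are appended.
                 */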
2160                 fibsize = sizeof (struct aac_srb) + 
2161                         (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * 
2162                          sizeof (struct sgentry));
2163                 BUG_ON (fibsize > (dev->max_fib_size -
2164                                         sizeof(struct aac_fibhdr)));
2165
2166                 /*
2167                  *      Now send the Fib to the adapter
2168                  */
2169                 status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
2170                                   (fib_callback) aac_srb_callback, (void *) scsicmd);
2171         }
2172         /*
2173          *      Check that the command was queued to the controller
2174          */
2175         if (status == -EINPROGRESS) {
2176                 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2177                 return 0;
2178         }
2179
2180         printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
2181         aac_fib_complete(cmd_fibcontext);
2182         aac_fib_free(cmd_fibcontext);
2183
2184         return -1;
2185 }
2186
2187 static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
2188 {
2189         struct aac_dev *dev;
2190         unsigned long byte_count = 0;
2191
2192         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2193         // Get rid of old data
2194         psg->count = 0;
2195         psg->sg[0].addr = 0;
2196         psg->sg[0].count = 0;  
2197         if (scsicmd->use_sg) {
2198                 struct scatterlist *sg;
2199                 int i;
2200                 int sg_count;
2201                 sg = (struct scatterlist *) scsicmd->request_buffer;
2202
2203                 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
2204                         scsicmd->sc_data_direction);
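                /* pci_map_sg returns the number of DMA segments actually mapped, which may be fewer than use_sg */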
2205                 psg->count = cpu_to_le32(sg_count);
2206
2207                 for (i = 0; i < sg_count; i++) {
2208                         psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
2209                         psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
2210                         byte_count += sg_dma_len(sg);
2211                         sg++;
2212                 }
2213                 /* hba wants the size to be exact */
2214                 if(byte_count > scsicmd->request_bufflen){
2215                         u32 temp = le32_to_cpu(psg->sg[i-1].count) - 
2216                                 (byte_count - scsicmd->request_bufflen);
2217                         psg->sg[i-1].count = cpu_to_le32(temp);
2218                         byte_count = scsicmd->request_bufflen;
2219                 }
2220                 /* Check for command underflow */
2221                 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
2222                         printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
2223                                         byte_count, scsicmd->underflow);
2224                 }
2225         }
2226         else if(scsicmd->request_bufflen) {
2227                 u32 addr;
2228                 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
2229                                 scsicmd->request_buffer,
2230                                 scsicmd->request_bufflen,
2231                                 scsicmd->sc_data_direction);
2232                 addr = scsicmd->SCp.dma_handle;
2233                 psg->count = cpu_to_le32(1);
2234                 psg->sg[0].addr = cpu_to_le32(addr);
2235                 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
2236                 byte_count = scsicmd->request_bufflen;
2237         }
2238         return byte_count;
2239 }
2240
2241
2242 static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
2243 {
2244         struct aac_dev *dev;
2245         unsigned long byte_count = 0;
2246         u64 addr;
2247
2248         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2249         // Get rid of old data
2250         psg->count = 0;
2251         psg->sg[0].addr[0] = 0;
2252         psg->sg[0].addr[1] = 0;
2253         psg->sg[0].count = 0;
2254         if (scsicmd->use_sg) {
2255                 struct scatterlist *sg;
2256                 int i;
2257                 int sg_count;
2258                 sg = (struct scatterlist *) scsicmd->request_buffer;
2259
2260                 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
2261                         scsicmd->sc_data_direction);
2262
2263                 for (i = 0; i < sg_count; i++) {
2264                         int count = sg_dma_len(sg);
2265                         addr = sg_dma_address(sg);
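                        /* split the 64-bit bus address into little-endian low/high 32-bit halves */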
2266                         psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
2267                         psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
2268                         psg->sg[i].count = cpu_to_le32(count);
2269                         byte_count += count;
2270                         sg++;
2271                 }
2272                 psg->count = cpu_to_le32(sg_count);
2273                 /* hba wants the size to be exact */
2274                 if(byte_count > scsicmd->request_bufflen){
2275                         u32 temp = le32_to_cpu(psg->sg[i-1].count) - 
2276                                 (byte_count - scsicmd->request_bufflen);
2277                         psg->sg[i-1].count = cpu_to_le32(temp);
2278                         byte_count = scsicmd->request_bufflen;
2279                 }
2280                 /* Check for command underflow */
2281                 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
2282                         printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
2283                                         byte_count, scsicmd->underflow);
2284                 }
2285         }
2286         else if(scsicmd->request_bufflen) {
2287                 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
2288                                 scsicmd->request_buffer,
2289                                 scsicmd->request_bufflen,
2290                                 scsicmd->sc_data_direction);
2291                 addr = scsicmd->SCp.dma_handle;
2292                 psg->count = cpu_to_le32(1);
2293                 psg->sg[0].addr[0] = cpu_to_le32(addr & 0xffffffff);
2294                 psg->sg[0].addr[1] = cpu_to_le32(addr >> 32);
2295                 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
2296                 byte_count = scsicmd->request_bufflen;
2297         }
2298         return byte_count;
2299 }
2300
2301 static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
2302 {
2303         struct Scsi_Host *host = scsicmd->device->host;
2304         struct aac_dev *dev = (struct aac_dev *)host->hostdata;
2305         unsigned long byte_count = 0;
2306
2307         // Get rid of old data
2308         psg->count = 0;
2309         psg->sg[0].next = 0;
2310         psg->sg[0].prev = 0;
2311         psg->sg[0].addr[0] = 0;
2312         psg->sg[0].addr[1] = 0;
2313         psg->sg[0].count = 0;
2314         psg->sg[0].flags = 0;
2315         if (scsicmd->use_sg) {
2316                 struct scatterlist *sg;
2317                 int i;
2318                 int sg_count;
2319                 sg = (struct scatterlist *) scsicmd->request_buffer;
2320
2321                 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
2322                         scsicmd->sc_data_direction);
2323
2324                 for (i = 0; i < sg_count; i++) {
2325                         int count = sg_dma_len(sg);
2326                         u64 addr = sg_dma_address(sg);
2327                         psg->sg[i].next = 0;
2328                         psg->sg[i].prev = 0;
2329                         psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
2330                         psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2331                         psg->sg[i].count = cpu_to_le32(count);
2332                         psg->sg[i].flags = 0;
2333                         byte_count += count;
2334                         sg++;
2335                 }
2336                 psg->count = cpu_to_le32(sg_count);
2337                 /* hba wants the size to be exact */
2338                 if(byte_count > scsicmd->request_bufflen){
2339                         u32 temp = le32_to_cpu(psg->sg[i-1].count) - 
2340                                 (byte_count - scsicmd->request_bufflen);
2341                         psg->sg[i-1].count = cpu_to_le32(temp);
2342                         byte_count = scsicmd->request_bufflen;
2343                 }
2344                 /* Check for command underflow */
2345                 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
2346                         printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
2347                                         byte_count, scsicmd->underflow);
2348                 }
2349         }
2350         else if(scsicmd->request_bufflen) {
2351                 int count;
2352                 u64 addr;
2353                 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
2354                                 scsicmd->request_buffer,
2355                                 scsicmd->request_bufflen,
2356                                 scsicmd->sc_data_direction);
2357                 addr = scsicmd->SCp.dma_handle;
2358                 count = scsicmd->request_bufflen;
2359                 psg->count = cpu_to_le32(1);
2360                 psg->sg[0].next = 0;
2361                 psg->sg[0].prev = 0;
2362                 psg->sg[0].addr[1] = cpu_to_le32((u32)(addr>>32));
2363                 psg->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2364                 psg->sg[0].count = cpu_to_le32(count);
2365                 psg->sg[0].flags = 0;
2366                 byte_count = scsicmd->request_bufflen;
2367         }
2368         return byte_count;
2369 }
2370
2371 #ifdef AAC_DETAILED_STATUS_INFO
2372
2373 struct aac_srb_status_info {
2374         u32     status;
2375         char    *str;
2376 };
2377
2378
2379 static struct aac_srb_status_info srb_status_info[] = {
2380         { SRB_STATUS_PENDING,           "Pending Status"},
2381         { SRB_STATUS_SUCCESS,           "Success"},
2382         { SRB_STATUS_ABORTED,           "Aborted Command"},
2383         { SRB_STATUS_ABORT_FAILED,      "Abort Failed"},
2384         { SRB_STATUS_ERROR,             "Error Event"},
2385         { SRB_STATUS_BUSY,              "Device Busy"},
2386         { SRB_STATUS_INVALID_REQUEST,   "Invalid Request"},
2387         { SRB_STATUS_INVALID_PATH_ID,   "Invalid Path ID"},
2388         { SRB_STATUS_NO_DEVICE,         "No Device"},
2389         { SRB_STATUS_TIMEOUT,           "Timeout"},
2390         { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
2391         { SRB_STATUS_COMMAND_TIMEOUT,   "Command Timeout"},
2392         { SRB_STATUS_MESSAGE_REJECTED,  "Message Rejected"},
2393         { SRB_STATUS_BUS_RESET,         "Bus Reset"},
2394         { SRB_STATUS_PARITY_ERROR,      "Parity Error"},
2395         { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
2396         { SRB_STATUS_NO_HBA,            "No HBA"},
2397         { SRB_STATUS_DATA_OVERRUN,      "Data Overrun/Data Underrun"},
2398         { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
2399         { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
2400         { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
2401         { SRB_STATUS_REQUEST_FLUSHED,   "Request Flushed"},
2402         { SRB_STATUS_DELAYED_RETRY,     "Delayed Retry"},
2403         { SRB_STATUS_INVALID_LUN,       "Invalid LUN"},
2404         { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
2405         { SRB_STATUS_BAD_FUNCTION,      "Bad Function"},
2406         { SRB_STATUS_ERROR_RECOVERY,    "Error Recovery"},
2407         { SRB_STATUS_NOT_STARTED,       "Not Started"},
2408         { SRB_STATUS_NOT_IN_USE,        "Not In Use"},
2409         { SRB_STATUS_FORCE_ABORT,       "Force Abort"},
2410         { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
2411         { 0xff,                         "Unknown Error"}
2412 };
2413
2414 char *aac_get_status_string(u32 status)
2415 {
2416         int i;
2417
2418         for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
2419                 if (srb_status_info[i].status == status)
2420                         return srb_status_info[i].str;
2421
2422         return "Bad Status Code";
2423 }
2424
2425 #endif