1 /***************************************************************************
2                           dpti.c  -  description
3                              -------------------
4     begin                : Thu Sep 7 2000
5     copyright            : (C) 2000 by Adaptec
6
7                            July 30, 2001 First version being submitted
8                            for inclusion in the kernel.  V2.4
9
10     See Documentation/scsi/dpti.txt for history, notes, license info
11     and credits
12  ***************************************************************************/
13
14 /***************************************************************************
15  *                                                                         *
16  *   This program is free software; you can redistribute it and/or modify  *
17  *   it under the terms of the GNU General Public License as published by  *
18  *   the Free Software Foundation; either version 2 of the License, or     *
19  *   (at your option) any later version.                                   *
20  *                                                                         *
21  ***************************************************************************/
22 /***************************************************************************
23  * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24  - Support 2.6 kernel and DMA-mapping
25  - ioctl fix for raid tools
26  - use schedule_timeout in long loops
27  **************************************************************************/
28
29 /*#define DEBUG 1 */
30 /*#define UARTDELAY 1 */
31
32 /* In the mainline kernel ADDR32 should always be zero for 2.4; GFP_HIGH allocates
33    high pages. Keep the macro around because of the broken, unmerged ia64 tree */
34
35 #define ADDR32 (0)
36
37 #include <linux/module.h>
38
39 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
40 MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
41
42 ////////////////////////////////////////////////////////////////
43
44 #include <linux/ioctl.h>        /* For SCSI-Passthrough */
45 #include <asm/uaccess.h>
46
47 #include <linux/stat.h>
48 #include <linux/slab.h>         /* for kmalloc() */
49 #include <linux/config.h>       /* for CONFIG_PCI */
50 #include <linux/pci.h>          /* for PCI support */
51 #include <linux/proc_fs.h>
52 #include <linux/blkdev.h>
53 #include <linux/delay.h>        /* for udelay */
54 #include <linux/interrupt.h>
55 #include <linux/kernel.h>       /* for printk */
56 #include <linux/sched.h>
57 #include <linux/reboot.h>
58 #include <linux/spinlock.h>
59 #include <linux/smp_lock.h>
60
61 #include <linux/timer.h>
62 #include <linux/string.h>
63 #include <linux/ioport.h>
64
65 #include <asm/processor.h>      /* for boot_cpu_data */
66 #include <asm/pgtable.h>
67 #include <asm/io.h>             /* for virt_to_bus, etc. */
68
69 #include <scsi/scsi.h>
70 #include <scsi/scsi_cmnd.h>
71 #include <scsi/scsi_device.h>
72 #include <scsi/scsi_host.h>
73 #include <scsi/scsi_tcq.h>
74
75 #include "dpt/dptsig.h"
76 #include "dpti.h"
77
78 /*============================================================================
79  * Create a binary signature - this is read by dptsig
80  * Needed for our management apps
81  *============================================================================
82  */
83 static dpt_sig_S DPTI_sig = {
84         {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
85 #ifdef __i386__
86         PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
87 #elif defined(__ia64__)
88         PROC_INTEL, PROC_IA64,
89 #elif defined(__sparc__)
90         PROC_ULTRASPARC, PROC_ULTRASPARC,
91 #elif defined(__alpha__)
92         PROC_ALPHA, PROC_ALPHA,
93 #else
94         (-1),(-1),
95 #endif
96          FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
97         ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
98         DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
99 };
100
101
102
103
104 /*============================================================================
105  * Globals
106  *============================================================================
107  */
108
109 static DECLARE_MUTEX(adpt_configuration_lock);
110
111 static struct i2o_sys_tbl *sys_tbl = NULL;
112 static int sys_tbl_ind = 0;
113 static int sys_tbl_len = 0;
114
115 static adpt_hba* hba_chain = NULL;
116 static int hba_count = 0;
117
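/* File operations for the driver's control node.  adpt_detect() registers
 * this table with register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, ...); the
 * /dev node itself has to be created by the administrator, and the DPT/
 * Adaptec management tools use it for SCSI pass-through ioctls.
 */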
118 static struct file_operations adpt_fops = {
119         .ioctl          = adpt_ioctl,
120         .open           = adpt_open,
121         .release        = adpt_close
122 };
123
124 #ifdef REBOOT_NOTIFIER
125 static struct notifier_block adpt_reboot_notifier =
126 {
127          adpt_reboot_event,
128          NULL,
129          0
130 };
131 #endif
132
133 /* Structures and definitions for synchronous message posting.
134  * See adpt_i2o_post_wait() for description
135  * */
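/* Each synchronous request gets one of these nodes.  adpt_i2o_post_wait()
 * links it into adpt_post_wait_queue, stamps the low 15 bits of the message
 * context with 'id' and sleeps on '*wq'; adpt_i2o_post_wait_complete()
 * matches the reply context against 'id', records 'status' and wakes the
 * sleeper.  Entries left behind by timeouts are freed in
 * adpt_i2o_sys_shutdown().
 */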
136 struct adpt_i2o_post_wait_data
137 {
138         int status;
139         u32 id;
140         adpt_wait_queue_head_t *wq;
141         struct adpt_i2o_post_wait_data *next;
142 };
143
144 static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
145 static u32 adpt_post_wait_id = 0;
146 static DEFINE_SPINLOCK(adpt_post_wait_lock);
147
148
149 /*============================================================================
150  *                              Functions
151  *============================================================================
152  */
153
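/* Read the firmware "blink LED" fault code: when the byte at
 * FwDebugBLEDflag_P reads 0xbc, the byte at FwDebugBLEDvalue_P holds the
 * current fault code; otherwise 0 is returned, meaning no fault is being
 * reported.
 */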
154 static u8 adpt_read_blink_led(adpt_hba* host)
155 {
156         if(host->FwDebugBLEDflag_P != 0) {
157                 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
158                         return readb(host->FwDebugBLEDvalue_P);
159                 }
160         }
161         return 0;
162 }
163
164 /*============================================================================
165  * Scsi host template interface functions
166  *============================================================================
167  */
168
169 static struct pci_device_id dptids[] = {
170         { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
171         { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
172         { 0, }
173 };
174 MODULE_DEVICE_TABLE(pci,dptids);
175
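/* Probe sequence: for every Adaptec/DPT I2O function found on the PCI bus,
 * adpt_install_hba() maps it, then each IOP is walked through the I2O state
 * machine (adpt_i2o_activate_hba() leaves it in HOLD, the system table is
 * built, adpt_i2o_online_hba() moves it to OPERATIONAL) before the LCT is
 * read, the devices are parsed and the SCSI host is registered.
 */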
176 static int adpt_detect(struct scsi_host_template* sht)
177 {
178         struct pci_dev *pDev = NULL;
179         adpt_hba* pHba;
180
181         adpt_init();
182
183         PINFO("Detecting Adaptec I2O RAID controllers...\n");
184
185         /* search for all Adaptec I2O RAID cards */
186         while ((pDev = pci_find_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
187                 if(pDev->device == PCI_DPT_DEVICE_ID ||
188                    pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
189                         if(adpt_install_hba(sht, pDev) ){
190                                 PERROR("Could not Init an I2O RAID device\n");
191                                 PERROR("Will not try to detect others.\n");
192                                 return hba_count-1;
193                         }
194                 }
195         }
196
197         /* In INIT state, Activate IOPs */
198         for (pHba = hba_chain; pHba; pHba = pHba->next) {
199                 // Activate does get status, init outbound, and get hrt
200                 if (adpt_i2o_activate_hba(pHba) < 0) {
201                         adpt_i2o_delete_hba(pHba);
202                 }
203         }
204
205
206         /* Active IOPs in HOLD state */
207
208 rebuild_sys_tab:
209         if (hba_chain == NULL) 
210                 return 0;
211
212         /*
213          * If build_sys_table fails, we kill everything and bail
214          * as we can't init the IOPs w/o a system table
215          */     
216         if (adpt_i2o_build_sys_table() < 0) {
217                 adpt_i2o_sys_shutdown();
218                 return 0;
219         }
220
221         PDEBUG("HBAs in HOLD state\n");
222
223         /* If an IOP doesn't come online, we need to rebuild the system table */
224         for (pHba = hba_chain; pHba; pHba = pHba->next) {
225                 if (adpt_i2o_online_hba(pHba) < 0) {
226                         adpt_i2o_delete_hba(pHba);      
227                         goto rebuild_sys_tab;
228                 }
229         }
230
231         /* Active IOPs now in OPERATIONAL state */
232         PDEBUG("HBAs in OPERATIONAL state\n");
233
234         printk("dpti: If you have a lot of devices this could take a few minutes.\n");
235         for (pHba = hba_chain; pHba; pHba = pHba->next) {
236                 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
237                 if (adpt_i2o_lct_get(pHba) < 0){
238                         adpt_i2o_delete_hba(pHba);
239                         continue;
240                 }
241
242                 if (adpt_i2o_parse_lct(pHba) < 0){
243                         adpt_i2o_delete_hba(pHba);
244                         continue;
245                 }
246                 adpt_inquiry(pHba);
247         }
248
249         for (pHba = hba_chain; pHba; pHba = pHba->next) {
250                 if( adpt_scsi_register(pHba,sht) < 0){
251                         adpt_i2o_delete_hba(pHba);
252                         continue;
253                 }
254                 pHba->initialized = TRUE;
255                 pHba->state &= ~DPTI_STATE_RESET;
256         }
257
258         // Register our control device node.
259         // Nodes will need to be created in /dev to access this;
260         // they cannot be created from within the driver.
261         if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
262                 adpt_i2o_sys_shutdown();
263                 return 0;
264         }
265         return hba_count;
266 }
267
268
269 /*
270  * scsi_unregister will be called AFTER we return. 
271  */
272 static int adpt_release(struct Scsi_Host *host)
273 {
274         adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
275 //      adpt_i2o_quiesce_hba(pHba);
276         adpt_i2o_delete_hba(pHba);
277         scsi_unregister(host);
278         return 0;
279 }
280
281
282 static void adpt_inquiry(adpt_hba* pHba)
283 {
284         u32 msg[14]; 
285         u32 *mptr;
286         u32 *lenptr;
287         int direction;
288         int scsidir;
289         u32 len;
290         u32 reqlen;
291         u8* buf;
292         u8  scb[16];
293         s32 rcode;
294
295         memset(msg, 0, sizeof(msg));
296         buf = (u8*)kmalloc(80,GFP_KERNEL|ADDR32);
297         if(!buf){
298                 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
299                 return;
300         }
301         memset((void*)buf, 0, 36);
302         
303         len = 36;
304         direction = 0x00000000; 
305         scsidir  =0x40000000;   // DATA IN  (iop<--dev)
306
307         reqlen = 14;            // SINGLE SGE
308         /* Stick the headers on */
309         msg[0] = reqlen<<16 | SGL_OFFSET_12;
310         msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
311         msg[2] = 0;
312         msg[3]  = 0;
313         // Adaptec/DPT Private stuff 
314         msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
315         msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
316         /* Direction, disconnect ok | sense data | simple queue , CDBLen */
317         // I2O_SCB_FLAG_ENABLE_DISCONNECT | 
318         // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
319         // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
320         msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
321
322         mptr=msg+7;
323
324         memset(scb, 0, sizeof(scb));
325         // Write SCSI command into the message - always 16 byte block 
326         scb[0] = INQUIRY;
327         scb[1] = 0;
328         scb[2] = 0;
329         scb[3] = 0;
330         scb[4] = 36;
331         scb[5] = 0;
332         // Don't care about the rest of scb
333
334         memcpy(mptr, scb, sizeof(scb));
335         mptr+=4;
336         lenptr=mptr++;          /* Remember me - fill in when we know */
337
338         /* Now fill in the SGList and command */
339         *lenptr = len;
340         *mptr++ = 0xD0000000|direction|len;
341         *mptr++ = virt_to_bus(buf);
342
343         // Send it on its way
344         rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
345         if (rcode != 0) {
346                 sprintf(pHba->detail, "Adaptec I2O RAID");
347                 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
348                 if (rcode != -ETIME && rcode != -EINTR)
349                         kfree(buf);
350         } else {
351                 memset(pHba->detail, 0, sizeof(pHba->detail));
352                 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
353                 memcpy(&(pHba->detail[16]), " Model: ", 8);
354                 memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
355                 memcpy(&(pHba->detail[40]), " FW: ", 4);
356                 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
357                 pHba->detail[48] = '\0';        /* precautionary */
358                 kfree(buf);
359         }
360         adpt_i2o_status_get(pHba);
361         return ;
362 }
363
364
365 static int adpt_slave_configure(struct scsi_device * device)
366 {
367         struct Scsi_Host *host = device->host;
368         adpt_hba* pHba;
369
370         pHba = (adpt_hba *) host->hostdata[0];
371
372         if (host->can_queue && device->tagged_supported) {
373                 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
374                                 host->can_queue - 1);
375         } else {
376                 scsi_adjust_queue_depth(device, 0, 1);
377         }
378         return 0;
379 }
380
381 static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
382 {
383         adpt_hba* pHba = NULL;
384         struct adpt_device* pDev = NULL;        /* dpt per device information */
385
386         cmd->scsi_done = done;
387         /*
388          * SCSI REQUEST_SENSE commands will be executed automatically by the 
389          * Host Adapter for any errors, so they should not be executed 
390          * explicitly unless the Sense Data is zero indicating that no error 
391          * occurred.
392          */
393
394         if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
395                 cmd->result = (DID_OK << 16);
396                 cmd->scsi_done(cmd);
397                 return 0;
398         }
399
400         pHba = (adpt_hba*)cmd->device->host->hostdata[0];
401         if (!pHba) {
402                 return FAILED;
403         }
404
405         rmb();
406         /*
407          * TODO: I need to block here if I am processing ioctl cmds
408          * but if the outstanding cmds all finish before the ioctl,
409          * the scsi-core will not know to start sending cmds to me again.
410          * I need a way to restart the scsi-core's queues, or I should
411          * instead hold off calling scsi_done on the outstanding cmds.
412          * For now we don't set the IOCTL state.
413          */
414         if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
415                 pHba->host->last_reset = jiffies;
416                 pHba->host->resetting = 1;
417                 return 1;
418         }
419
420         // TODO if the cmd->device is offline then I may need to issue a bus rescan
421         // followed by a get_lct to see if the device is still there
422         if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
423                 /*
424                  * First command request for this device.  Set up a pointer
425                  * to the device structure.  This should be a TEST_UNIT_READY
426                  * command from scan_scsis_single.
427                  */
428                 if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
429                         // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response 
430                         // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
431                         cmd->result = (DID_NO_CONNECT << 16);
432                         cmd->scsi_done(cmd);
433                         return 0;
434                 }
435                 cmd->device->hostdata = pDev;
436         }
437         pDev->pScsi_dev = cmd->device;
438
439         /*
440          * If we are being called from when the device is being reset, 
441          * delay processing of the command until later.
442          */
443         if (pDev->state & DPTI_DEV_RESET ) {
444                 return FAILED;
445         }
446         return adpt_scsi_to_i2o(pHba, cmd, pDev);
447 }
448
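/* Fake a BIOS-style drive geometry from the capacity.  As a worked example
 * (illustrative only): a 4 GB disk has capacity 0x800000 sectors, which
 * falls in the last bucket below, so heads = 255, sectors = 63 and
 * cylinders = 0x800000 / (255 * 63) = 522.
 */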
449 static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
450                 sector_t capacity, int geom[])
451 {
452         int heads=-1;
453         int sectors=-1;
454         int cylinders=-1;
455
456         // *** First let's set the default geometry ****
457         
458         // If the capacity is less than 0x2000
459         if (capacity < 0x2000 ) {       // floppy
460                 heads = 18;
461                 sectors = 2;
462         } 
463         // else if between 0x2000 and 0x20000
464         else if (capacity < 0x20000) {
465                 heads = 64;
466                 sectors = 32;
467         }
468         // else if between 0x20000 and 0x40000
469         else if (capacity < 0x40000) {
470                 heads = 65;
471                 sectors = 63;
472         }
473         // else if between 0x40000 and 0x80000
474         else if (capacity < 0x80000) {
475                 heads = 128;
476                 sectors = 63;
477         }
478         // else if greater than 0x80000
479         else {
480                 heads = 255;
481                 sectors = 63;
482         }
483         sector_div(capacity, heads * sectors);  /* quotient is left in capacity */
            cylinders = (int)capacity;
484
485         // Special case if CDROM
486         if(sdev->type == 5) {  // CDROM
487                 heads = 252;
488                 sectors = 63;
489                 cylinders = 1111;
490         }
491
492         geom[0] = heads;
493         geom[1] = sectors;
494         geom[2] = cylinders;
495         
496         PDEBUG("adpt_bios_param: exit\n");
497         return 0;
498 }
499
500
501 static const char *adpt_info(struct Scsi_Host *host)
502 {
503         adpt_hba* pHba;
504
505         pHba = (adpt_hba *) host->hostdata[0];
506         return (char *) (pHba->detail);
507 }
508
509 static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
510                   int length, int inout)
511 {
512         struct adpt_device* d;
513         int id;
514         int chan;
515         int len = 0;
516         int begin = 0;
517         int pos = 0;
518         adpt_hba* pHba;
519         int unit;
520
521         *start = buffer;
522         if (inout == TRUE) {
523                 /*
524                  * The user has done a write and wants us to take the
525                  * data in the buffer and do something with it.
526                  * proc_scsiwrite calls us with inout = 1
527                  *
528                  * Read data from buffer (writing to us) - NOT SUPPORTED
529                  */
530                 return -EINVAL;
531         }
532
533         /*
534          * inout = 0 means the user has done a read and wants information
535          * returned, so we write information about the cards into the buffer
536          * proc_scsiread() calls us with inout = 0
537          */
538
539         // Find HBA (host bus adapter) we are looking for
540         down(&adpt_configuration_lock);
541         for (pHba = hba_chain; pHba; pHba = pHba->next) {
542                 if (pHba->host == host) {
543                         break;  /* found adapter */
544                 }
545         }
546         up(&adpt_configuration_lock);
547         if (pHba == NULL) {
548                 return 0;
549         }
550         host = pHba->host;
551
552         len  = sprintf(buffer    , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
553         len += sprintf(buffer+len, "%s\n", pHba->detail);
554         len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n", 
555                         pHba->host->host_no, pHba->name, host->irq);
556         len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
557                         host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
558
559         pos = begin + len;
560
561         /* CHECKPOINT */
562         if(pos > offset + length) {
563                 goto stop_output;
564         }
565         if(pos <= offset) {
566                 /*
567                  * If we haven't even written to where we last left
568                  * off (the last time we were called), reset the 
569                  * beginning pointer.
570                  */
571                 len = 0;
572                 begin = pos;
573         }
574         len +=  sprintf(buffer+len, "Devices:\n");
575         for(chan = 0; chan < MAX_CHANNEL; chan++) {
576                 for(id = 0; id < MAX_ID; id++) {
577                         d = pHba->channel[chan].device[id];
578                         while(d){
579                                 len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
580                                 len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
581                                 pos = begin + len;
582
583
584                                 /* CHECKPOINT */
585                                 if(pos > offset + length) {
586                                         goto stop_output;
587                                 }
588                                 if(pos <= offset) {
589                                         len = 0;
590                                         begin = pos;
591                                 }
592
593                                 unit = d->pI2o_dev->lct_data.tid;
594                                 len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
595                                                unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
596                                                scsi_device_online(d->pScsi_dev)? "online":"offline"); 
597                                 pos = begin + len;
598
599                                 /* CHECKPOINT */
600                                 if(pos > offset + length) {
601                                         goto stop_output;
602                                 }
603                                 if(pos <= offset) {
604                                         len = 0;
605                                         begin = pos;
606                                 }
607
608                                 d = d->next_lun;
609                         }
610                 }
611         }
612
613         /*
614          * begin is where we last checked our position with regard to offset;
615          * begin is always less than offset.  len is relative to begin.  It
616          * is the number of bytes written past begin.
617          *
618          */
619 stop_output:
620         /* stop the output and calculate the correct length */
621         *(buffer + len) = '\0';
622
623         *start = buffer + (offset - begin);     /* Start of wanted data */
624         len -= (offset - begin);
625         if(len > length) {
626                 len = length;
627         } else if(len < 0){
628                 len = 0;
629                 **start = '\0';
630         }
631         return len;
632 }
633
634
635 /*===========================================================================
636  * Error Handling routines
637  *===========================================================================
638  */
639
640 static int adpt_abort(struct scsi_cmnd * cmd)
641 {
642         adpt_hba* pHba = NULL;  /* host bus adapter structure */
643         struct adpt_device* dptdevice;  /* dpt per device information */
644         u32 msg[5];
645         int rcode;
646
647         if(cmd->serial_number == 0){
648                 return FAILED;
649         }
650         pHba = (adpt_hba*) cmd->device->host->hostdata[0];
651         printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
652         if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
653                 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
654                 return FAILED;
655         }
656
657         memset(msg, 0, sizeof(msg));
658         msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
659         msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
660         msg[2] = 0;
661         msg[3]= 0; 
662         msg[4] = (u32)cmd;
663         if( (rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER)) != 0){
664                 if(rcode == -EOPNOTSUPP ){
665                         printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
666                         return FAILED;
667                 }
668                 printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
669                 return FAILED;
670         } 
671         printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
672         return SUCCESS;
673 }
674
675
676 #define I2O_DEVICE_RESET 0x27
677 // This is the same for BLK and SCSI devices
678 // NOTE this is wrong in the i2o.h definitions
679 // This is not currently supported by our adapter but we issue it anyway
680 static int adpt_device_reset(struct scsi_cmnd* cmd)
681 {
682         adpt_hba* pHba;
683         u32 msg[4];
684         u32 rcode;
685         int old_state;
686         struct adpt_device* d = cmd->device->hostdata;
687
688         pHba = (void*) cmd->device->host->hostdata[0];
689         printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
690         if (!d) {
691                 printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
692                 return FAILED;
693         }
694         memset(msg, 0, sizeof(msg));
695         msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
696         msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
697         msg[2] = 0;
698         msg[3] = 0;
699
700         old_state = d->state;
701         d->state |= DPTI_DEV_RESET;
702         if( (rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER)) ){
703                 d->state = old_state;
704                 if(rcode == -EOPNOTSUPP ){
705                         printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
706                         return FAILED;
707                 }
708                 printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
709                 return FAILED;
710         } else {
711                 d->state = old_state;
712                 printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
713                 return SUCCESS;
714         }
715 }
716
717
718 #define I2O_HBA_BUS_RESET 0x87
719 // This version of bus reset is called by the eh_error handler
720 static int adpt_bus_reset(struct scsi_cmnd* cmd)
721 {
722         adpt_hba* pHba;
723         u32 msg[4];
724
725         pHba = (adpt_hba*)cmd->device->host->hostdata[0];
726         memset(msg, 0, sizeof(msg));
727         printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
728         msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
729         msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
730         msg[2] = 0;
731         msg[3] = 0;
732         if(adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER) ){
733                 printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
734                 return FAILED;
735         } else {
736                 printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
737                 return SUCCESS;
738         }
739 }
740
741 // This version of reset is called by the eh_error_handler
742 static int __adpt_reset(struct scsi_cmnd* cmd)
743 {
744         adpt_hba* pHba;
745         int rcode;
746         pHba = (adpt_hba*)cmd->device->host->hostdata[0];
747         printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
748         rcode =  adpt_hba_reset(pHba);
749         if(rcode == 0){
750                 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
751                 return SUCCESS;
752         } else {
753                 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
754                 return FAILED;
755         }
756 }
757
758 static int adpt_reset(struct scsi_cmnd* cmd)
759 {
760         int rc;
761
762         spin_lock_irq(cmd->device->host->host_lock);
763         rc = __adpt_reset(cmd);
764         spin_unlock_irq(cmd->device->host->host_lock);
765
766         return rc;
767 }
768
769 // This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
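// The sequence mirrors adpt_detect(): activate the IOP (get status, init
// outbound queue, get HRT), rebuild the system table, bring the IOP online,
// re-read and re-parse the LCT, then fail any SCBs that were posted before
// the reset so the midlayer can retry them.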
770 static int adpt_hba_reset(adpt_hba* pHba)
771 {
772         int rcode;
773
774         pHba->state |= DPTI_STATE_RESET;
775
776         // Activate does get status, init outbound, and get hrt
777         if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
778                 printk(KERN_ERR "%s: Could not activate\n", pHba->name);
779                 adpt_i2o_delete_hba(pHba);
780                 return rcode;
781         }
782
783         if ((rcode=adpt_i2o_build_sys_table()) < 0) {
784                 adpt_i2o_delete_hba(pHba);
785                 return rcode;
786         }
787         PDEBUG("%s: in HOLD state\n",pHba->name);
788
789         if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
790                 adpt_i2o_delete_hba(pHba);      
791                 return rcode;
792         }
793         PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
794
795         if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
796                 adpt_i2o_delete_hba(pHba);
797                 return rcode;
798         }
799
800         if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
801                 adpt_i2o_delete_hba(pHba);
802                 return rcode;
803         }
804         pHba->state &= ~DPTI_STATE_RESET;
805
806         adpt_fail_posted_scbs(pHba);
807         return 0;       /* return success */
808 }
809
810 /*===========================================================================
811  * 
812  *===========================================================================
813  */
814
815
816 static void adpt_i2o_sys_shutdown(void)
817 {
818         adpt_hba *pHba, *pNext;
819         struct adpt_i2o_post_wait_data *p1, *p2;
820
821          printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
822          printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
823         /* Delete all IOPs from the controller chain */
824         /* They should have already been released by the
825          * scsi-core
826          */
827         for (pHba = hba_chain; pHba; pHba = pNext) {
828                 pNext = pHba->next;
829                 adpt_i2o_delete_hba(pHba);
830         }
831
832         /* Remove any timed-out entries from the wait queue.  */
833         p2 = NULL;
834 //      spin_lock_irqsave(&adpt_post_wait_lock, flags);
835         /* Nothing should be outstanding at this point so just
836          * free them 
837          */
838         for(p1 = adpt_post_wait_queue; p1; p1 = p2) {
839                 p2 = p1->next;
                kfree(p1);
840         }
841 //      spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
842         adpt_post_wait_queue = NULL;
843
844          printk(KERN_INFO "Adaptec I2O controllers down.\n");
845 }
846
847 /*
848  * reboot/shutdown notification.
849  *
850  * - Quiesce each IOP in the system
851  *
852  */
853
854 #ifdef REBOOT_NOTIFIER
855 static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
856 {
857
858          if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
859                   return NOTIFY_DONE;
860
861          adpt_i2o_sys_shutdown();
862
863          return NOTIFY_DONE;
864 }
865 #endif
866
867
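/* Map one controller.  A plain PCI card exposes everything through BAR0
 * (clamped to 1 MB), while the split-BAR Raptor boards need BAR1 for the
 * message unit, and Raptor cards that report the standard device id need a
 * 4 MB BAR0 window.  In either layout the registers used elsewhere sit at
 * fixed offsets from the base: IRQ mask at +0x30, inbound post FIFO at
 * +0x40 and outbound reply FIFO at +0x44.
 */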
868 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) 
869 {
870
871         adpt_hba* pHba = NULL;
872         adpt_hba* p = NULL;
873         ulong base_addr0_phys = 0;
874         ulong base_addr1_phys = 0;
875         u32 hba_map0_area_size = 0;
876         u32 hba_map1_area_size = 0;
877         void __iomem *base_addr_virt = NULL;
878         void __iomem *msg_addr_virt = NULL;
879
880         int raptorFlag = FALSE;
881
882         if(pci_enable_device(pDev)) {
883                 return -EINVAL;
884         }
885         pci_set_master(pDev);
886         if (pci_set_dma_mask(pDev, 0xffffffffffffffffULL) &&
887             pci_set_dma_mask(pDev, 0xffffffffULL))
888                 return -EINVAL;
889
890         base_addr0_phys = pci_resource_start(pDev,0);
891         hba_map0_area_size = pci_resource_len(pDev,0);
892
893         // Check if standard PCI card or single BAR Raptor
894         if(pDev->device == PCI_DPT_DEVICE_ID){
895                 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
896                         // Raptor card with this device id needs 4M
897                         hba_map0_area_size = 0x400000;
898                 } else { // Not Raptor - it is a PCI card
899                         if(hba_map0_area_size > 0x100000 ){ 
900                                 hba_map0_area_size = 0x100000;
901                         }
902                 }
903         } else {// Raptor split BAR config
904                 // Use BAR1 in this configuration
905                 base_addr1_phys = pci_resource_start(pDev,1);
906                 hba_map1_area_size = pci_resource_len(pDev,1);
907                 raptorFlag = TRUE;
908         }
909
910         if (pci_request_regions(pDev, "dpt_i2o")) {
911                 PERROR("dpti: adpt_config_hba: pci request region failed\n");
912                 return -EINVAL;
913         }
914         base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
915         if (!base_addr_virt) {
916                 pci_release_regions(pDev);
917                 PERROR("dpti: adpt_config_hba: io remap failed\n");
918                 return -EINVAL;
919         }
920
921         if(raptorFlag == TRUE) {
922                 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
923                 if (!msg_addr_virt) {
924                         PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
925                         iounmap(base_addr_virt);
926                         pci_release_regions(pDev);
927                         return -EINVAL;
928                 }
929         } else {
930                 msg_addr_virt = base_addr_virt;
931         }
932         
933         // Allocate and zero the data structure
934         pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL);
935         if( pHba == NULL) {
936                 if(msg_addr_virt != base_addr_virt){
937                         iounmap(msg_addr_virt);
938                 }
939                 iounmap(base_addr_virt);
940                 pci_release_regions(pDev);
941                 return -ENOMEM;
942         }
943         memset(pHba, 0, sizeof(adpt_hba));
944
945         down(&adpt_configuration_lock);
946
947         if(hba_chain != NULL){
948                 for(p = hba_chain; p->next; p = p->next);
949                 p->next = pHba;
950         } else {
951                 hba_chain = pHba;
952         }
953         pHba->next = NULL;
954         pHba->unit = hba_count;
955         sprintf(pHba->name, "dpti%d", hba_count);
956         hba_count++;
957         
958         up(&adpt_configuration_lock);
959
960         pHba->pDev = pDev;
961         pHba->base_addr_phys = base_addr0_phys;
962
963         // Set up the Virtual Base Address of the I2O Device
964         pHba->base_addr_virt = base_addr_virt;
965         pHba->msg_addr_virt = msg_addr_virt;
966         pHba->irq_mask = base_addr_virt+0x30;
967         pHba->post_port = base_addr_virt+0x40;
968         pHba->reply_port = base_addr_virt+0x44;
969
970         pHba->hrt = NULL;
971         pHba->lct = NULL;
972         pHba->lct_size = 0;
973         pHba->status_block = NULL;
974         pHba->post_count = 0;
975         pHba->state = DPTI_STATE_RESET;
976         pHba->pDev = pDev;
977         pHba->devices = NULL;
978
979         // Initializing the spinlocks
980         spin_lock_init(&pHba->state_lock);
981         spin_lock_init(&adpt_post_wait_lock);
982
983         if(raptorFlag == 0){
984                 printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n", 
985                         hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
986         } else {
987                 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
988                 printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
989                 printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
990         }
991
992         if (request_irq (pDev->irq, adpt_isr, SA_SHIRQ, pHba->name, pHba)) {
993                 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
994                 adpt_i2o_delete_hba(pHba);
995                 return -EINVAL;
996         }
997
998         return 0;
999 }
1000
1001
1002 static void adpt_i2o_delete_hba(adpt_hba* pHba)
1003 {
1004         adpt_hba* p1;
1005         adpt_hba* p2;
1006         struct i2o_device* d;
1007         struct i2o_device* next;
1008         int i;
1009         int j;
1010         struct adpt_device* pDev;
1011         struct adpt_device* pNext;
1012
1013
1014         down(&adpt_configuration_lock);
1015         // scsi_unregister calls our adpt_release which
1016         // does a quiesce
1017         if(pHba->host){
1018                 free_irq(pHba->host->irq, pHba);
1019         }
1020         p2 = NULL;
1021         for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1022                 if(p1 == pHba) {
1023                         if(p2) {
1024                                 p2->next = p1->next;
1025                         } else {
1026                                 hba_chain = p1->next;
1027                         }
1028                         break;
1029                 }
1030         }
1031
1032         hba_count--;
1033         up(&adpt_configuration_lock);
1034
1035         iounmap(pHba->base_addr_virt);
1036         pci_release_regions(pHba->pDev);
1037         if(pHba->msg_addr_virt != pHba->base_addr_virt){
1038                 iounmap(pHba->msg_addr_virt);
1039         }
1040         kfree(pHba->hrt);
1041         kfree(pHba->lct);
1042         kfree(pHba->status_block);
1043         kfree(pHba->reply_pool);
1044
1045         for(d = pHba->devices; d ; d = next){
1046                 next = d->next;
1047                 kfree(d);
1048         }
1049         for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1050                 for(j = 0; j < MAX_ID; j++){
1051                         if(pHba->channel[i].device[j] != NULL){
1052                                 for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1053                                         pNext = pDev->next_lun;
1054                                         kfree(pDev);
1055                                 }
1056                         }
1057                 }
1058         }
1059         kfree(pHba);
1060
1061         if(hba_count <= 0){
1062                 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);   
1063         }
1064 }
1065
1066
1067 static int adpt_init(void)
1068 {
1069         printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
1070 #ifdef REBOOT_NOTIFIER
1071         register_reboot_notifier(&adpt_reboot_notifier);
1072 #endif
1073
1074         return 0;
1075 }
1076
1077
1078 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1079 {
1080         struct adpt_device* d;
1081
1082         if(chan < 0 || chan >= MAX_CHANNEL)
1083                 return NULL;
1084         
1085         if( pHba->channel[chan].device == NULL){
1086                 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1087                 return NULL;
1088         }
1089
1090         d = pHba->channel[chan].device[id];
1091         if(!d || d->tid == 0) {
1092                 return NULL;
1093         }
1094
1095         /* If it is the only lun at that address then this should match*/
1096         if(d->scsi_lun == lun){
1097                 return d;
1098         }
1099
1100         /* else we need to look through all the luns */
1101         for(d=d->next_lun ; d ; d = d->next_lun){
1102                 if(d->scsi_lun == lun){
1103                         return d;
1104                 }
1105         }
1106         return NULL;
1107 }
1108
1109
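/* Post a message and sleep until the IOP replies or the timeout expires.
 * A minimal caller sketch (modeled on adpt_device_reset(); the tid value
 * here is illustrative):
 *
 *	u32 msg[4];
 *	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
 *	msg[1] = I2O_DEVICE_RESET << 24 | HOST_TID << 12 | tid;
 *	msg[2] = 0;	// context; adpt_i2o_post_wait() fills in the id
 *	msg[3] = 0;
 *	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
 *
 * A return of 0 means the reply arrived with a good status; -ETIMEDOUT,
 * -ETIME and -EOPNOTSUPP are returned for timeouts and unsupported
 * functions respectively.
 */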
1110 static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1111 {
1112         // I used my own version of the WAIT_QUEUE_HEAD
1113         // to handle some version differences
1114         // When embedded in the kernel this could go back to the vanilla one
1115         ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1116         int status = 0;
1117         ulong flags = 0;
1118         struct adpt_i2o_post_wait_data *p1, *p2;
1119         struct adpt_i2o_post_wait_data *wait_data =
1120                 kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
1121         DECLARE_WAITQUEUE(wait, current);
1122
1123         if (!wait_data)
1124                 return -ENOMEM;
1125
1126         /*
1127          * The spin locking is needed to keep anyone from playing
1128          * with the queue pointers and id while we do the same
1129          */
1130         spin_lock_irqsave(&adpt_post_wait_lock, flags);
1131        // TODO we need a MORE unique way of getting ids
1132        // to support async LCT get
1133         wait_data->next = adpt_post_wait_queue;
1134         adpt_post_wait_queue = wait_data;
1135         adpt_post_wait_id++;
1136         adpt_post_wait_id &= 0x7fff;
1137         wait_data->id =  adpt_post_wait_id;
1138         spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1139
1140         wait_data->wq = &adpt_wq_i2o_post;
1141         wait_data->status = -ETIMEDOUT;
1142
1143         add_wait_queue(&adpt_wq_i2o_post, &wait);
1144
1145         msg[2] |= 0x80000000 | ((u32)wait_data->id);
1146         timeout *= HZ;
1147         if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1148                 set_current_state(TASK_INTERRUPTIBLE);
1149                 if(pHba->host)
1150                         spin_unlock_irq(pHba->host->host_lock);
1151                 if (!timeout)
1152                         schedule();
1153                 else{
1154                         timeout = schedule_timeout(timeout);
1155                         if (timeout == 0) {
1156                                 // I/O issued, but cannot get result in
1157                                 // specified time. Freeing resources is
1158                                 // dangerous.
1159                                 status = -ETIME;
1160                         }
1161                 }
1162                 if(pHba->host)
1163                         spin_lock_irq(pHba->host->host_lock);
1164         }
1165         remove_wait_queue(&adpt_wq_i2o_post, &wait);
1166
1167         if(status == -ETIMEDOUT){
1168                 printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1169                 // We will have to free the wait_data memory during shutdown
1170                 return status;
1171         }
1172
1173         /* Remove the entry from the queue.  */
1174         p2 = NULL;
1175         spin_lock_irqsave(&adpt_post_wait_lock, flags);
1176         for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1177                 if(p1 == wait_data) {
1178                         if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1179                                 status = -EOPNOTSUPP;
1180                         }
1181                         if(p2) {
1182                                 p2->next = p1->next;
1183                         } else {
1184                                 adpt_post_wait_queue = p1->next;
1185                         }
1186                         break;
1187                 }
1188         }
1189         spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1190
1191         kfree(wait_data);
1192
1193         return status;
1194 }
1195
1196
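/* Low-level inbound FIFO handshake: reading post_port either returns
 * EMPTY_QUEUE or the offset of a free message frame; the request is copied
 * into that frame with memcpy_toio() and writing the same offset back to
 * post_port hands the frame to the IOP.  Waits up to 30 seconds for a free
 * frame before giving up with -ETIMEDOUT.
 */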
1197 static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1198 {
1199
1200         u32 m = EMPTY_QUEUE;
1201         u32 __iomem *msg;
1202         ulong timeout = jiffies + 30*HZ;
1203         do {
1204                 rmb();
1205                 m = readl(pHba->post_port);
1206                 if (m != EMPTY_QUEUE) {
1207                         break;
1208                 }
1209                 if(time_after(jiffies,timeout)){
1210                         printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1211                         return -ETIMEDOUT;
1212                 }
1213                 schedule_timeout_uninterruptible(1);
1214         } while(m == EMPTY_QUEUE);
1215                 
1216         msg = pHba->msg_addr_virt + m;
1217         memcpy_toio(msg, data, len);
1218         wmb();
1219
1220         //post message
1221         writel(m, pHba->post_port);
1222         wmb();
1223
1224         return 0;
1225 }
1226
1227
1228 static void adpt_i2o_post_wait_complete(u32 context, int status)
1229 {
1230         struct adpt_i2o_post_wait_data *p1 = NULL;
1231         /*
1232          * We need to search through the adpt_post_wait
1233          * queue to see if the given message is still
1234          * outstanding.  If not, it means that the IOP
1235          * took longer to respond to the message than we
1236          * had allowed and the timer has already expired.
1237          * Not much we can do about that except log
1238          * it for debug purposes, increase the timeout, and recompile.
1239          *
1240          * Lock needed to keep anyone from moving queue pointers
1241          * around while we're looking through them.
1242          */
1243
1244         context &= 0x7fff;
1245
1246         spin_lock(&adpt_post_wait_lock);
1247         for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1248                 if(p1->id == context) {
1249                         p1->status = status;
1250                         spin_unlock(&adpt_post_wait_lock);
1251                         wake_up_interruptible(p1->wq);
1252                         return;
1253                 }
1254         }
1255         spin_unlock(&adpt_post_wait_lock);
1256         // If this happens we lose commands that probably really completed
1257         printk(KERN_DEBUG"dpti: Could not find task %d in wait queue\n",context);
1258         printk(KERN_DEBUG"      Tasks in wait queue:\n");
1259         for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1260                 printk(KERN_DEBUG"           %d\n",p1->id);
1261         }
1262         return;
1263 }
1264
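/* IOP reset: a 4-byte status buffer is allocated and its bus address placed
 * in an I2O_CMD_ADAPTER_RESET frame; the buffer is then polled until the
 * IOP writes a result.  0x01 means the reset is still in progress (wait for
 * a message frame and flush it with a NOP), 0x02 means the reset was
 * rejected.
 */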
1265 static s32 adpt_i2o_reset_hba(adpt_hba* pHba)                   
1266 {
1267         u32 msg[8];
1268         u8* status;
1269         u32 m = EMPTY_QUEUE ;
1270         ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1271
1272         if(pHba->initialized  == FALSE) {       // First time reset should be quick
1273                 timeout = jiffies + (25*HZ);
1274         } else {
1275                 adpt_i2o_quiesce_hba(pHba);
1276         }
1277
1278         do {
1279                 rmb();
1280                 m = readl(pHba->post_port);
1281                 if (m != EMPTY_QUEUE) {
1282                         break;
1283                 }
1284                 if(time_after(jiffies,timeout)){
1285                         printk(KERN_WARNING"Timeout waiting for message!\n");
1286                         return -ETIMEDOUT;
1287                 }
1288                 schedule_timeout_uninterruptible(1);
1289         } while (m == EMPTY_QUEUE);
1290
1291         status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32);
1292         if(status == NULL) {
1293                 adpt_send_nop(pHba, m);
1294                 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1295                 return -ENOMEM;
1296         }
1297         memset(status,0,4);
1298
1299         msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1300         msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1301         msg[2]=0;
1302         msg[3]=0;
1303         msg[4]=0;
1304         msg[5]=0;
1305         msg[6]=virt_to_bus(status);
1306         msg[7]=0;     
1307
1308         memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1309         wmb();
1310         writel(m, pHba->post_port);
1311         wmb();
1312
1313         while(*status == 0){
1314                 if(time_after(jiffies,timeout)){
1315                         printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1316                         kfree(status);
1317                         return -ETIMEDOUT;
1318                 }
1319                 rmb();
1320                 schedule_timeout_uninterruptible(1);
1321         }
1322
1323         if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1324                 PDEBUG("%s: Reset in progress...\n", pHba->name);
1325                 // Here we wait for message frame to become available
1326                 // indicated that reset has finished
1327                 do {
1328                         rmb();
1329                         m = readl(pHba->post_port);
1330                         if (m != EMPTY_QUEUE) {
1331                                 break;
1332                         }
1333                         if(time_after(jiffies,timeout)){
1334                                 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1335                                 return -ETIMEDOUT;
1336                         }
1337                         schedule_timeout_uninterruptible(1);
1338                 } while (m == EMPTY_QUEUE);
1339                 // Flush the offset
1340                 adpt_send_nop(pHba, m);
1341         }
1342         adpt_i2o_status_get(pHba);
1343         if(*status == 0x02 ||
1344                         pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1345                 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1346                                 pHba->name);
1347         } else {
1348                 PDEBUG("%s: Reset completed.\n", pHba->name);
1349         }
1350
1351         kfree(status);
1352 #ifdef UARTDELAY
1353         // This delay is to allow someone attached to the card through the debug UART to 
1354         // set up the dump levels that they want before the rest of the initialization sequence
1355         adpt_delay(20000);
1356 #endif
1357         return 0;
1358 }
1359
1360
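/* Walk the Logical Configuration Table.  Each usable entry is queried with
 * scalar group 0x8000 (the DPT device-info group) to recover its bus, SCSI
 * id and lun; bus adapter ports become channel entries, and block/SCSI/FC
 * peripheral devices are turned into adpt_device nodes hung off
 * pHba->channel[bus].device[id], chained per lun through next_lun.
 */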
1361 static int adpt_i2o_parse_lct(adpt_hba* pHba)
1362 {
1363         int i;
1364         int max;
1365         int tid;
1366         struct i2o_device *d;
1367         i2o_lct *lct = pHba->lct;
1368         u8 bus_no = 0;
1369         s16 scsi_id;
1370         s16 scsi_lun;
1371         u32 buf[10]; // larger than 7, or 8 ...
1372         struct adpt_device* pDev; 
1373         
1374         if (lct == NULL) {
1375                 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1376                 return -1;
1377         }
1378         
1379         max = lct->table_size;  
1380         max -= 3;
1381         max /= 9;
1382
1383         for(i=0;i<max;i++) {
1384                 if( lct->lct_entry[i].user_tid != 0xfff){
1385                         /*
1386                          * If we have hidden devices, we need to inform the upper layers about
1387                          * the possible maximum id reference to handle device access when
1388                          * an array is disassembled. This code has no other purpose but to
1389                          * allow us future access to devices that are currently hidden
1390                          * behind arrays, hotspares or have not been configured (JBOD mode).
1391                          */
1392                         if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1393                             lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1394                             lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1395                                 continue;
1396                         }
1397                         tid = lct->lct_entry[i].tid;
1398                         // I2O_DPT_DEVICE_INFO_GROUP_NO;
1399                         if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1400                                 continue;
1401                         }
1402                         bus_no = buf[0]>>16;
1403                         scsi_id = buf[1];
1404                         scsi_lun = (buf[2]>>8 )&0xff;
1405                         if(bus_no >= MAX_CHANNEL) {     // Something wrong skip it
1406                                 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1407                                 continue;
1408                         }
1409                         if (scsi_id >= MAX_ID){
1410                                 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
1411                                 continue;
1412                         }
1413                         if(bus_no > pHba->top_scsi_channel){
1414                                 pHba->top_scsi_channel = bus_no;
1415                         }
1416                         if(scsi_id > pHba->top_scsi_id){
1417                                 pHba->top_scsi_id = scsi_id;
1418                         }
1419                         if(scsi_lun > pHba->top_scsi_lun){
1420                                 pHba->top_scsi_lun = scsi_lun;
1421                         }
1422                         continue;
1423                 }
1424                 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1425                 if(d==NULL)
1426                 {
1427                         printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1428                         return -ENOMEM;
1429                 }
1430                 
1431                 d->controller = pHba;
1432                 d->next = NULL;
1433
1434                 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1435
1436                 d->flags = 0;
1437                 tid = d->lct_data.tid;
1438                 adpt_i2o_report_hba_unit(pHba, d);
1439                 adpt_i2o_install_device(pHba, d);
1440         }
1441         bus_no = 0;
1442         for(d = pHba->devices; d ; d = d->next) {
1443                 if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
1444                    d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
1445                         tid = d->lct_data.tid;
1446                         // TODO get the bus_no from hrt-but for now they are in order
1447                         //bus_no = 
1448                         if(bus_no > pHba->top_scsi_channel){
1449                                 pHba->top_scsi_channel = bus_no;
1450                         }
1451                         pHba->channel[bus_no].type = d->lct_data.class_id;
1452                         pHba->channel[bus_no].tid = tid;
1453                         if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1454                         {
1455                                 pHba->channel[bus_no].scsi_id = buf[1];
1456                                 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1457                         }
1458                         // TODO remove - this is just until we get from hrt
1459                         bus_no++;
1460                         if(bus_no >= MAX_CHANNEL) {     // Something wrong skip it
1461                                 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1462                                 break;
1463                         }
1464                 }
1465         }
1466
1467         // Setup adpt_device table
1468         for(d = pHba->devices; d ; d = d->next) {
1469                 if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1470                    d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
1471                    d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1472
1473                         tid = d->lct_data.tid;
1474                         scsi_id = -1;
1475                         // I2O_DPT_DEVICE_INFO_GROUP_NO;
1476                         if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1477                                 bus_no = buf[0]>>16;
1478                                 scsi_id = buf[1];
1479                                 scsi_lun = (buf[2]>>8 )&0xff;
1480                                 if(bus_no >= MAX_CHANNEL) {     // Something is wrong - skip it
1481                                         continue;
1482                                 }
1483                                 if (scsi_id >= MAX_ID) {
1484                                         continue;
1485                                 }
1486                                 if( pHba->channel[bus_no].device[scsi_id] == NULL){
1487                                         pDev =  kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
1488                                         if(pDev == NULL) {
1489                                                 return -ENOMEM;
1490                                         }
1491                                         pHba->channel[bus_no].device[scsi_id] = pDev;
1492                                         memset(pDev,0,sizeof(struct adpt_device));
1493                                 } else {
1494                                         for( pDev = pHba->channel[bus_no].device[scsi_id];      
1495                                                         pDev->next_lun; pDev = pDev->next_lun){
1496                                         }
1497                                         pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
1498                                         if(pDev->next_lun == NULL) {
1499                                                 return -ENOMEM;
1500                                         }
1501                                         memset(pDev->next_lun,0,sizeof(struct adpt_device));
1502                                         pDev = pDev->next_lun;
1503                                 }
1504                                 pDev->tid = tid;
1505                                 pDev->scsi_channel = bus_no;
1506                                 pDev->scsi_id = scsi_id;
1507                                 pDev->scsi_lun = scsi_lun;
1508                                 pDev->pI2o_dev = d;
1509                                 d->owner = pDev;
1510                                 pDev->type = (buf[0])&0xff;
1511                                 pDev->flags = (buf[0]>>8)&0xff;
1512                                 if(scsi_id > pHba->top_scsi_id){
1513                                         pHba->top_scsi_id = scsi_id;
1514                                 }
1515                                 if(scsi_lun > pHba->top_scsi_lun){
1516                                         pHba->top_scsi_lun = scsi_lun;
1517                                 }
1518                         }
1519                         if(scsi_id == -1){
1520                                 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1521                                                 d->lct_data.identity_tag);
1522                         }
1523                 }
1524         }
1525         return 0;
1526 }
1527
1528
1529 /*
1530  *      Each I2O controller has a chain of devices on it - these match
1531  *      the useful parts of the LCT of the board.
1532  */
1533  
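/* The chain is a doubly linked list headed at pHba->devices and protected
 * by adpt_configuration_lock; new devices are inserted at the head. */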
1534 static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1535 {
1536         down(&adpt_configuration_lock);
1537         d->controller=pHba;
1538         d->owner=NULL;
1539         d->next=pHba->devices;
1540         d->prev=NULL;
1541         if (pHba->devices != NULL){
1542                 pHba->devices->prev=d;
1543         }
1544         pHba->devices=d;
1545         *d->dev_name = 0;
1546
1547         up(&adpt_configuration_lock);
1548         return 0;
1549 }
1550
1551 static int adpt_open(struct inode *inode, struct file *file)
1552 {
1553         int minor;
1554         adpt_hba* pHba;
1555
1556         //TODO check for root access
1557         //
1558         minor = iminor(inode);
1559         if (minor >= hba_count) {
1560                 return -ENXIO;
1561         }
1562         down(&adpt_configuration_lock);
1563         for (pHba = hba_chain; pHba; pHba = pHba->next) {
1564                 if (pHba->unit == minor) {
1565                         break;  /* found adapter */
1566                 }
1567         }
1568         if (pHba == NULL) {
1569                 up(&adpt_configuration_lock);
1570                 return -ENXIO;
1571         }
1572
1573 //      if(pHba->in_use){
1574         //      up(&adpt_configuration_lock);
1575 //              return -EBUSY;
1576 //      }
1577
1578         pHba->in_use = 1;
1579         up(&adpt_configuration_lock);
1580
1581         return 0;
1582 }
1583
1584 static int adpt_close(struct inode *inode, struct file *file)
1585 {
1586         int minor;
1587         adpt_hba* pHba;
1588
1589         minor = iminor(inode);
1590         if (minor >= hba_count) {
1591                 return -ENXIO;
1592         }
1593         down(&adpt_configuration_lock);
1594         for (pHba = hba_chain; pHba; pHba = pHba->next) {
1595                 if (pHba->unit == minor) {
1596                         break;  /* found adapter */
1597                 }
1598         }
1599         up(&adpt_configuration_lock);
1600         if (pHba == NULL) {
1601                 return -ENXIO;
1602         }
1603
1604         pHba->in_use = 0;
1605
1606         return 0;
1607 }
1608
1609
1610 static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1611 {
1612         u32 msg[MAX_MESSAGE_SIZE];
1613         u32* reply = NULL;
1614         u32 size = 0;
1615         u32 reply_size = 0;
1616         u32 __user *user_msg = arg;
1617         u32 __user * user_reply = NULL;
1618         void *sg_list[pHba->sg_tablesize];
1619         u32 sg_offset = 0;
1620         u32 sg_count = 0;
1621         int sg_index = 0;
1622         u32 i = 0;
1623         u32 rcode = 0;
1624         void *p = NULL;
1625         ulong flags = 0;
1626
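        /*
         * Passthrough flow: copy the caller's I2O message frame in, swap each
         * simple SG element's user address for a kernel bounce buffer the IOP
         * can DMA to/from, post the message and wait for the reply, then copy
         * the SG data and the reply frame back out to user space.
         */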
1627         memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1628         // get user msg size in u32s 
1629         if(get_user(size, &user_msg[0])){
1630                 return -EFAULT;
1631         }
1632         size = size>>16;
1633
1634         user_reply = &user_msg[size];
1635         if(size > MAX_MESSAGE_SIZE){
1636                 return -EFAULT;
1637         }
1638         size *= 4; // Convert to bytes
1639
1640         /* Copy in the user's I2O command */
1641         if(copy_from_user(msg, user_msg, size)) {
1642                 return -EFAULT;
1643         }
1644         get_user(reply_size, &user_reply[0]);
1645         reply_size = reply_size>>16;
1646         if(reply_size > REPLY_FRAME_SIZE){
1647                 reply_size = REPLY_FRAME_SIZE;
1648         }
1649         reply_size *= 4;
1650         reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1651         if(reply == NULL) {
1652                 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1653                 return -ENOMEM;
1654         }
1655         memset(reply,0,REPLY_FRAME_SIZE*4);
1656         sg_offset = (msg[0]>>4)&0xf;
1657         msg[2] = 0x40000000; // IOCTL context
1658         msg[3] = (u32)reply;
1659         memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1660         if(sg_offset) {
1661                 // TODO 64bit fix
1662                 struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1663                 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1664                 if (sg_count > pHba->sg_tablesize){
1665                         printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1666                         kfree (reply);
1667                         return -EINVAL;
1668                 }
1669
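                /* Each sg_simple_element packs the byte count in the low 24 bits
                 * of flag_count and the SGL flags in the high byte (0x10 simple,
                 * 0x04 direction, 0xD0 end of list); addr_bus is a 32-bit bus
                 * address, hence the 64-bit TODOs in this path. */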
1670                 for(i = 0; i < sg_count; i++) {
1671                         int sg_size;
1672
1673                         if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1674                                 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1675                                 rcode = -EINVAL;
1676                                 goto cleanup;
1677                         }
1678                         sg_size = sg[i].flag_count & 0xffffff;      
1679                         /* Allocate memory for the transfer */
1680                         p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
1681                         if(!p) {
1682                                 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1683                                                 pHba->name,sg_size,i,sg_count);
1684                                 rcode = -ENOMEM;
1685                                 goto cleanup;
1686                         }
1687                         sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1688                         /* Copy in the user's SG buffer if necessary */
1689                         if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1690                                 // TODO 64bit fix
1691                                 if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
1692                                         printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1693                                         rcode = -EFAULT;
1694                                         goto cleanup;
1695                                 }
1696                         }
1697                         //TODO 64bit fix
1698                         sg[i].addr_bus = (u32)virt_to_bus(p);
1699                 }
1700         }
1701
1702         do {
1703                 if(pHba->host)
1704                         spin_lock_irqsave(pHba->host->host_lock, flags);
1705                 // This state stops any new commands from entering the
1706                 // controller while processing the ioctl
1707 //              pHba->state |= DPTI_STATE_IOCTL;
1708 //              We can't set this now - The scsi subsystem sets host_blocked and
1709 //              the queue empties and stops.  We need a way to restart the queue
1710                 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1711                 if (rcode != 0)
1712                         printk("adpt_i2o_passthru: post wait failed %d %p\n",
1713                                         rcode, reply);
1714 //              pHba->state &= ~DPTI_STATE_IOCTL;
1715                 if(pHba->host)
1716                         spin_unlock_irqrestore(pHba->host->host_lock, flags);
1717         } while(rcode == -ETIMEDOUT);  
1718
1719         if(rcode){
1720                 goto cleanup;
1721         }
1722
1723         if(sg_offset) {
1724         /* Copy the Scatter Gather buffers back to user space */
1725                 u32 j;
1726                 // TODO 64bit fix
1727                 struct sg_simple_element* sg;
1728                 int sg_size;
1729
1730                 // re-acquire the original message to correctly handle the sg copy operation
1731                 memset(&msg, 0, MAX_MESSAGE_SIZE*4); 
1732                 // get user msg size in u32s 
1733                 if(get_user(size, &user_msg[0])){
1734                         rcode = -EFAULT; 
1735                         goto cleanup; 
1736                 }
1737                 size = size>>16;
1738                 size *= 4;
1739                 /* Copy in the user's I2O command */
1740                 if (copy_from_user (msg, user_msg, size)) {
1741                         rcode = -EFAULT;
1742                         goto cleanup;
1743                 }
1744                 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1745
1746                 // TODO 64bit fix
1747                 sg       = (struct sg_simple_element*)(msg + sg_offset);
1748                 for (j = 0; j < sg_count; j++) {
1749                         /* Copy out the SG list to user's buffer if necessary */
1750                         if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1751                                 sg_size = sg[j].flag_count & 0xffffff; 
1752                                 // TODO 64bit fix
1753                                 if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
1754                                         printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1755                                         rcode = -EFAULT;
1756                                         goto cleanup;
1757                                 }
1758                         }
1759                 }
1760         } 
1761
1762         /* Copy back the reply to user space */
1763         if (reply_size) {
1764                 // we wrote our own values for context - now restore the user supplied ones
1765                 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1766                         printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1767                         rcode = -EFAULT;
1768                 }
1769                 if(copy_to_user(user_reply, reply, reply_size)) {
1770                         printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1771                         rcode = -EFAULT;
1772                 }
1773         }
1774
1775
1776 cleanup:
1777         if (rcode != -ETIME && rcode != -EINTR)
1778                 kfree (reply);
1779         while(sg_index) {
1780                 if(sg_list[--sg_index]) {
1781                         if (rcode != -ETIME && rcode != -EINTR)
1782                                 kfree(sg_list[sg_index]);
1783                 }
1784         }
1785         return rcode;
1786 }
1787
1788
1789 /*
1790  * This routine returns information about the system.  This does not affect
1791  * any logic, and if the info is wrong it doesn't matter.
1792  */
1793
1794 /* Get all the info we can not get from kernel services */
1795 static int adpt_system_info(void __user *buffer)
1796 {
1797         sysInfo_S si;
1798
1799         memset(&si, 0, sizeof(si));
1800
1801         si.osType = OS_LINUX;
1802         si.osMajorVersion = 0;
1803         si.osMinorVersion = 0;
1804         si.osRevision = 0;
1805         si.busType = SI_PCI_BUS;
1806         si.processorFamily = DPTI_sig.dsProcessorFamily;
1807
1808 #if defined __i386__ 
1809         adpt_i386_info(&si);
1810 #elif defined (__ia64__)
1811         adpt_ia64_info(&si);
1812 #elif defined(__sparc__)
1813         adpt_sparc_info(&si);
1814 #elif defined (__alpha__)
1815         adpt_alpha_info(&si);
1816 #else
1817         si.processorType = 0xff ;
1818 #endif
1819         if(copy_to_user(buffer, &si, sizeof(si))){
1820                 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1821                 return -EFAULT;
1822         }
1823
1824         return 0;
1825 }
1826
1827 #if defined __ia64__ 
1828 static void adpt_ia64_info(sysInfo_S* si)
1829 {
1830         // This is all the info we need for now
1831         // We will add more info as our new
1832         // management utility requires it
1833         si->processorType = PROC_IA64;
1834 }
1835 #endif
1836
1837
1838 #if defined __sparc__ 
1839 static void adpt_sparc_info(sysInfo_S* si)
1840 {
1841         // This is all the info we need for now
1842         // We will add more info as our new
1843         // management utility requires it
1844         si->processorType = PROC_ULTRASPARC;
1845 }
1846 #endif
1847
1848 #if defined __alpha__ 
1849 static void adpt_alpha_info(sysInfo_S* si)
1850 {
1851         // This is all the info we need for now
1852         // We will add more info as our new
1853         // management utility requires it
1854         si->processorType = PROC_ALPHA;
1855 }
1856 #endif
1857
1858 #if defined __i386__
1859
1860 static void adpt_i386_info(sysInfo_S* si)
1861 {
1862         // This is all the info we need for now
1863         // We will add more info as our new
1864         // management utility requires it
1865         switch (boot_cpu_data.x86) {
1866         case CPU_386:
1867                 si->processorType = PROC_386;
1868                 break;
1869         case CPU_486:
1870                 si->processorType = PROC_486;
1871                 break;
1872         case CPU_586:
1873                 si->processorType = PROC_PENTIUM;
1874                 break;
1875         default:  // Just in case 
1876                 si->processorType = PROC_PENTIUM;
1877                 break;
1878         }
1879 }
1880
1881 #endif
1882
1883
1884 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
1885               ulong arg)
1886 {
1887         int minor;
1888         int error = 0;
1889         adpt_hba* pHba;
1890         ulong flags = 0;
1891         void __user *argp = (void __user *)arg;
1892
1893         minor = iminor(inode);
1894         if (minor >= DPTI_MAX_HBA){
1895                 return -ENXIO;
1896         }
1897         down(&adpt_configuration_lock);
1898         for (pHba = hba_chain; pHba; pHba = pHba->next) {
1899                 if (pHba->unit == minor) {
1900                         break;  /* found adapter */
1901                 }
1902         }
1903         up(&adpt_configuration_lock);
1904         if(pHba == NULL){
1905                 return -ENXIO;
1906         }
1907
1908         while((volatile u32) pHba->state & DPTI_STATE_RESET )
1909                 schedule_timeout_uninterruptible(2);
1910
1911         switch (cmd) {
1912         // TODO: handle 3 cases
1913         case DPT_SIGNATURE:
1914                 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1915                         return -EFAULT;
1916                 }
1917                 break;
1918         case I2OUSRCMD:
1919                 return adpt_i2o_passthru(pHba, argp);
1920
1921         case DPT_CTRLINFO:{
1922                 drvrHBAinfo_S HbaInfo;
1923
1924 #define FLG_OSD_PCI_VALID 0x0001
1925 #define FLG_OSD_DMA       0x0002
1926 #define FLG_OSD_I2O       0x0004
1927                 memset(&HbaInfo, 0, sizeof(HbaInfo));
1928                 HbaInfo.drvrHBAnum = pHba->unit;
1929                 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1930                 HbaInfo.blinkState = adpt_read_blink_led(pHba);
1931                 HbaInfo.pciBusNum =  pHba->pDev->bus->number;
1932                 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn); 
1933                 HbaInfo.Interrupt = pHba->pDev->irq; 
1934                 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
1935                 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
1936                         printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
1937                         return -EFAULT;
1938                 }
1939                 break;
1940                 }
1941         case DPT_SYSINFO:
1942                 return adpt_system_info(argp);
1943         case DPT_BLINKLED:{
1944                 u32 value;
1945                 value = (u32)adpt_read_blink_led(pHba);
1946                 if (copy_to_user(argp, &value, sizeof(value))) {
1947                         return -EFAULT;
1948                 }
1949                 break;
1950                 }
1951         case I2ORESETCMD:
1952                 if(pHba->host)
1953                         spin_lock_irqsave(pHba->host->host_lock, flags);
1954                 adpt_hba_reset(pHba);
1955                 if(pHba->host)
1956                         spin_unlock_irqrestore(pHba->host->host_lock, flags);
1957                 break;
1958         case I2ORESCANCMD:
1959                 adpt_rescan(pHba);
1960                 break;
1961         default:
1962                 return -EINVAL;
1963         }
1964
1965         return error;
1966 }
1967
1968
1969 static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
1970 {
1971         struct scsi_cmnd* cmd;
1972         adpt_hba* pHba = dev_id;
1973         u32 m;
1974         void __iomem *reply;
1975         u32 status=0;
1976         u32 context;
1977         ulong flags = 0;
1978         int handled = 0;
1979
1980         if (pHba == NULL){
1981                 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
1982                 return IRQ_NONE;
1983         }
1984         if(pHba->host)
1985                 spin_lock_irqsave(pHba->host->host_lock, flags);
1986
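        // Drain the outbound FIFO: each read of reply_port yields the bus
        // address of a completed reply frame, which is handed back to the
        // free list (writel(m, reply_port)) once it has been processed.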
1987         while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
1988                 m = readl(pHba->reply_port);
1989                 if(m == EMPTY_QUEUE){
1990                         // Try twice then give up
1991                         rmb();
1992                         m = readl(pHba->reply_port);
1993                         if(m == EMPTY_QUEUE){ 
1994                                 // This really should not happen
1995                                 printk(KERN_ERR"dpti: Could not get reply frame\n");
1996                                 goto out;
1997                         }
1998                 }
1999                 reply = bus_to_virt(m);
2000
2001                 if (readl(reply) & MSG_FAIL) {
2002                         u32 old_m = readl(reply+28); 
2003                         void __iomem *msg;
2004                         u32 old_context;
2005                         PDEBUG("%s: Failed message\n",pHba->name);
2006                         if(old_m >= 0x100000){
2007                                 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2008                                 writel(m,pHba->reply_port);
2009                                 continue;
2010                         }
2011                         // Transaction context is 0 in failed reply frame
2012                         msg = pHba->msg_addr_virt + old_m;
2013                         old_context = readl(msg+12);
2014                         writel(old_context, reply+12);
2015                         adpt_send_nop(pHba, old_m);
2016                 } 
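                // The initiator context we wrote at post time routes the reply:
                // bit 0x40000000 marks an ioctl passthrough (copy the frame back
                // to the caller's buffer), bit 0x80000000 marks a post-wait message.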
2017                 context = readl(reply+8);
2018                 if(context & 0x40000000){ // IOCTL
2019                         void *p = (void *)readl(reply+12);
2020                         if( p != NULL) {
2021                                 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2022                         }
2023                         // All IOCTLs will also be post wait
2024                 }
2025                 if(context & 0x80000000){ // Post wait message
2026                         status = readl(reply+16);
2027                         if(status  >> 24){
2028                                 status &=  0xffff; /* Get detail status */
2029                         } else {
2030                                 status = I2O_POST_WAIT_OK;
2031                         }
2032                         if(!(context & 0x40000000)) {
2033                                 cmd = (struct scsi_cmnd*) readl(reply+12); 
2034                                 if(cmd != NULL) {
2035                                         printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2036                                 }
2037                         }
2038                         adpt_i2o_post_wait_complete(context, status);
2039                 } else { // SCSI message
2040                         cmd = (struct scsi_cmnd*) readl(reply+12); 
2041                         if(cmd != NULL){
2042                                 if(cmd->serial_number != 0) { // If not timed out
2043                                         adpt_i2o_to_scsi(reply, cmd);
2044                                 }
2045                         }
2046                 }
2047                 writel(m, pHba->reply_port);
2048                 wmb();
2049                 rmb();
2050         }
2051         handled = 1;
2052 out:    if(pHba->host)
2053                 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2054         return IRQ_RETVAL(handled);
2055 }
2056
2057 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2058 {
2059         int i;
2060         u32 msg[MAX_MESSAGE_SIZE];
2061         u32* mptr;
2062         u32 *lenptr;
2063         int direction;
2064         int scsidir;
2065         u32 len;
2066         u32 reqlen;
2067         s32 rcode;
2068
2069         memset(msg, 0 , sizeof(msg));
2070         len = cmd->request_bufflen;
2071         direction = 0x00000000; 
2072         
2073         scsidir = 0x00000000;                   // DATA NO XFER
2074         if(len) {
2075                 /*
2076                  * Set SCBFlags to indicate if data is being transferred
2077                  * in or out, or no data transfer
2078                  * Note:  Do not have to verify index is less than 0 since
2079                  * cmd->cmnd[0] is an unsigned char
2080                  */
2081                 switch(cmd->sc_data_direction){
2082                 case DMA_FROM_DEVICE:
2083                         scsidir  =0x40000000;   // DATA IN  (iop<--dev)
2084                         break;
2085                 case DMA_TO_DEVICE:
2086                         direction=0x04000000;   // SGL OUT
2087                         scsidir  =0x80000000;   // DATA OUT (iop-->dev)
2088                         break;
2089                 case DMA_NONE:
2090                         break;
2091                 case DMA_BIDIRECTIONAL:
2092                         scsidir  =0x40000000;   // DATA IN  (iop<--dev)
2093                         // Assume In - and continue;
2094                         break;
2095                 default:
2096                         printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2097                              pHba->name, cmd->cmnd[0]);
2098                         cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2099                         cmd->scsi_done(cmd);
2100                         return  0;
2101                 }
2102         }
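        // Build the DPT private I2O_CMD_SCSI_EXEC request: header in msg[0..6],
        // the 16-byte CDB block in msg[7..10], the transfer length in msg[11],
        // and the SG list (or a single contiguous SGE) starting at msg[12].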
2103         // msg[0] is set later
2104         // I2O_CMD_SCSI_EXEC
2105         msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2106         msg[2] = 0;
2107         msg[3] = (u32)cmd;      /* We want the SCSI control block back */
2108         // Our cards use the transaction context as the tag for queueing
2109         // Adaptec/DPT Private stuff 
2110         msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2111         msg[5] = d->tid;
2112         /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2113         // I2O_SCB_FLAG_ENABLE_DISCONNECT | 
2114         // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
2115         // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2116         msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2117
2118         mptr=msg+7;
2119
2120         // Write SCSI command into the message - always 16 byte block 
2121         memset(mptr, 0,  16);
2122         memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2123         mptr+=4;
2124         lenptr=mptr++;          /* Remember me - fill in when we know */
2125         reqlen = 14;            // SINGLE SGE
2126         /* Now fill in the SGList and command */
2127         if(cmd->use_sg) {
2128                 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
2129                 int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
2130                                 cmd->sc_data_direction);
2131
2132
2133                 len = 0;
2134                 for(i = 0 ; i < sg_count; i++) {
2135                         *mptr++ = direction|0x10000000|sg_dma_len(sg);
2136                         len+=sg_dma_len(sg);
2137                         *mptr++ = sg_dma_address(sg);
2138                         sg++;
2139                 }
2140                 /* Make this an end of list */
2141                 mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1);
2142                 reqlen = mptr - msg;
2143                 *lenptr = len;
2144                 
2145                 if(cmd->underflow && len != cmd->underflow){
2146                         printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2147                                 len, cmd->underflow);
2148                 }
2149         } else {
2150                 *lenptr = len = cmd->request_bufflen;
2151                 if(len == 0) {
2152                         reqlen = 12;
2153                 } else {
2154                         *mptr++ = 0xD0000000|direction|cmd->request_bufflen;
2155                         *mptr++ = pci_map_single(pHba->pDev,
2156                                 cmd->request_buffer,
2157                                 cmd->request_bufflen,
2158                                 cmd->sc_data_direction);
2159                 }
2160         }
2161         
2162         /* Stick the headers on */
2163         msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2164         
2165         // Send it on its way
2166         rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2167         if (rcode == 0) {
2168                 return 0;
2169         }
2170         return rcode;
2171 }
2172
2173
2174 static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
2175 {
2176         struct Scsi_Host *host = NULL;
2177
2178         host = scsi_register(sht, sizeof(adpt_hba*));
2179         if (host == NULL) {
2180                 printk ("%s: scsi_register returned NULL\n",pHba->name);
2181                 return -1;
2182         }
2183         host->hostdata[0] = (unsigned long)pHba;
2184         pHba->host = host;
2185
2186         host->irq = pHba->pDev->irq;
2187         /* no IO ports, so don't have to set host->io_port and 
2188          * host->n_io_port
2189          */
2190         host->io_port = 0;
2191         host->n_io_port = 0;
2192                                 /* see comments in hosts.h */
2193         host->max_id = 16;
2194         host->max_lun = 256;
2195         host->max_channel = pHba->top_scsi_channel + 1;
2196         host->cmd_per_lun = 1;
2197         host->unique_id = (uint) pHba;
2198         host->sg_tablesize = pHba->sg_tablesize;
2199         host->can_queue = pHba->post_fifo_size;
2200
2201         return 0;
2202 }
2203
2204
2205 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2206 {
2207         adpt_hba* pHba;
2208         u32 hba_status;
2209         u32 dev_status;
2210         u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits 
2211         // I know this would look cleaner if I just read bytes,
2212         // but the model I have been using for all the rest of the
2213         // I/O is in 4-byte words - so I keep that model
2214         u16 detailed_status = readl(reply+16) &0xffff;
2215         dev_status = (detailed_status & 0xff);
2216         hba_status = detailed_status >> 8;
2217
2218         // calculate resid for sg - the transfer count is at byte offset 20 of the reply
2219         cmd->resid = cmd->request_bufflen - readl(reply+20);
2220
2221         pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2222
2223         cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2224
2225         if(!(reply_flags & MSG_FAIL)) {
2226                 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2227                 case I2O_SCSI_DSC_SUCCESS:
2228                         cmd->result = (DID_OK << 16);
2229                         // handle underflow
2230                         if(readl(reply+20) < cmd->underflow ) {
2231                                 cmd->result = (DID_ERROR <<16);
2232                                 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2233                         }
2234                         break;
2235                 case I2O_SCSI_DSC_REQUEST_ABORTED:
2236                         cmd->result = (DID_ABORT << 16);
2237                         break;
2238                 case I2O_SCSI_DSC_PATH_INVALID:
2239                 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2240                 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2241                 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2242                 case I2O_SCSI_DSC_NO_ADAPTER:
2243                 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2244                         printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2245                                 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2246                         cmd->result = (DID_TIME_OUT << 16);
2247                         break;
2248                 case I2O_SCSI_DSC_ADAPTER_BUSY:
2249                 case I2O_SCSI_DSC_BUS_BUSY:
2250                         cmd->result = (DID_BUS_BUSY << 16);
2251                         break;
2252                 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2253                 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2254                         cmd->result = (DID_RESET << 16);
2255                         break;
2256                 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2257                         printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2258                         cmd->result = (DID_PARITY << 16);
2259                         break;
2260                 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2261                 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2262                 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2263                 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2264                 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2265                 case I2O_SCSI_DSC_DATA_OVERRUN:
2266                 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2267                 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2268                 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2269                 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2270                 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2271                 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2272                 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2273                 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2274                 case I2O_SCSI_DSC_INVALID_CDB:
2275                 case I2O_SCSI_DSC_LUN_INVALID:
2276                 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2277                 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2278                 case I2O_SCSI_DSC_NO_NEXUS:
2279                 case I2O_SCSI_DSC_CDB_RECEIVED:
2280                 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2281                 case I2O_SCSI_DSC_QUEUE_FROZEN:
2282                 case I2O_SCSI_DSC_REQUEST_INVALID:
2283                 default:
2284                         printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2285                                 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2286                                hba_status, dev_status, cmd->cmnd[0]);
2287                         cmd->result = (DID_ERROR << 16);
2288                         break;
2289                 }
2290
2291                 // copy over the request sense data if it was a check
2292                 // condition status
2293                 if(dev_status == 0x02 /*CHECK_CONDITION*/) {
2294                         u32 len = sizeof(cmd->sense_buffer);
2295                         len = (len > 40) ?  40 : len;
2296                         // Copy over the sense data
2297                         memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2298                         if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && 
2299                            cmd->sense_buffer[2] == DATA_PROTECT ){
2300                                 /* This is to handle an array failed */
2301                                 cmd->result = (DID_TIME_OUT << 16);
2302                                 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2303                                         pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, 
2304                                         hba_status, dev_status, cmd->cmnd[0]);
2305
2306                         }
2307                 }
2308         } else {
2309                 /* In this condition we could not talk to the tid;
2310                  * the card rejected it.  We should signal a retry
2311                  * for a limited number of retries.
2312                  */
2313                 cmd->result = (DID_TIME_OUT << 16);
2314                 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2315                         pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2316                         ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2317         }
2318
2319         cmd->result |= (dev_status);
2320
2321         if(cmd->scsi_done != NULL){
2322                 cmd->scsi_done(cmd);
2323         } 
2324         return cmd->result;
2325 }
2326
2327
2328 static s32 adpt_rescan(adpt_hba* pHba)
2329 {
2330         s32 rcode;
2331         ulong flags = 0;
2332
2333         if(pHba->host)
2334                 spin_lock_irqsave(pHba->host->host_lock, flags);
2335         if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2336                 goto out;
2337         if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2338                 goto out;
2339         rcode = 0;
2340 out:    if(pHba->host)
2341                 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2342         return rcode;
2343 }
2344
2345
2346 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2347 {
2348         int i;
2349         int max;
2350         int tid;
2351         struct i2o_device *d;
2352         i2o_lct *lct = pHba->lct;
2353         u8 bus_no = 0;
2354         s16 scsi_id;
2355         s16 scsi_lun;
2356         u32 buf[10]; // at least 8 u32's
2357         struct adpt_device* pDev = NULL;
2358         struct i2o_device* pI2o_dev = NULL;
2359         
2360         if (lct == NULL) {
2361                 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2362                 return -1;
2363         }
2364         
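        // table_size is in 32-bit words: the code assumes a 3-word LCT header
        // followed by 9-word entries, so (table_size - 3) / 9 is the entry count.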
2365         max = lct->table_size;  
2366         max -= 3;
2367         max /= 9;
2368
2369         // Mark each drive as unscanned
2370         for (d = pHba->devices; d; d = d->next) {
2371                 pDev =(struct adpt_device*) d->owner;
2372                 if(!pDev){
2373                         continue;
2374                 }
2375                 pDev->state |= DPTI_DEV_UNSCANNED;
2376         }
2377
2378         printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2379         
2380         for(i=0;i<max;i++) {
2381                 if( lct->lct_entry[i].user_tid != 0xfff){
2382                         continue;
2383                 }
2384
2385                 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2386                     lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2387                     lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2388                         tid = lct->lct_entry[i].tid;
2389                         if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2390                                 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2391                                 continue;
2392                         }
2393                         bus_no = buf[0]>>16;
2394                         scsi_id = buf[1];
2395                         scsi_lun = (buf[2]>>8 )&0xff;
2396                         pDev = pHba->channel[bus_no].device[scsi_id];
2397                         /* walk the LUN chain looking for this LUN */
2398                         while(pDev) {
2399                                 if(pDev->scsi_lun == scsi_lun) {
2400                                         break;
2401                                 }
2402                                 pDev = pDev->next_lun;
2403                         }
2404                         if(!pDev ) { // Something new add it
2405                                 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
2406                                 if(d==NULL)
2407                                 {
2408                                         printk(KERN_CRIT "Out of memory for I2O device data.\n");
2409                                         return -ENOMEM;
2410                                 }
2411                                 
2412                                 d->controller = pHba;
2413                                 d->next = NULL;
2414
2415                                 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2416
2417                                 d->flags = 0;
2418                                 adpt_i2o_report_hba_unit(pHba, d);
2419                                 adpt_i2o_install_device(pHba, d);
2420         
2421                                 if(bus_no >= MAX_CHANNEL) {     // Something is wrong - skip it
2422                                         printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
2423                                         continue;
2424                                 }
2425                                 pDev = pHba->channel[bus_no].device[scsi_id];   
2426                                 if( pDev == NULL){
2427                                         pDev =  kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
2428                                         if(pDev == NULL) {
2429                                                 return -ENOMEM;
2430                                         }
2431                                         pHba->channel[bus_no].device[scsi_id] = pDev;
2432                                 } else {
2433                                         while (pDev->next_lun) {
2434                                                 pDev = pDev->next_lun;
2435                                         }
2436                                         pDev = pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
2437                                         if(pDev == NULL) {
2438                                                 return -ENOMEM;
2439                                         }
2440                                 }
2441                                 memset(pDev,0,sizeof(struct adpt_device));
2442                                 pDev->tid = d->lct_data.tid;
2443                                 pDev->scsi_channel = bus_no;
2444                                 pDev->scsi_id = scsi_id;
2445                                 pDev->scsi_lun = scsi_lun;
2446                                 pDev->pI2o_dev = d;
2447                                 d->owner = pDev;
2448                                 pDev->type = (buf[0])&0xff;
2449                                 pDev->flags = (buf[0]>>8)&0xff;
2450                                 // Too late, the SCSI system has made up its mind, but what the hey ...
2451                                 if(scsi_id > pHba->top_scsi_id){
2452                                         pHba->top_scsi_id = scsi_id;
2453                                 }
2454                                 if(scsi_lun > pHba->top_scsi_lun){
2455                                         pHba->top_scsi_lun = scsi_lun;
2456                                 }
2457                                 continue;
2458                         } // end of new i2o device
2459
2460                         // We found an old device - check it
2461                         while(pDev) {
2462                                 if(pDev->scsi_lun == scsi_lun) {
2463                                         if(!scsi_device_online(pDev->pScsi_dev)) {
2464                                                 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2465                                                                 pHba->name,bus_no,scsi_id,scsi_lun);
2466                                                 if (pDev->pScsi_dev) {
2467                                                         scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2468                                                 }
2469                                         }
2470                                         d = pDev->pI2o_dev;
2471                                         if(d->lct_data.tid != tid) { // something changed
2472                                                 pDev->tid = tid;
2473                                                 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2474                                                 if (pDev->pScsi_dev) {
2475                                                         pDev->pScsi_dev->changed = TRUE;
2476                                                         pDev->pScsi_dev->removable = TRUE;
2477                                                 }
2478                                         }
2479                                         // Found it - mark it scanned
2480                                         pDev->state = DPTI_DEV_ONLINE;
2481                                         break;
2482                                 }
2483                                 pDev = pDev->next_lun;
2484                         }
2485                 }
2486         }
2487         for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2488                 pDev =(struct adpt_device*) pI2o_dev->owner;
2489                 if(!pDev){
2490                         continue;
2491                 }
2492                 // Mark offline any drives that previously existed but could not
2493                 // be found in the LCT table
2494                 if (pDev->state & DPTI_DEV_UNSCANNED){
2495                         pDev->state = DPTI_DEV_OFFLINE;
2496                         printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2497                         if (pDev->pScsi_dev) {
2498                                 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2499                         }
2500                 }
2501         }
2502         return 0;
2503 }
2504
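/* Complete every command still outstanding on this host with a QUEUE_FULL
 * status so the midlayer can requeue it (a best-effort cleanup pass). */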
2505 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2506 {
2507         struct scsi_cmnd*       cmd = NULL;
2508         struct scsi_device*     d = NULL;
2509
2510         shost_for_each_device(d, pHba->host) {
2511                 unsigned long flags;
2512                 spin_lock_irqsave(&d->list_lock, flags);
2513                 list_for_each_entry(cmd, &d->cmd_list, list) {
2514                         if(cmd->serial_number == 0){
2515                                 continue;
2516                         }
2517                         cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2518                         cmd->scsi_done(cmd);
2519                 }
2520                 spin_unlock_irqrestore(&d->list_lock, flags);
2521         }
2522 }
2523
2524
2525 /*============================================================================
2526  *  Routines from i2o subsystem
2527  *============================================================================
2528  */
2529
2530
2531
2532 /*
2533  *      Bring an I2O controller into HOLD state. See the spec.
2534  */
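/* Reset the IOP if necessary, initialize the outbound reply queue and fetch
 * the HRT; on success the controller is left in the HOLD state. */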
2535 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2536 {
2537         int rcode;
2538
2539         if(pHba->initialized ) {
2540                 if (adpt_i2o_status_get(pHba) < 0) {
2541                         if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2542                                 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2543                                 return rcode;
2544                         }
2545                         if (adpt_i2o_status_get(pHba) < 0) {
2546                                 printk(KERN_INFO "HBA not responding.\n");
2547                                 return -1;
2548                         }
2549                 }
2550
2551                 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2552                         printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2553                         return -1;
2554                 }
2555
2556                 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2557                     pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2558                     pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2559                     pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2560                         adpt_i2o_reset_hba(pHba);                       
2561                         if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2562                                 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2563                                 return -1;
2564                         }
2565                 }
2566         } else {
2567                 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2568                         printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2569                         return rcode;
2570                 }
2571
2572         }
2573
2574         if (adpt_i2o_init_outbound_q(pHba) < 0) {
2575                 return -1;
2576         }
2577
2578         /* In HOLD state */
2579         
2580         if (adpt_i2o_hrt_get(pHba) < 0) {
2581                 return -1;
2582         }
2583
2584         return 0;
2585 }
2586
2587 /*
2588  *      Bring a controller online into OPERATIONAL state. 
2589  */
2590  
2591 static int adpt_i2o_online_hba(adpt_hba* pHba)
2592 {
2593         if (adpt_i2o_systab_send(pHba) < 0) {
2594                 adpt_i2o_delete_hba(pHba);
2595                 return -1;
2596         }
2597         /* In READY state */
2598
2599         if (adpt_i2o_enable_hba(pHba) < 0) {
2600                 adpt_i2o_delete_hba(pHba);
2601                 return -1;
2602         }
2603
2604         /* In OPERATIONAL state  */
2605         return 0;
2606 }
2607
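/* Post a UtilNOP in the given inbound message frame (acquiring one first if
 * EMPTY_QUEUE was passed) so the frame is returned to the IOP, not leaked. */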
2608 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2609 {
2610         u32 __iomem *msg;
2611         ulong timeout = jiffies + 5*HZ;
2612
2613         while(m == EMPTY_QUEUE){
2614                 rmb();
2615                 m = readl(pHba->post_port);
2616                 if(m != EMPTY_QUEUE){
2617                         break;
2618                 }
2619                 if(time_after(jiffies,timeout)){
2620                         printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2621                         return 2;
2622                 }
2623                 schedule_timeout_uninterruptible(1);
2624         }
2625         msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2626         writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2627         writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2628         writel( 0,&msg[2]);
2629         wmb();
2630
2631         writel(m, pHba->post_port);
2632         wmb();
2633         return 0;
2634 }
2635
2636 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2637 {
2638         u8 *status;
2639         u32 __iomem *msg = NULL;
2640         int i;
2641         ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2642         u32* ptr;
2643         u32 outbound_frame;  // This must be a 32-bit bus address
2644         u32 m;
2645
2646         do {
2647                 rmb();
2648                 m = readl(pHba->post_port);
2649                 if (m != EMPTY_QUEUE) {
2650                         break;
2651                 }
2652
2653                 if(time_after(jiffies,timeout)){
2654                         printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2655                         return -ETIMEDOUT;
2656                 }
2657                 schedule_timeout_uninterruptible(1);
2658         } while(m == EMPTY_QUEUE);
2659
2660         msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2661
2662         status = kmalloc(4,GFP_KERNEL|ADDR32);
2663         if (status==NULL) {
2664                 adpt_send_nop(pHba, m);
2665                 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2666                         pHba->name);
2667                 return -ENOMEM;
2668         }
2669         memset(status, 0, 4);
2670
2671         writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2672         writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2673         writel(0, &msg[2]);
2674         writel(0x0106, &msg[3]);        /* Transaction context */
2675         writel(4096, &msg[4]);          /* Host page frame size */
2676         writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);   /* Outbound msg frame size and Initcode */
2677         writel(0xD0000004, &msg[6]);            /* Simple SG LE, EOB */
2678         writel(virt_to_bus(status), &msg[7]);
2679
2680         writel(m, pHba->post_port);
2681         wmb();
2682
2683         // Wait for the reply status to come back
2684         do {
2685                 if (*status) {
2686                         if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2687                                 break;
2688                         }
2689                 }
2690                 rmb();
2691                 if(time_after(jiffies,timeout)){
2692                         printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2693                         return -ETIMEDOUT;
2694                 }
2695                 schedule_timeout_uninterruptible(1);
2696         } while (1);
2697
2698         // If the command was successful, fill the fifo with our reply
2699         // message packets
2700         if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2701                 kfree(status);
2702                 return -2;
2703         }
2704         kfree(status);
2705
2706         kfree(pHba->reply_pool);
2707
2708         pHba->reply_pool = (u32*)kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
2709         if(!pHba->reply_pool){
2710                 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
2711                 return -1;
2712         }
2713         memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2714
2715         ptr = pHba->reply_pool;
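        // Prime the outbound free list: post the 32-bit bus address of each
        // empty reply frame in the pool to the controller's reply port.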
2716         for(i = 0; i < pHba->reply_fifo_size; i++) {
2717                 outbound_frame = (u32)virt_to_bus(ptr);
2718                 writel(outbound_frame, pHba->reply_port);
2719                 wmb();
2720                 ptr +=  REPLY_FRAME_SIZE;
2721         }
2722         adpt_i2o_status_get(pHba);
2723         return 0;
2724 }
2725
2726
2727 /*
2728  * I2O System Table.  Contains information about
2729  * all the IOPs in the system.  Used to inform IOPs
2730  * about each other's existence.
2731  *
2732  * sys_tbl_ver is the CurrentChangeIndicator that is
2733  * used by IOPs to track changes.
2734  */
2735
2736
2737
2738 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2739 {
2740         ulong timeout;
2741         u32 m;
2742         u32 __iomem *msg;
2743         u8 *status_block=NULL;
2744         ulong status_block_bus;
2745
2746         if(pHba->status_block == NULL) {
2747                 pHba->status_block = (i2o_status_block*)
2748                         kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
2749                 if(pHba->status_block == NULL) {
2750                         printk(KERN_ERR
2751                         "dpti%d: Get Status Block failed; Out of memory. \n", 
2752                         pHba->unit);
2753                         return -ENOMEM;
2754                 }
2755         }
2756         memset(pHba->status_block, 0, sizeof(i2o_status_block));
2757         status_block = (u8*)(pHba->status_block);
2758         status_block_bus = virt_to_bus(pHba->status_block);
2759         timeout = jiffies+TMOUT_GETSTATUS*HZ;
2760         do {
2761                 rmb();
2762                 m = readl(pHba->post_port);
2763                 if (m != EMPTY_QUEUE) {
2764                         break;
2765                 }
2766                 if(time_after(jiffies,timeout)){
2767                         printk(KERN_ERR "%s: Timeout waiting for message!\n",
2768                                         pHba->name);
2769                         return -ETIMEDOUT;
2770                 }
2771                 schedule_timeout_uninterruptible(1);
2772         } while(m==EMPTY_QUEUE);
2773
2774         
2775         msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2776
2777         writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2778         writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2779         writel(1, &msg[2]);
2780         writel(0, &msg[3]);
2781         writel(0, &msg[4]);
2782         writel(0, &msg[5]);
2783         writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
2784         writel(0, &msg[7]);
2785         writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2786
2787         //post message
2788         writel(m, pHba->post_port);
2789         wmb();
2790
2791         while(status_block[87]!=0xff){
2792                 if(time_after(jiffies,timeout)){
2793                         printk(KERN_ERR"dpti%d: Get status timeout.\n",
2794                                 pHba->unit);
2795                         return -ETIMEDOUT;
2796                 }
2797                 rmb();
2798                 schedule_timeout_uninterruptible(1);
2799         }
2800
2801         // Set up our number of outbound and inbound messages
2802         pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2803         if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2804                 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2805         }
2806
2807         pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2808         if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2809                 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2810         }
2811
2812         // Calculate the Scatter Gather list size
2813         pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
2814         if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2815                 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2816         }
2817
2818
2819 #ifdef DEBUG
2820         printk("dpti%d: State = ",pHba->unit);
2821         switch(pHba->status_block->iop_state) {
2822                 case 0x01:
2823                         printk("INIT\n");
2824                         break;
2825                 case 0x02:
2826                         printk("RESET\n");
2827                         break;
2828                 case 0x04:
2829                         printk("HOLD\n");
2830                         break;
2831                 case 0x05:
2832                         printk("READY\n");
2833                         break;
2834                 case 0x08:
2835                         printk("OPERATIONAL\n");
2836                         break;
2837                 case 0x10:
2838                         printk("FAILED\n");
2839                         break;
2840                 case 0x11:
2841                         printk("FAULTED\n");
2842                         break;
2843                 default:
2844                         printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2845         }
2846 #endif
2847         return 0;
2848 }
2849
2850 /*
2851  * Get the IOP's Logical Configuration Table
2852  */
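/*
 * The initial buffer is sized from the status block's expected_lct_size;
 * if the reply reports a larger table_size the buffer is reallocated and
 * the LCT_NOTIFY request is reissued until the whole table fits.
 */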
2853 static int adpt_i2o_lct_get(adpt_hba* pHba)
2854 {
2855         u32 msg[8];
2856         int ret;
2857         u32 buf[16];
2858
2859         if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2860                 pHba->lct_size = pHba->status_block->expected_lct_size;
2861         }
2862         do {
2863                 if (pHba->lct == NULL) {
2864                         pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
2865                         if(pHba->lct == NULL) {
2866                                 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2867                                         pHba->name);
2868                                 return -ENOMEM;
2869                         }
2870                 }
2871                 memset(pHba->lct, 0, pHba->lct_size);
2872
2873                 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2874                 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2875                 msg[2] = 0;
2876                 msg[3] = 0;
2877                 msg[4] = 0xFFFFFFFF;    /* All devices */
2878                 msg[5] = 0x00000000;    /* Report now */
2879                 msg[6] = 0xD0000000|pHba->lct_size;
2880                 msg[7] = virt_to_bus(pHba->lct);
2881
2882                 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
2883                         printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
2884                                 pHba->name, ret);
2885                         printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
2886                         return ret;
2887                 }
2888
2889                 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
2890                         pHba->lct_size = pHba->lct->table_size << 2;
2891                         kfree(pHba->lct);
2892                         pHba->lct = NULL;
2893                 }
2894         } while (pHba->lct == NULL);
2895
2896         PDEBUG("%s: Hardware resource table read.\n", pHba->name);
2897
2898
2899         // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
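	/* Layout inferred from the pointer arithmetic below: buf[0] is the
	 * debug buffer's offset from base_addr_virt, buf[1] its size and
	 * buf[2] the offset of the string area within that buffer. */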
2900         if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
2901                 pHba->FwDebugBufferSize = buf[1];
2902                 pHba->FwDebugBuffer_P    = pHba->base_addr_virt + buf[0];
2903                 pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
2904                 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
2905                 pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
2906                 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
2907                 pHba->FwDebugBuffer_P += buf[2]; 
2908                 pHba->FwDebugFlags = 0;
2909         }
2910
2911         return 0;
2912 }
2913
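/*
 * Build the I2O system table for every adapter on hba_chain.  The header
 * carries num_entries, version and change_ind; each entry is filled from
 * the adapter's re-read status block plus the bus address of its inbound
 * post port.  adpt_i2o_systab_send() later delivers this table to the IOPs.
 */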
2914 static int adpt_i2o_build_sys_table(void)
2915 {
2916         adpt_hba* pHba = NULL;
2917         int count = 0;
2918
2919         sys_tbl_len = sizeof(struct i2o_sys_tbl) +      // Header + IOPs
2920                                 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
2921
2922         kfree(sys_tbl);
2923
2924         sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
2925         if(!sys_tbl) {
2926                 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");     
2927                 return -ENOMEM;
2928         }
2929         memset(sys_tbl, 0, sys_tbl_len);
2930
2931         sys_tbl->num_entries = hba_count;
2932         sys_tbl->version = I2OVERSION;
2933         sys_tbl->change_ind = sys_tbl_ind++;
2934
2935         for(pHba = hba_chain; pHba; pHba = pHba->next) {
2936                 // Get updated Status Block so we have the latest information
2937                 if (adpt_i2o_status_get(pHba)) {
2938                         sys_tbl->num_entries--;
2939                         continue; // try next one       
2940                 }
2941
2942                 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
2943                 sys_tbl->iops[count].iop_id = pHba->unit + 2;
2944                 sys_tbl->iops[count].seg_num = 0;
2945                 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
2946                 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
2947                 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
2948                 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
2949                 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2950                 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
2951                 sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
2952                 sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
2953
2954                 count++;
2955         }
2956
2957 #ifdef DEBUG
2958 {
2959         u32 *table = (u32*)sys_tbl;
2960         printk(KERN_DEBUG "sys_tbl_len = %d 32-bit words\n", sys_tbl_len >> 2);
2961         for(count = 0; count < (sys_tbl_len >>2); count++) {
2962                 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", 
2963                         count, table[count]);
2964         }
2965 }
2966 #endif
2967
2968         return 0;
2969 }
2970
2971
2972 /*
2973  *       Dump the information block associated with a given unit (TID)
2974  */
2975  
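/*
 * Illustrative output (device strings are made-up examples):
 *   TID 009  Vendor: ADAPTEC       Device: RAID-5        Rev: 380B
 */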
2976 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
2977 {
2978         char buf[64];
2979         int unit = d->lct_data.tid;
2980
2981         printk(KERN_INFO "TID %3.3d ", unit);
2982
2983         if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
2984         {
2985                 buf[16]=0;
2986                 printk(" Vendor: %-12.12s", buf);
2987         }
2988         if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
2989         {
2990                 buf[16]=0;
2991                 printk(" Device: %-12.12s", buf);
2992         }
2993         if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
2994         {
2995                 buf[8]=0;
2996                 printk(" Rev: %-12.12s\n", buf);
2997         }
2998 #ifdef DEBUG
2999          printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3000          printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3001          printk(KERN_INFO "\tFlags: ");
3002
3003          if(d->lct_data.device_flags&(1<<0))
3004                   printk("C");       // ConfigDialog requested
3005          if(d->lct_data.device_flags&(1<<1))
3006                   printk("U");       // Multi-user capable
3007          if(!(d->lct_data.device_flags&(1<<4)))
3008                   printk("P");       // Peer service enabled!
3009          if(!(d->lct_data.device_flags&(1<<5)))
3010                   printk("M");       // Mgmt service enabled!
3011          printk("\n");
3012 #endif
3013 }
3014
3015 #ifdef DEBUG
3016 /*
3017  *      Do i2o class name lookup
3018  */
3019 static const char *adpt_i2o_get_class_name(int class)
3020 {
3021         int idx = 16;
3022         static char *i2o_class_name[] = {
3023                 "Executive",
3024                 "Device Driver Module",
3025                 "Block Device",
3026                 "Tape Device",
3027                 "LAN Interface",
3028                 "WAN Interface",
3029                 "Fibre Channel Port",
3030                 "Fibre Channel Device",
3031                 "SCSI Device",
3032                 "ATE Port",
3033                 "ATE Device",
3034                 "Floppy Controller",
3035                 "Floppy Device",
3036                 "Secondary Bus Port",
3037                 "Peer Transport Agent",
3038                 "Peer Transport",
3039                 "Unknown"
3040         };
3041         
3042         switch(class&0xFFF) {
3043         case I2O_CLASS_EXECUTIVE:
3044                 idx = 0; break;
3045         case I2O_CLASS_DDM:
3046                 idx = 1; break;
3047         case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3048                 idx = 2; break;
3049         case I2O_CLASS_SEQUENTIAL_STORAGE:
3050                 idx = 3; break;
3051         case I2O_CLASS_LAN:
3052                 idx = 4; break;
3053         case I2O_CLASS_WAN:
3054                 idx = 5; break;
3055         case I2O_CLASS_FIBRE_CHANNEL_PORT:
3056                 idx = 6; break;
3057         case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3058                 idx = 7; break;
3059         case I2O_CLASS_SCSI_PERIPHERAL:
3060                 idx = 8; break;
3061         case I2O_CLASS_ATE_PORT:
3062                 idx = 9; break;
3063         case I2O_CLASS_ATE_PERIPHERAL:
3064                 idx = 10; break;
3065         case I2O_CLASS_FLOPPY_CONTROLLER:
3066                 idx = 11; break;
3067         case I2O_CLASS_FLOPPY_DEVICE:
3068                 idx = 12; break;
3069         case I2O_CLASS_BUS_ADAPTER_PORT:
3070                 idx = 13; break;
3071         case I2O_CLASS_PEER_TRANSPORT_AGENT:
3072                 idx = 14; break;
3073         case I2O_CLASS_PEER_TRANSPORT:
3074                 idx = 15; break;
3075         }
3076         return i2o_class_name[idx];
3077 }
3078 #endif
3079
3080
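/*
 * Fetch the IOP's Hardware Resource Table.  As with the LCT, the buffer
 * starts at sizeof(i2o_hrt) and the request is retried with a larger
 * allocation whenever the reply reports more entries than fit.
 */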
3081 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3082 {
3083         u32 msg[6];
3084         int ret, size = sizeof(i2o_hrt);
3085
3086         do {
3087                 if (pHba->hrt == NULL) {
3088                         pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
3089                         if (pHba->hrt == NULL) {
3090                                 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3091                                 return -ENOMEM;
3092                         }
3093                 }
3094
3095                 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3096                 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3097                 msg[2]= 0;
3098                 msg[3]= 0;
3099                 msg[4]= (0xD0000000 | size);    /* Simple transaction */
3100                 msg[5]= virt_to_bus(pHba->hrt);   /* Dump it here */
3101
3102                 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3103                         printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3104                         return ret;
3105                 }
3106
3107                 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3108                         size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3109                         kfree(pHba->hrt);
3110                         pHba->hrt = NULL;
3111                 }
3112         } while(pHba->hrt == NULL);
3113         return 0;
3114 }
3115
3116 /*
3117  *       Query one scalar group value or a whole scalar group.
3118  */                     
3119 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, 
3120                         int group, int field, void *buf, int buflen)
3121 {
3122         u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3123         u8 *resblk;
3124
3125         int size;
3126
3127         /* 8 bytes for header */
3128         resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
3129         if (resblk == NULL) {
3130                 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3131                 return -ENOMEM;
3132         }
3133
3134         if (field == -1)                /* whole group */
3135                 opblk[4] = -1;
3136
3137         size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, 
3138                 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
3139         if (size == -ETIME) {
3140                 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
                kfree(resblk);  /* don't leak the result block on timeout */
3141                 return -ETIME;
3142         } else if (size == -EINTR) {
3143                 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
                kfree(resblk);  /* ...or when interrupted */
3144                 return -EINTR;
3145         }
3146                         
3147         memcpy(buf, resblk+8, buflen);  /* cut off header */
3148
3149         kfree(resblk);
3150         if (size < 0)
3151                 return size;    
3152
3153         return buflen;
3154 }
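/*
 * Usage sketch (illustrative only, mirroring adpt_i2o_report_hba_unit()
 * above, which reads group 0xF100 field 3 for the vendor string); the
 * helper name and buffer size here are examples, not part of the driver.
 */
#if 0
static void adpt_example_print_vendor(adpt_hba* pHba, int tid)
{
	char vendor[17];

	/* Query a single scalar field: group 0xF100, field 3 (vendor). */
	if (adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, vendor, 16) >= 0) {
		vendor[16] = '\0';
		printk(KERN_INFO "%s: TID %d vendor %-12.12s\n",
				pHba->name, tid, vendor);
	}
}
#endif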
3155
3156
3157 /*      Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3158  *
3159  *      This function can be used for all UtilParamsGet/Set operations.
3160  *      The operation block is passed in the opblk buffer and the
3161  *      results are returned in the resblk buffer.
3162  *      Note that the minimum sized resblk is 8 bytes and contains
3163  *      ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3164  */
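/*
 * As decoded below: the second result word packs BlockSize in bits 0-15,
 * BlockStatus in bits 16-23 and ErrorInfoSize in bits 24-31; the
 * ResultCount presumably sits in the first word (only res[1] is examined
 * here).
 */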
3165 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 
3166                   void *opblk, int oplen, void *resblk, int reslen)
3167 {
3168         u32 msg[9]; 
3169         u32 *res = (u32 *)resblk;
3170         int wait_status;
3171
3172         msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3173         msg[1] = cmd << 24 | HOST_TID << 12 | tid; 
3174         msg[2] = 0;
3175         msg[3] = 0;
3176         msg[4] = 0;
3177         msg[5] = 0x54000000 | oplen;    /* OperationBlock */
3178         msg[6] = virt_to_bus(opblk);
3179         msg[7] = 0xD0000000 | reslen;   /* ResultBlock */
3180         msg[8] = virt_to_bus(resblk);
3181
3182         if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3183                 printk(KERN_WARNING "adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
3184                 return wait_status;     /* -DetailedStatus */
3185         }
3186
3187         if (res[1]&0x00FF0000) {        /* BlockStatus != SUCCESS */
3188                 printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3189                         "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3190                         pHba->name,
3191                         (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3192                                                          : "PARAMS_GET",   
3193                         res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3194                 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3195         }
3196
3197          return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ 
3198 }
3199
3200
3201 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3202 {
3203         u32 msg[4];
3204         int ret;
3205
3206         adpt_i2o_status_get(pHba);
3207
3208         /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3209
3210         if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3211            (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3212                 return 0;
3213         }
3214
3215         msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3216         msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3217         msg[2] = 0;
3218         msg[3] = 0;
3219
3220         if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3221                 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3222                                 pHba->unit, -ret);
3223         } else {
3224                 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3225         }
3226
3227         adpt_i2o_status_get(pHba);
3228         return ret;
3229 }
3230
3231
3232 /* 
3233  * Enable IOP. Allows the IOP to resume external operations.
3234  */
3235 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3236 {
3237         u32 msg[4];
3238         int ret;
3239         
3240         adpt_i2o_status_get(pHba);
3241         if(!pHba->status_block){
3242                 return -ENOMEM;
3243         }
3244         /* Enable only allowed on READY state */
3245         if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3246                 return 0;
3247
3248         if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3249                 return -EINVAL;
3250
3251         msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3252         msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3253         msg[2]= 0;
3254         msg[3]= 0;
3255
3256         if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3257                 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n", 
3258                         pHba->name, ret);
3259         } else {
3260                 PDEBUG("%s: Enabled.\n", pHba->name);
3261         }
3262
3263         adpt_i2o_status_get(pHba);
3264         return ret;
3265 }
3266
3267
3268 static int adpt_i2o_systab_send(adpt_hba* pHba)
3269 {
3270          u32 msg[12];
3271          int ret;
3272
3273         msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3274         msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3275         msg[2] = 0;
3276         msg[3] = 0;
3277         msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3278         msg[5] = 0;                                /* Segment 0 */
3279
3280         /* 
3281          * Provide three SGL-elements:
3282          * System table (SysTab), Private memory space declaration and 
3283          * Private i/o space declaration  
3284          */
3285         msg[6] = 0x54000000 | sys_tbl_len;
3286         msg[7] = virt_to_phys(sys_tbl);
3287         msg[8] = 0x54000000 | 0;
3288         msg[9] = 0;
3289         msg[10] = 0xD4000000 | 0;
3290         msg[11] = 0;
3291
3292         if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3293                 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n", 
3294                         pHba->name, ret);
3295         }
3296 #ifdef DEBUG
3297         else {
3298                 PINFO("%s: SysTab set.\n", pHba->name);
3299         }
3300 #endif
3301
3302         return ret;     
3303  }
3304
3305
3306 /*============================================================================
3307  *
3308  *============================================================================
3309  */
3310
3311
3312 #ifdef UARTDELAY 
3313
3314 static void adpt_delay(int millisec)
3315 {
3316         int i;
3317         for (i = 0; i < millisec; i++) {
3318                 udelay(1000);   /* delay for one millisecond */
3319         }
3320 }
3321
3322 #endif
3323
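/*
 * Old-style SCSI host template: registration and module init/exit are
 * supplied by the scsi_module.c include at the bottom of this file,
 * which drives the detect()/release() entry points above.
 */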
3324 static struct scsi_host_template driver_template = {
3325         .name                   = "dpt_i2o",
3326         .proc_name              = "dpt_i2o",
3327         .proc_info              = adpt_proc_info,
3328         .detect                 = adpt_detect,  
3329         .release                = adpt_release,
3330         .info                   = adpt_info,
3331         .queuecommand           = adpt_queue,
3332         .eh_abort_handler       = adpt_abort,
3333         .eh_device_reset_handler = adpt_device_reset,
3334         .eh_bus_reset_handler   = adpt_bus_reset,
3335         .eh_host_reset_handler  = adpt_reset,
3336         .bios_param             = adpt_bios_param,
3337         .slave_configure        = adpt_slave_configure,
3338         .can_queue              = MAX_TO_IOP_MESSAGES,
3339         .this_id                = 7,
3340         .cmd_per_lun            = 1,
3341         .use_clustering         = ENABLE_CLUSTERING,
3342 };
3343 #include "scsi_module.c"
3344 MODULE_LICENSE("GPL");