1 /*
2 *******************************************************************************
3 **        O.S   : Linux
4 **   FILE NAME  : arcmsr_hba.c
5 **        BY    : Erich Chen
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA RAID Host adapter
8 *******************************************************************************
9 ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
10 **
11 **     Web site: www.areca.com.tw
12 **       E-mail: erich@areca.com.tw
13 **
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License version 2 as
16 ** published by the Free Software Foundation.
17 ** This program is distributed in the hope that it will be useful,
18 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
19 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20 ** GNU General Public License for more details.
21 *******************************************************************************
22 ** Redistribution and use in source and binary forms, with or without
23 ** modification, are permitted provided that the following conditions
24 ** are met:
25 ** 1. Redistributions of source code must retain the above copyright
26 **    notice, this list of conditions and the following disclaimer.
27 ** 2. Redistributions in binary form must reproduce the above copyright
28 **    notice, this list of conditions and the following disclaimer in the
29 **    documentation and/or other materials provided with the distribution.
30 ** 3. The name of the author may not be used to endorse or promote products
31 **    derived from this software without specific prior written permission.
32 **
33 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 *******************************************************************************
44 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
45 **     Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
46 *******************************************************************************
47 */
48 #include <linux/module.h>
49 #include <linux/reboot.h>
50 #include <linux/spinlock.h>
51 #include <linux/pci_ids.h>
52 #include <linux/interrupt.h>
53 #include <linux/moduleparam.h>
54 #include <linux/errno.h>
55 #include <linux/types.h>
56 #include <linux/delay.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/timer.h>
59 #include <linux/pci.h>
60 #include <asm/dma.h>
61 #include <asm/io.h>
62 #include <asm/system.h>
63 #include <asm/uaccess.h>
64 #include <scsi/scsi_host.h>
65 #include <scsi/scsi.h>
66 #include <scsi/scsi_cmnd.h>
67 #include <scsi/scsi_tcq.h>
68 #include <scsi/scsi_device.h>
69 #include <scsi/scsi_transport.h>
70 #include <scsi/scsicam.h>
71 #include "arcmsr.h"
72
73 MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>");
74 MODULE_DESCRIPTION("ARECA (ARC11xx/12xx) SATA RAID HOST Adapter");
75 MODULE_LICENSE("Dual BSD/GPL");
76 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
77
78 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd);
79 static int arcmsr_abort(struct scsi_cmnd *);
80 static int arcmsr_bus_reset(struct scsi_cmnd *);
81 static int arcmsr_bios_param(struct scsi_device *sdev,
82                                 struct block_device *bdev, sector_t capacity, int *info);
83 static int arcmsr_queue_command(struct scsi_cmnd * cmd,
84                                 void (*done) (struct scsi_cmnd *));
85 static int arcmsr_probe(struct pci_dev *pdev,
86                                 const struct pci_device_id *id);
87 static void arcmsr_remove(struct pci_dev *pdev);
88 static void arcmsr_shutdown(struct pci_dev *pdev);
89 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
90 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
91 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
92 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
93 static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
94 static const char *arcmsr_info(struct Scsi_Host *);
95 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
96
97 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
98 {
99         if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
100                 queue_depth = ARCMSR_MAX_CMD_PERLUN;
101         scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
102         return queue_depth;
103 }
104
105 static struct scsi_host_template arcmsr_scsi_host_template = {
106         .module                 = THIS_MODULE,
107         .name                   = "ARCMSR ARECA SATA RAID HOST Adapter" ARCMSR_DRIVER_VERSION,
108         .info                   = arcmsr_info,
109         .queuecommand           = arcmsr_queue_command,
110         .eh_abort_handler       = arcmsr_abort,
111         .eh_bus_reset_handler   = arcmsr_bus_reset,
112         .bios_param             = arcmsr_bios_param,
113         .change_queue_depth     = arcmsr_adjust_disk_queue_depth,
114         .can_queue              = ARCMSR_MAX_OUTSTANDING_CMD,
115         .this_id                = ARCMSR_SCSI_INITIATOR_ID,
116         .sg_tablesize           = ARCMSR_MAX_SG_ENTRIES,
117         .max_sectors            = ARCMSR_MAX_XFER_SECTORS,
118         .cmd_per_lun            = ARCMSR_MAX_CMD_PERLUN,
119         .use_clustering         = ENABLE_CLUSTERING,
120         .shost_attrs            = arcmsr_host_attrs,
121 };
122
123 static struct pci_device_id arcmsr_device_id_table[] = {
124         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
125         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
126         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
127         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
128         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
129         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
130         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
131         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
132         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
133         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
134         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
135         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
136         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
137         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
138         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
139         {0, 0}, /* Terminating entry */
140 };
141 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
142 static struct pci_driver arcmsr_pci_driver = {
143         .name                   = "arcmsr",
144         .id_table               = arcmsr_device_id_table,
145         .probe                  = arcmsr_probe,
146         .remove                 = arcmsr_remove,
147         .shutdown               = arcmsr_shutdown
148 };
149
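/*
 * Top-level IRQ handler registered via request_irq() in arcmsr_probe():
 * it takes the SCSI host lock and delegates the real work to
 * arcmsr_interrupt().
 */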
150 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
151 {
152         irqreturn_t handle_state;
153         struct AdapterControlBlock *acb;
154         unsigned long flags;
155
156         acb = (struct AdapterControlBlock *)dev_id;
157
158         spin_lock_irqsave(acb->host->host_lock, flags);
159         handle_state = arcmsr_interrupt(acb);
160         spin_unlock_irqrestore(acb->host->host_lock, flags);
161         return handle_state;
162 }
163
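/*
 * Report a BIOS disk geometry: use the geometry implied by the on-disk
 * partition table when one can be read, otherwise fall back to 64 heads /
 * 32 sectors, or 255/63 once the capacity exceeds 1024 cylinders.
 */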
164 static int arcmsr_bios_param(struct scsi_device *sdev,
165                 struct block_device *bdev, sector_t capacity, int *geom)
166 {
167         int ret, heads, sectors, cylinders, total_capacity;
168         unsigned char *buffer;/* return copy of block device's partition table */
169
170         buffer = scsi_bios_ptable(bdev);
171         if (buffer) {
172                 ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
173                 kfree(buffer);
174                 if (ret != -1)
175                         return ret;
176         }
177         total_capacity = capacity;
178         heads = 64;
179         sectors = 32;
180         cylinders = total_capacity / (heads * sectors);
181         if (cylinders > 1024) {
182                 heads = 255;
183                 sectors = 63;
184                 cylinders = total_capacity / (heads * sectors);
185         }
186         geom[0] = heads;
187         geom[1] = sectors;
188         geom[2] = cylinders;
189         return 0;
190 }
191
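/*
 * Allocate a single DMA-coherent region for all command control blocks,
 * align it to a 32-byte boundary (the IOP addresses CCBs as phys >> 5),
 * string the CCBs onto ccb_free_list, and, if the pool lies above 4GB,
 * hand the upper 32 address bits to the IOP via a SET_CONFIG message.
 */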
192 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
193 {
194         struct pci_dev *pdev = acb->pdev;
195         struct MessageUnit __iomem *reg = acb->pmu;
196         u32 ccb_phyaddr_hi32;
197         void *dma_coherent;
198         dma_addr_t dma_coherent_handle, dma_addr;
199         struct CommandControlBlock *ccb_tmp;
200         int i, j;
201
202         dma_coherent = dma_alloc_coherent(&pdev->dev,
203                         ARCMSR_MAX_FREECCB_NUM *
204                         sizeof (struct CommandControlBlock) + 0x20,
205                         &dma_coherent_handle, GFP_KERNEL);
206         if (!dma_coherent)
207                 return -ENOMEM;
208
209         acb->dma_coherent = dma_coherent;
210         acb->dma_coherent_handle = dma_coherent_handle;
211
212         if (((unsigned long)dma_coherent & 0x1F)) {
213                 dma_coherent = dma_coherent +
214                         (0x20 - ((unsigned long)dma_coherent & 0x1F));
215                 dma_coherent_handle = dma_coherent_handle +
216                         (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
217         }
218
219         dma_addr = dma_coherent_handle;
220         ccb_tmp = (struct CommandControlBlock *)dma_coherent;
221         for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
222                 ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
223                 ccb_tmp->acb = acb;
224                 acb->pccb_pool[i] = ccb_tmp;
225                 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
226                 dma_addr = dma_addr + sizeof (struct CommandControlBlock);
227                 ccb_tmp++;
228         }
229
230         acb->vir2phy_offset = (unsigned long)ccb_tmp -
231                               (unsigned long)dma_addr;
232         for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
233                 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
234                         acb->devstate[i][j] = ARECA_RAID_GOOD;
235
236         /*
237         ** if the CCB pool sits above 4GB, we must tell the iop 331
238         ** the high 32 bits of its physical address
239         */
240         ccb_phyaddr_hi32 = (uint32_t) ((dma_coherent_handle >> 16) >> 16);
241         if (ccb_phyaddr_hi32 != 0) {
242                 writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->message_rwbuffer[0]);
243                 writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
244                 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
245                 if (arcmsr_wait_msgint_ready(acb))
246                         printk(KERN_NOTICE "arcmsr%d: "
247                                "'set ccb high part physical address' timeout\n",
248                                 acb->host->host_no);
249         }
250
251         writel(readl(&reg->outbound_intmask) |
252                         ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
253                &reg->outbound_intmask);
254         return 0;
255 }
256
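/*
 * PCI probe: enable the device, allocate the Scsi_Host plus adapter control
 * block, pick a 64-bit DMA mask with a 32-bit fallback, map BAR0 as the
 * MessageUnit registers, build the CCB pool, hook up the interrupt,
 * initialize the IOP and finally register and scan the SCSI host.
 */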
257 static int arcmsr_probe(struct pci_dev *pdev,
258         const struct pci_device_id *id)
259 {
260         struct Scsi_Host *host;
261         struct AdapterControlBlock *acb;
262         uint8_t bus, dev_fun;
263         int error;
264
265         error = pci_enable_device(pdev);
266         if (error)
267                 goto out;
268         pci_set_master(pdev);
269
270         host = scsi_host_alloc(&arcmsr_scsi_host_template,
271                         sizeof(struct AdapterControlBlock));
272         if (!host) {
273                 error = -ENOMEM;
274                 goto out_disable_device;
275         }
276         acb = (struct AdapterControlBlock *)host->hostdata;
277         memset(acb, 0, sizeof (struct AdapterControlBlock));
278
279         error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
280         if (error) {
281                 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
282                 if (error) {
283                         printk(KERN_WARNING
284                                "scsi%d: No suitable DMA mask available\n",
285                                host->host_no);
286                         goto out_host_put;
287                 }
288         }
289         bus = pdev->bus->number;
290         dev_fun = pdev->devfn;
291         acb->host = host;
292         acb->pdev = pdev;
293         host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
294         host->max_lun = ARCMSR_MAX_TARGETLUN;
295         host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
296         host->max_cmd_len = 16;    /* 16-byte CDBs are needed for 64-bit LBA (volumes over 2TB) */
297         host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
298         host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
299         host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
300         host->this_id = ARCMSR_SCSI_INITIATOR_ID;
301         host->unique_id = (bus << 8) | dev_fun;
302         host->irq = pdev->irq;
303         error = pci_request_regions(pdev, "arcmsr");
304         if (error)
305                 goto out_host_put;
306
307         acb->pmu = ioremap(pci_resource_start(pdev, 0),
308                            pci_resource_len(pdev, 0));
309         if (!acb->pmu) {
310                 printk(KERN_NOTICE "arcmsr%d: memory"
311                         " mapping region failed\n", acb->host->host_no);
                error = -ENOMEM;
312                 goto out_release_regions;
313         }
314         acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
315                            ACB_F_MESSAGE_RQBUFFER_CLEARED |
316                            ACB_F_MESSAGE_WQBUFFER_READED);
317         acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
318         INIT_LIST_HEAD(&acb->ccb_free_list);
319
320         error = arcmsr_alloc_ccb_pool(acb);
321         if (error)
322                 goto out_iounmap;
323
324         error = request_irq(pdev->irq, arcmsr_do_interrupt,
325                         IRQF_DISABLED | IRQF_SHARED, "arcmsr", acb);
326         if (error)
327                 goto out_free_ccb_pool;
328
329         arcmsr_iop_init(acb);
330         pci_set_drvdata(pdev, host);
331
332         error = scsi_add_host(host, &pdev->dev);
333         if (error)
334                 goto out_free_irq;
335
336         error = arcmsr_alloc_sysfs_attr(acb);
337         if (error)
338                 goto out_free_sysfs;
339
340         scsi_scan_host(host);
341         return 0;
342  out_free_sysfs:
        scsi_remove_host(host);
343  out_free_irq:
344         free_irq(pdev->irq, acb);
345  out_free_ccb_pool:
346         arcmsr_free_ccb_pool(acb);
347  out_iounmap:
348         iounmap(acb->pmu);
349  out_release_regions:
350         pci_release_regions(pdev);
351  out_host_put:
352         scsi_host_put(host);
353  out_disable_device:
354         pci_disable_device(pdev);
355  out:
356         return error;
357 }
358
359 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
360 {
361         struct MessageUnit __iomem *reg = acb->pmu;
362
363         writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
364         if (arcmsr_wait_msgint_ready(acb))
365                 printk(KERN_NOTICE
366                         "arcmsr%d: wait 'abort all outstanding commands' timeout\n"
367                         , acb->host->host_no);
368 }
369
370 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
371 {
372         struct AdapterControlBlock *acb = ccb->acb;
373         struct scsi_cmnd *pcmd = ccb->pcmd;
374
375         if (pcmd->use_sg != 0) {
376                 struct scatterlist *sl;
377
378                 sl = (struct scatterlist *)pcmd->request_buffer;
379                 pci_unmap_sg(acb->pdev, sl, pcmd->use_sg, pcmd->sc_data_direction);
380         }
381         else if (pcmd->request_bufflen != 0)
382                 pci_unmap_single(acb->pdev,
383                         pcmd->SCp.dma_handle,
384                         pcmd->request_bufflen, pcmd->sc_data_direction);
385 }
386
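/*
 * Return a finished CCB to the free list and complete its scsi_cmnd;
 * stand_flag == 1 means the CCB was counted as outstanding, so the
 * outstanding-command counter is dropped as well.
 */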
387 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
388 {
389         struct AdapterControlBlock *acb = ccb->acb;
390         struct scsi_cmnd *pcmd = ccb->pcmd;
391
392         arcmsr_pci_unmap_dma(ccb);
393         if (stand_flag == 1)
394                 atomic_dec(&acb->ccboutstandingcount);
395         ccb->startdone = ARCMSR_CCB_DONE;
396         ccb->ccb_flags = 0;
397         list_add_tail(&ccb->list, &acb->ccb_free_list);
398         pcmd->scsi_done(pcmd);
399 }
400
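/*
 * PCI remove: stop the background rebuild, flush the adapter cache, mask
 * all outbound interrupts, give outstanding CCBs a chance to drain and
 * abort whatever is left, then release the IRQ, register mapping, CCB pool
 * and PCI resources.
 */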
401 static void arcmsr_remove(struct pci_dev *pdev)
402 {
403         struct Scsi_Host *host = pci_get_drvdata(pdev);
404         struct AdapterControlBlock *acb =
405                 (struct AdapterControlBlock *) host->hostdata;
406         struct MessageUnit __iomem *reg = acb->pmu;
407         int poll_count = 0;
408
409         arcmsr_free_sysfs_attr(acb);
410         scsi_remove_host(host);
411         arcmsr_stop_adapter_bgrb(acb);
412         arcmsr_flush_adapter_cache(acb);
413         writel(readl(&reg->outbound_intmask) |
414                 ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
415                 &reg->outbound_intmask);
416         acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
417         acb->acb_flags &= ~ACB_F_IOP_INITED;
418
419         for (poll_count = 0; poll_count < 256; poll_count++) {
420                 if (!atomic_read(&acb->ccboutstandingcount))
421                         break;
422                 arcmsr_interrupt(acb);
423                 msleep(25);
424         }
425
426         if (atomic_read(&acb->ccboutstandingcount)) {
427                 int i;
428
429                 arcmsr_abort_allcmd(acb);
430                 for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
431                         readl(&reg->outbound_queueport);
432                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
433                         struct CommandControlBlock *ccb = acb->pccb_pool[i];
434                         if (ccb->startdone == ARCMSR_CCB_START) {
435                                 ccb->startdone = ARCMSR_CCB_ABORTED;
436                                 ccb->pcmd->result = DID_ABORT << 16;
437                                 arcmsr_ccb_complete(ccb, 1);
438                         }
439                 }
440         }
441
442         free_irq(pdev->irq, acb);
443         iounmap(acb->pmu);
444         arcmsr_free_ccb_pool(acb);
445         pci_release_regions(pdev);
446
447         scsi_host_put(host);
448
449         pci_disable_device(pdev);
450         pci_set_drvdata(pdev, NULL);
451 }
452
453 static void arcmsr_shutdown(struct pci_dev *pdev)
454 {
455         struct Scsi_Host *host = pci_get_drvdata(pdev);
456         struct AdapterControlBlock *acb =
457                 (struct AdapterControlBlock *)host->hostdata;
458
459         arcmsr_stop_adapter_bgrb(acb);
460         arcmsr_flush_adapter_cache(acb);
461 }
462
463 static int arcmsr_module_init(void)
464 {
465         int error = 0;
466
467         error = pci_register_driver(&arcmsr_pci_driver);
468         return error;
469 }
470
471 static void arcmsr_module_exit(void)
472 {
473         pci_unregister_driver(&arcmsr_pci_driver);
474 }
475 module_init(arcmsr_module_init);
476 module_exit(arcmsr_module_exit);
477
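/*
 * Mask every outbound interrupt source and return the previous mask so the
 * caller can later restore doorbell and post-queue interrupts through
 * arcmsr_enable_outbound_ints().
 */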
478 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
479 {
480         struct MessageUnit __iomem *reg = acb->pmu;
481         u32 orig_mask = readl(&reg->outbound_intmask);
482
483         writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
484                         &reg->outbound_intmask);
485         return orig_mask;
486 }
487
488 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
489                 u32 orig_mask)
490 {
491         struct MessageUnit __iomem *reg = acb->pmu;
492         u32 mask;
493
494         mask = orig_mask & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
495                              ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
496         writel(mask, &reg->outbound_intmask);
497 }
498
499 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
500 {
501         struct MessageUnit __iomem *reg = acb->pmu;
502
503         writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
504         if (arcmsr_wait_msgint_ready(acb))
505                 printk(KERN_NOTICE
506                         "arcmsr%d: wait 'flush adapter cache' timeout \n"
507                         , acb->host->host_no);
508 }
509
510 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
511 {
512         struct scsi_cmnd *pcmd = ccb->pcmd;
513         struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
514
515         pcmd->result = DID_OK << 16;
516         if (sensebuffer) {
517                 int sense_data_length =
518                         sizeof (struct SENSE_DATA) < sizeof (pcmd->sense_buffer)
519                         ? sizeof (struct SENSE_DATA) : sizeof (pcmd->sense_buffer);
520                 memset(sensebuffer, 0, sizeof (pcmd->sense_buffer));
521                 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
522                 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
523                 sensebuffer->Valid = 1;
524         }
525 }
526
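/*
 * Wait for the IOP to acknowledge the last inbound message by raising
 * ARCMSR_MU_OUTBOUND_MESSAGE0_INT; poll for up to about 20 seconds and
 * return 0 on success or 0xff on timeout.
 */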
527 static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb)
528 {
529         struct MessageUnit __iomem *reg = acb->pmu;
530         uint32_t Index;
531         uint8_t Retries = 0x00;
532
533         do {
534                 for (Index = 0; Index < 100; Index++) {
535                         if (readl(&reg->outbound_intstatus)
536                                 & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
537                                 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT
538                                         , &reg->outbound_intstatus);
539                                 return 0x00;
540                         }
541                         msleep_interruptible(10);
542                 }/* max 1 second */
543         } while (Retries++ < 20);/*max 20 sec*/
544         return 0xff;
545 }
546
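/*
 * Translate a scsi_cmnd into the Areca CDB embedded in the CCB: copy the
 * SCSI CDB itself, then map the data buffer (scatter-gather or single) and
 * emit 32-bit or 64-bit SG entries depending on each segment's DMA address.
 */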
547 static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
548         struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
549 {
550         struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
551         int8_t *psge = (int8_t *)&arcmsr_cdb->u;
552         uint32_t address_lo, address_hi;
553         int arccdbsize = 0x30;
554
555         ccb->pcmd = pcmd;
556         memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
557         arcmsr_cdb->Bus = 0;
558         arcmsr_cdb->TargetID = pcmd->device->id;
559         arcmsr_cdb->LUN = pcmd->device->lun;
560         arcmsr_cdb->Function = 1;
561         arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
562         arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
563         memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
564         if (pcmd->use_sg) {
565                 int length, sgcount, i, cdb_sgcount = 0;
566                 struct scatterlist *sl;
567
568                 /* Get Scatter Gather List from scsiport. */
569                 sl = (struct scatterlist *) pcmd->request_buffer;
570                 sgcount = pci_map_sg(acb->pdev, sl, pcmd->use_sg,
571                                 pcmd->sc_data_direction);
572                 /* map stor port SG list to our iop SG List. */
573                 for (i = 0; i < sgcount; i++) {
574                         /* Get the physical address of the current data pointer */
575                         length = cpu_to_le32(sg_dma_len(sl));
576                         address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sl)));
577                         address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sl)));
578                         if (address_hi == 0) {
579                                 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
580
581                                 pdma_sg->address = address_lo;
582                                 pdma_sg->length = length;
583                                 psge += sizeof (struct SG32ENTRY);
584                                 arccdbsize += sizeof (struct SG32ENTRY);
585                         } else {
586                                 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
587
588                                 pdma_sg->addresshigh = address_hi;
589                                 pdma_sg->address = address_lo;
590                                 pdma_sg->length = length|IS_SG64_ADDR;
591                                 psge += sizeof (struct SG64ENTRY);
592                                 arccdbsize += sizeof (struct SG64ENTRY);
593                         }
594                         sl++;
595                         cdb_sgcount++;
596                 }
597                 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
598                 arcmsr_cdb->DataLength = pcmd->request_bufflen;
599                 if (arccdbsize > 256)
600                         arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
601         } else if (pcmd->request_bufflen) {
602                 dma_addr_t dma_addr;
603                 dma_addr = pci_map_single(acb->pdev, pcmd->request_buffer,
604                                 pcmd->request_bufflen, pcmd->sc_data_direction);
605                 pcmd->SCp.dma_handle = dma_addr;
606                 address_lo = cpu_to_le32(dma_addr_lo32(dma_addr));
607                 address_hi = cpu_to_le32(dma_addr_hi32(dma_addr));
608                 if (address_hi == 0) {
609                         struct  SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
610                         pdma_sg->address = address_lo;
611                         pdma_sg->length = pcmd->request_bufflen;
612                 } else {
613                         struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
614                         pdma_sg->addresshigh = address_hi;
615                         pdma_sg->address = address_lo;
616                         pdma_sg->length = pcmd->request_bufflen|IS_SG64_ADDR;
617                 }
618                 arcmsr_cdb->sgcount = 1;
619                 arcmsr_cdb->DataLength = pcmd->request_bufflen;
620         }
621         if (pcmd->sc_data_direction == DMA_TO_DEVICE) {
622                 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
623                 ccb->ccb_flags |= CCB_FLAG_WRITE;
624         }
625 }
626
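/*
 * Post a built CCB to the IOP by writing its shifted physical address into
 * the inbound queue port; the SGL_BSIZE flag marks CDBs whose SG list made
 * them grow past 256 bytes.
 */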
627 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
628 {
629         struct MessageUnit __iomem *reg = acb->pmu;
630         uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
631         struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
632
633         atomic_inc(&acb->ccboutstandingcount);
634         ccb->startdone = ARCMSR_CCB_START;
635         if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
636                 writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
637                         &reg->inbound_queueport);
638         else
639                 writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
640 }
641
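/*
 * Copy up to 124 bytes from the driver's circular write-queue buffer into
 * the IOP's message_wbuffer and ring the inbound doorbell so the firmware
 * picks the data up.
 */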
642 void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb)
643 {
644         struct MessageUnit __iomem *reg = acb->pmu;
645         struct QBUFFER __iomem *pwbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
646         uint8_t __iomem *iop_data = (uint8_t __iomem *) pwbuffer->data;
647         int32_t allxfer_len = 0;
648
649         if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
650                 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
651                 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
652                         && (allxfer_len < 124)) {
653                         writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
654                         acb->wqbuf_firstindex++;
655                         acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
656                         iop_data++;
657                         allxfer_len++;
658                 }
659                 writel(allxfer_len, &pwbuffer->data_len);
660                 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK
661                         , &reg->inbound_doorbell);
662         }
663 }
664
665 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
666 {
667         struct MessageUnit __iomem *reg = acb->pmu;
668
669         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
670         writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
671         if (arcmsr_wait_msgint_ready(acb))
672                 printk(KERN_NOTICE
673                         "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
674                         , acb->host->host_no);
675 }
676
677 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
678 {
679         dma_free_coherent(&acb->pdev->dev,
680                 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
681                 acb->dma_coherent,
682                 acb->dma_coherent_handle);
683 }
684
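/*
 * Core interrupt service routine, called with the host lock held: handles
 * doorbell traffic for the message-queue buffers in both directions and
 * drains the outbound post queue, completing each returned CCB according
 * to its DeviceStatus.
 */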
685 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
686 {
687         struct MessageUnit __iomem *reg = acb->pmu;
688         struct CommandControlBlock *ccb;
689         uint32_t flag_ccb, outbound_intstatus, outbound_doorbell;
690
691         outbound_intstatus = readl(&reg->outbound_intstatus)
692                 & acb->outbound_int_enable;
693         writel(outbound_intstatus, &reg->outbound_intstatus);
694         if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
695                 outbound_doorbell = readl(&reg->outbound_doorbell);
696                 writel(outbound_doorbell, &reg->outbound_doorbell);
697                 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
698                         struct QBUFFER __iomem * prbuffer =
699                                 (struct QBUFFER __iomem *) &reg->message_rbuffer;
700                         uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
701                         int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
702
703                         rqbuf_lastindex = acb->rqbuf_lastindex;
704                         rqbuf_firstindex = acb->rqbuf_firstindex;
705                         iop_len = readl(&prbuffer->data_len);
706                         my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1)
707                                         &(ARCMSR_MAX_QBUFFER - 1);
708                         if (my_empty_len >= iop_len) {
709                                 while (iop_len > 0) {
710                                         acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
711                                         acb->rqbuf_lastindex++;
712                                         acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
713                                         iop_data++;
714                                         iop_len--;
715                                 }
716                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
717                                         &reg->inbound_doorbell);
718                         } else
719                                 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
720                 }
721                 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
722                         acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
723                         if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
724                                 struct QBUFFER __iomem * pwbuffer =
725                                                 (struct QBUFFER __iomem *) &reg->message_wbuffer;
726                                 uint8_t __iomem * iop_data = (uint8_t __iomem *) pwbuffer->data;
727                                 int32_t allxfer_len = 0;
728
729                                 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
730                                 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
731                                         && (allxfer_len < 124)) {
732                                         writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
733                                         acb->wqbuf_firstindex++;
734                                         acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
735                                         iop_data++;
736                                         allxfer_len++;
737                                 }
738                                 writel(allxfer_len, &pwbuffer->data_len);
739                                 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK,
740                                         &reg->inbound_doorbell);
741                         }
742                         if (acb->wqbuf_firstindex == acb->wqbuf_lastindex)
743                                 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
744                 }
745         }
746         if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
747                 int id, lun;
748                 /*
749                 ****************************************************************
750                 **               areca cdb command done
751                 ****************************************************************
752                 */
753                 while (1) {
754                         if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF)
755                                 break;/*chip FIFO no ccb for completion already*/
756                         /* check if command done with no error*/
757                         ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
758                                 (flag_ccb << 5));
759                         if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
760                                 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
761                                         struct scsi_cmnd *abortcmd = ccb->pcmd;
762                                         if (abortcmd) {
763                                                 abortcmd->result |= DID_ABORT << 16;
764                                                 arcmsr_ccb_complete(ccb, 1);
765                                                 printk(KERN_NOTICE
766                                                         "arcmsr%d: ccb='0x%p' isr got aborted command\n"
767                                                         , acb->host->host_no, ccb);
768                                         }
769                                         continue;
770                                 }
771                                 printk(KERN_NOTICE
772                                         "arcmsr%d: isr get an illegal ccb command done acb='0x%p'"
773                                         "ccb='0x%p' ccbacb='0x%p' startdone = 0x%x"
774                                         " ccboutstandingcount=%d \n"
775                                         , acb->host->host_no
776                                         , acb
777                                         , ccb
778                                         , ccb->acb
779                                         , ccb->startdone
780                                         , atomic_read(&acb->ccboutstandingcount));
781                                 continue;
782                         }
783                         id = ccb->pcmd->device->id;
784                         lun = ccb->pcmd->device->lun;
785                         if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
786                                 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
787                                         acb->devstate[id][lun] = ARECA_RAID_GOOD;
788                                 ccb->pcmd->result = DID_OK << 16;
789                                 arcmsr_ccb_complete(ccb, 1);
790                         } else {
791                                 switch(ccb->arcmsr_cdb.DeviceStatus) {
792                                 case ARCMSR_DEV_SELECT_TIMEOUT: {
793                                                 acb->devstate[id][lun] = ARECA_RAID_GONE;
794                                                 ccb->pcmd->result = DID_TIME_OUT << 16;
795                                                 arcmsr_ccb_complete(ccb, 1);
796                                         }
797                                         break;
798                                 case ARCMSR_DEV_ABORTED:
799                                 case ARCMSR_DEV_INIT_FAIL: {
800                                                 acb->devstate[id][lun] = ARECA_RAID_GONE;
801                                                 ccb->pcmd->result = DID_BAD_TARGET << 16;
802                                                 arcmsr_ccb_complete(ccb, 1);
803                                         }
804                                         break;
805                                 case ARCMSR_DEV_CHECK_CONDITION: {
806                                                 acb->devstate[id][lun] = ARECA_RAID_GOOD;
807                                                 arcmsr_report_sense_info(ccb);
808                                                 arcmsr_ccb_complete(ccb, 1);
809                                         }
810                                         break;
811                                 default:
812                                         printk(KERN_NOTICE
813                                                 "arcmsr%d: scsi id=%d lun=%d"
814                                                 " isr get command error done,"
815                                                 "but got unknown DeviceStatus = 0x%x \n"
816                                                 , acb->host->host_no
817                                                 , id
818                                                 , lun
819                                                 , ccb->arcmsr_cdb.DeviceStatus);
820                                                 acb->devstate[id][lun] = ARECA_RAID_GONE;
821                                                 ccb->pcmd->result = DID_NO_CONNECT << 16;
822                                                 arcmsr_ccb_complete(ccb, 1);
823                                         break;
824                                 }
825                         }
826                 }/*drain reply FIFO*/
827         }
828         if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
829                 return IRQ_NONE;
830         return IRQ_HANDLED;
831 }
832
833 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
834 {
835         if (acb) {
836                 /* stop adapter background rebuild */
837                 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
838                         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
839                         arcmsr_stop_adapter_bgrb(acb);
840                         arcmsr_flush_adapter_cache(acb);
841                 }
842         }
843 }
844
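/*
 * Service the vendor messages tunnelled through READ_BUFFER/WRITE_BUFFER
 * commands on the virtual device: the control code in CDB bytes 5-8 selects
 * read, write or clear operations on the message-queue buffers, plus a few
 * adapter maintenance commands.
 */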
845 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd)
846 {
847         struct MessageUnit __iomem *reg = acb->pmu;
848         struct CMD_MESSAGE_FIELD *pcmdmessagefld;
849         int retvalue = 0, transfer_len = 0;
850         char *buffer;
851         uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
852                                                 (uint32_t ) cmd->cmnd[6] << 16 |
853                                                 (uint32_t ) cmd->cmnd[7] << 8  |
854                                                 (uint32_t ) cmd->cmnd[8];
855                                         /* 4 bytes: Areca io control code */
856         if (cmd->use_sg) {
857                 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
858
859                 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
860                 if (cmd->use_sg > 1) {
861                         retvalue = ARCMSR_MESSAGE_FAIL;
862                         goto message_out;
863                 }
864                 transfer_len += sg->length;
865         } else {
866                 buffer = cmd->request_buffer;
867                 transfer_len = cmd->request_bufflen;
868         }
869         if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
870                 retvalue = ARCMSR_MESSAGE_FAIL;
871                 goto message_out;
872         }
873         pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
874         switch(controlcode) {
875         case ARCMSR_MESSAGE_READ_RQBUFFER: {
876                         unsigned long *ver_addr;
877                         dma_addr_t buf_handle;
878                         uint8_t *pQbuffer, *ptmpQbuffer;
879                         int32_t allxfer_len = 0;
880
881                         ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
882                         if (!ver_addr) {
883                                 retvalue = ARCMSR_MESSAGE_FAIL;
884                                 goto message_out;
885                         }
886                         ptmpQbuffer = (uint8_t *) ver_addr;
887                         while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
888                                 && (allxfer_len < 1031)) {
889                                 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
890                                 memcpy(ptmpQbuffer, pQbuffer, 1);
891                                 acb->rqbuf_firstindex++;
892                                 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
893                                 ptmpQbuffer++;
894                                 allxfer_len++;
895                         }
896                         if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
897                                 struct QBUFFER __iomem * prbuffer = (struct QBUFFER __iomem *)
898                                                         &reg->message_rbuffer;
899                                 uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
900                                 int32_t iop_len;
901
902                                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
903                                 iop_len = readl(&prbuffer->data_len);
904                                 while (iop_len > 0) {
905                                         acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
906                                         acb->rqbuf_lastindex++;
907                                         acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
908                                         iop_data++;
909                                         iop_len--;
910                                 }
911                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
912                                                 &reg->inbound_doorbell);
913                         }
914                         memcpy(pcmdmessagefld->messagedatabuffer,
915                                 (uint8_t *)ver_addr, allxfer_len);
916                         pcmdmessagefld->cmdmessage.Length = allxfer_len;
917                         pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
918                         pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
919                 }
920                 break;
921         case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
922                         unsigned long *ver_addr;
923                         dma_addr_t buf_handle;
924                         int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
925                         uint8_t *pQbuffer, *ptmpuserbuffer;
926
927                         ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
928                         if (!ver_addr) {
929                                 retvalue = ARCMSR_MESSAGE_FAIL;
930                                 goto message_out;
931                         }
932                         ptmpuserbuffer = (uint8_t *)ver_addr;
933                         user_len = pcmdmessagefld->cmdmessage.Length;
934                         memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
935                         wqbuf_lastindex = acb->wqbuf_lastindex;
936                         wqbuf_firstindex = acb->wqbuf_firstindex;
937                         if (wqbuf_lastindex != wqbuf_firstindex) {
938                                 struct SENSE_DATA *sensebuffer =
939                                         (struct SENSE_DATA *)cmd->sense_buffer;
940                                 arcmsr_post_Qbuffer(acb);
941                                 /* has error report sensedata */
942                                 sensebuffer->ErrorCode = 0x70;
943                                 sensebuffer->SenseKey = ILLEGAL_REQUEST;
944                                 sensebuffer->AdditionalSenseLength = 0x0A;
945                                 sensebuffer->AdditionalSenseCode = 0x20;
946                                 sensebuffer->Valid = 1;
947                                 retvalue = ARCMSR_MESSAGE_FAIL;
948                         } else {
949                                 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
950                                                 &(ARCMSR_MAX_QBUFFER - 1);
951                                 if (my_empty_len >= user_len) {
952                                         while (user_len > 0) {
953                                                 pQbuffer =
954                                                 &acb->wqbuffer[acb->wqbuf_lastindex];
955                                                 memcpy(pQbuffer, ptmpuserbuffer, 1);
956                                                 acb->wqbuf_lastindex++;
957                                                 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
958                                                 ptmpuserbuffer++;
959                                                 user_len--;
960                                         }
961                                         if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
962                                                 acb->acb_flags &=
963                                                         ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
964                                                 arcmsr_post_Qbuffer(acb);
965                                         }
966                                 } else {
967                                         /* has error report sensedata */
968                                         struct SENSE_DATA *sensebuffer =
969                                                 (struct SENSE_DATA *)cmd->sense_buffer;
970                                         sensebuffer->ErrorCode = 0x70;
971                                         sensebuffer->SenseKey = ILLEGAL_REQUEST;
972                                         sensebuffer->AdditionalSenseLength = 0x0A;
973                                         sensebuffer->AdditionalSenseCode = 0x20;
974                                         sensebuffer->Valid = 1;
975                                         retvalue = ARCMSR_MESSAGE_FAIL;
976                                 }
977                         }
978                         pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
979                 }
980                 break;
981         case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
982                         uint8_t *pQbuffer = acb->rqbuffer;
983
984                         if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
985                                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
986                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
987                                         &reg->inbound_doorbell);
988                         }
989                         acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
990                         acb->rqbuf_firstindex = 0;
991                         acb->rqbuf_lastindex = 0;
992                         memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
993                         pcmdmessagefld->cmdmessage.ReturnCode =
994                                 ARCMSR_MESSAGE_RETURNCODE_OK;
995                 }
996                 break;
997         case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
998                         uint8_t *pQbuffer = acb->wqbuffer;
999
1000                         if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1001                                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1002                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
1003                                                 , &reg->inbound_doorbell);
1004                         }
1005                         acb->acb_flags |=
1006                                 (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1007                                         ACB_F_MESSAGE_WQBUFFER_READED);
1008                         acb->wqbuf_firstindex = 0;
1009                         acb->wqbuf_lastindex = 0;
1010                         memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1011                         pcmdmessagefld->cmdmessage.ReturnCode =
1012                                 ARCMSR_MESSAGE_RETURNCODE_OK;
1013                 }
1014                 break;
1015         case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1016                         uint8_t *pQbuffer;
1017
1018                         if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1019                                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1020                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
1021                                                 , &reg->inbound_doorbell);
1022                         }
1023                         acb->acb_flags |=
1024                                 (ACB_F_MESSAGE_WQBUFFER_CLEARED
1025                                 | ACB_F_MESSAGE_RQBUFFER_CLEARED
1026                                 | ACB_F_MESSAGE_WQBUFFER_READED);
1027                         acb->rqbuf_firstindex = 0;
1028                         acb->rqbuf_lastindex = 0;
1029                         acb->wqbuf_firstindex = 0;
1030                         acb->wqbuf_lastindex = 0;
1031                         pQbuffer = acb->rqbuffer;
1032                         memset(pQbuffer, 0, sizeof (struct QBUFFER));
1033                         pQbuffer = acb->wqbuffer;
1034                         memset(pQbuffer, 0, sizeof (struct QBUFFER));
1035                         pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1036                 }
1037                 break;
1038         case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1039                         pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1040                 }
1041                 break;
1042         case ARCMSR_MESSAGE_SAY_HELLO: {
1043                         char *hello_string = "Hello! I am ARCMSR";
1044
1045                         memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1046                                 , (int16_t)strlen(hello_string));
1047                         pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1048                 }
1049                 break;
1050         case ARCMSR_MESSAGE_SAY_GOODBYE:
1051                 arcmsr_iop_parking(acb);
1052                 break;
1053         case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1054                 arcmsr_flush_adapter_cache(acb);
1055                 break;
1056         default:
1057                 retvalue = ARCMSR_MESSAGE_FAIL;
1058         }
1059  message_out:
1060         if (cmd->use_sg) {
1061                 struct scatterlist *sg;
1062
1063                 sg = (struct scatterlist *) cmd->request_buffer;
1064                 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1065         }
1066         return retvalue;
1067 }
1068
1069 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
1070 {
1071         struct list_head *head = &acb->ccb_free_list;
1072         struct CommandControlBlock *ccb = NULL;
1073
1074         if (!list_empty(head)) {
1075                 ccb = list_entry(head->next, struct CommandControlBlock, list);
1076                 list_del(head->next);
1077         }
1078         return ccb;
1079 }
1080
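/*
 * Commands addressed to the virtual target never reach a real disk:
 * INQUIRY is answered with a synthetic "Areca RAID controller" processor
 * device, and READ_BUFFER/WRITE_BUFFER are passed to
 * arcmsr_iop_message_xfer().
 */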
1081 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1082                 struct scsi_cmnd *cmd)
1083 {
1084         switch (cmd->cmnd[0]) {
1085         case INQUIRY: {
1086                 unsigned char inqdata[36];
1087                 char *buffer;
1088
1089                 if (cmd->device->lun) {
1090                         cmd->result = (DID_TIME_OUT << 16);
1091                         cmd->scsi_done(cmd);
1092                         return;
1093                 }
1094                 inqdata[0] = TYPE_PROCESSOR;
1095                 /* Periph Qualifier & Periph Dev Type */
1096                 inqdata[1] = 0;
1097                 /* rem media bit & Dev Type Modifier */
1098                 inqdata[2] = 0;
1099                 /* ISO,ECMA,& ANSI versions */
1100                 inqdata[4] = 31;
1101                 /* length of additional data */
1102                 strncpy(&inqdata[8], "Areca   ", 8);
1103                 /* Vendor Identification */
1104                 strncpy(&inqdata[16], "RAID controller ", 16);
1105                 /* Product Identification */
1106                 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1107                 if (cmd->use_sg) {
1108                         struct scatterlist *sg;
1109
1110                         sg = (struct scatterlist *) cmd->request_buffer;
1111                         buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1112                 } else {
1113                         buffer = cmd->request_buffer;
1114                 }
1115                 memcpy(buffer, inqdata, sizeof(inqdata));
1116                 if (cmd->use_sg) {
1117                         struct scatterlist *sg;
1118
1119                         sg = (struct scatterlist *) cmd->request_buffer;
1120                         kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1121                 }
1122                 cmd->scsi_done(cmd);
1123         }
1124         break;
1125         case WRITE_BUFFER:
1126         case READ_BUFFER: {
1127                 if (arcmsr_iop_message_xfer(acb, cmd))
1128                         cmd->result = (DID_ERROR << 16);
1129                 cmd->scsi_done(cmd);
1130         }
1131         break;
1132         default:
1133                 cmd->scsi_done(cmd);
1134         }
1135 }
1136
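/*
 * queuecommand entry point: return busy while a bus reset is pending,
 * divert target 16 to the virtual-device handler, reject reads and writes
 * to volumes marked ARECA_RAID_GONE, and otherwise wrap the command in a
 * free CCB and post it to the IOP.
 */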
1137 static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1138         void (* done)(struct scsi_cmnd *))
1139 {
1140         struct Scsi_Host *host = cmd->device->host;
1141         struct AdapterControlBlock *acb =
1142                 (struct AdapterControlBlock *) host->hostdata;
1143         struct CommandControlBlock *ccb;
1144         int target = cmd->device->id;
1145         int lun = cmd->device->lun;
1146
1147         cmd->scsi_done = done;
1148         cmd->host_scribble = NULL;
1149         cmd->result = 0;
1150         if (acb->acb_flags & ACB_F_BUS_RESET) {
1151                 printk(KERN_NOTICE "arcmsr%d: bus reset in progress,"
1152                         " returning busy\n"
1153                         , acb->host->host_no);
1154                 return SCSI_MLQUEUE_HOST_BUSY;
1155         }
1156         if (target == 16) {
1157                 /* virtual device for iop message transfer */
1158                 arcmsr_handle_virtual_command(acb, cmd);
1159                 return 0;
1160         }
1161         if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1162                 uint8_t block_cmd;
1163
1164                 block_cmd = cmd->cmnd[0] & 0x0f;
1165                 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1166                         printk(KERN_NOTICE
1167                                 "arcmsr%d: block 'read/write'"
1168                                 " command with gone raid volume"
1169                                 " Cmd=%2x, TargetId=%d, Lun=%d\n"
1170                                 , acb->host->host_no
1171                                 , cmd->cmnd[0]
1172                                 , target, lun);
1173                         cmd->result = (DID_NO_CONNECT << 16);
1174                         cmd->scsi_done(cmd);
1175                         return 0;
1176                 }
1177         }
1178         if (atomic_read(&acb->ccboutstandingcount) >=
1179                         ARCMSR_MAX_OUTSTANDING_CMD)
1180                 return SCSI_MLQUEUE_HOST_BUSY;
1181
1182         ccb = arcmsr_get_freeccb(acb);
1183         if (!ccb)
1184                 return SCSI_MLQUEUE_HOST_BUSY;
1185         arcmsr_build_ccb(acb, ccb, cmd);
1186         arcmsr_post_ccb(acb, ccb);
1187         return 0;
1188 }
1189
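/*
** arcmsr_get_firmware_spec: send the 'get config' message to the IOP,
** then copy the 8-byte firmware model and 16-byte firmware version
** strings plus the request length, queue number, sdram size and hd
** channel values out of message_rwbuffer.
*/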
1190 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
1191 {
1192         struct MessageUnit __iomem *reg = acb->pmu;
1193         char *acb_firm_model = acb->firm_model;
1194         char *acb_firm_version = acb->firm_version;
1195         char __iomem *iop_firm_model = (char __iomem *) &reg->message_rwbuffer[15];
1196         char __iomem *iop_firm_version = (char __iomem *) &reg->message_rwbuffer[17];
1197         int count;
1198
1199         writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1200         if (arcmsr_wait_msgint_ready(acb))
1201                 printk(KERN_NOTICE
1202                         "arcmsr%d: wait "
1203                         "'get adapter firmware miscellaneous data' timeout\n"
1204                         , acb->host->host_no);
1205         count = 8;
1206         while (count) {
1207                 *acb_firm_model = readb(iop_firm_model);
1208                 acb_firm_model++;
1209                 iop_firm_model++;
1210                 count--;
1211         }
1212         count = 16;
1213         while (count) {
1214                 *acb_firm_version = readb(iop_firm_version);
1215                 acb_firm_version++;
1216                 iop_firm_version++;
1217                 count--;
1218         }
1219         printk(KERN_INFO
1220                 "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s\n"
1221                 , acb->host->host_no
1222                 , acb->firm_version);
1223         acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1224         acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1225         acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1226         acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1227 }
1228
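/*
** arcmsr_polling_ccbdone: poll the outbound post queue (with outbound
** interrupts disabled by the caller) and complete each returned CCB;
** used by the abort path to reap poll_ccb once it has been aborted.
*/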
1229 static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1230         struct CommandControlBlock *poll_ccb)
1231 {
1232         struct MessageUnit __iomem *reg = acb->pmu;
1233         struct CommandControlBlock *ccb;
1234         uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
1235         int id, lun;
1236
1237  polling_ccb_retry:
1238         poll_count++;
1239         outbound_intstatus = readl(&reg->outbound_intstatus)
1240                                         & acb->outbound_int_enable;
1241         writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
1242         while (1) {
1243                 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
1244                         if (poll_ccb_done)
1245                                 break;
1246                         else {
1247                                 msleep(25);
1248                                 if (poll_count > 100)
1249                                         break;
1250                                 goto polling_ccb_retry;
1251                         }
1252                 }
1253                 ccb = (struct CommandControlBlock *)
1254                         (acb->vir2phy_offset + (flag_ccb << 5));
1255                 if ((ccb->acb != acb) ||
1256                         (ccb->startdone != ARCMSR_CCB_START)) {
1257                         if ((ccb->startdone == ARCMSR_CCB_ABORTED) ||
1258                                 (ccb == poll_ccb)) {
1259                                 printk(KERN_NOTICE
1260                                         "arcmsr%d: scsi id=%d lun=%d ccb='0x%p'"
1261                                         " poll command aborted successfully\n"
1262                                         , acb->host->host_no
1263                                         , ccb->pcmd->device->id
1264                                         , ccb->pcmd->device->lun
1265                                         , ccb);
1266                                 ccb->pcmd->result = DID_ABORT << 16;
1267                                 arcmsr_ccb_complete(ccb, 1);
1268                                 poll_ccb_done = 1;
1269                                 continue;
1270                         }
1271                         printk(KERN_NOTICE
1272                                 "arcmsr%d: polling got an illegal ccb"
1273                                 " command done ccb='0x%p'"
1274                                 " ccboutstandingcount=%d\n"
1275                                 , acb->host->host_no
1276                                 , ccb
1277                                 , atomic_read(&acb->ccboutstandingcount));
1278                         continue;
1279                 }
1280                 id = ccb->pcmd->device->id;
1281                 lun = ccb->pcmd->device->lun;
1282                 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
1283                         if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1284                                 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1285                         ccb->pcmd->result = DID_OK << 16;
1286                         arcmsr_ccb_complete(ccb, 1);
1287                 } else {
1288                         switch (ccb->arcmsr_cdb.DeviceStatus) {
1289                         case ARCMSR_DEV_SELECT_TIMEOUT: {
1290                                         acb->devstate[id][lun] = ARECA_RAID_GONE;
1291                                         ccb->pcmd->result = DID_TIME_OUT << 16;
1292                                         arcmsr_ccb_complete(ccb, 1);
1293                                 }
1294                                 break;
1295                         case ARCMSR_DEV_ABORTED:
1296                         case ARCMSR_DEV_INIT_FAIL: {
1297                                         acb->devstate[id][lun] = ARECA_RAID_GONE;
1298                                         ccb->pcmd->result = DID_BAD_TARGET << 16;
1299                                         arcmsr_ccb_complete(ccb, 1);
1300                                 }
1301                                 break;
1302                         case ARCMSR_DEV_CHECK_CONDITION: {
1303                                         acb->devstate[id][lun] = ARECA_RAID_GOOD;
1304                                         arcmsr_report_sense_info(ccb);
1305                                         arcmsr_ccb_complete(ccb, 1);
1306                                 }
1307                                 break;
1308                         default:
1309                                 printk(KERN_NOTICE
1310                                         "arcmsr%d: scsi id=%d lun=%d"
1311                                         " polling command error done,"
1312                                         " but got unknown DeviceStatus = 0x%x\n"
1313                                         , acb->host->host_no
1314                                         , id
1315                                         , lun
1316                                         , ccb->arcmsr_cdb.DeviceStatus);
1317                                 acb->devstate[id][lun] = ARECA_RAID_GONE;
1318                                 ccb->pcmd->result = DID_BAD_TARGET << 16;
1319                                 arcmsr_ccb_complete(ccb, 1);
1320                                 break;
1321                         }
1322                 }
1323         }
1324 }
1325
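/*
** arcmsr_iop_init: bring the IOP online - wait for the firmware-ok
** flag, read the firmware configuration, start background rebuild,
** clear any pending outbound doorbell, signal data-read-ok, and unmask
** the post-queue and doorbell interrupts.
*/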
1326 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
1327 {
1328         struct MessageUnit __iomem *reg = acb->pmu;
1329         uint32_t intmask_org, mask, outbound_doorbell, firmware_state = 0;
1330
1331         do {
1332                 firmware_state = readl(&reg->outbound_msgaddr1);
1333         } while (!(firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK));
1334         intmask_org = readl(&reg->outbound_intmask)
1335                         | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
1336         arcmsr_get_firmware_spec(acb);
1337
1338         acb->acb_flags |= ACB_F_MSG_START_BGRB;
1339         writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
1340         if (arcmsr_wait_msgint_ready(acb)) {
1341                 printk(KERN_NOTICE "arcmsr%d: "
1342                         "wait 'start adapter background rebuild' timeout\n",
1343                         acb->host->host_no);
1344         }
1345
1346         outbound_doorbell = readl(&reg->outbound_doorbell);
1347         writel(outbound_doorbell, &reg->outbound_doorbell);
1348         writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
1349         mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE
1350                         | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
1351         writel(intmask_org & mask, &reg->outbound_intmask);
1352         acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
1353         acb->acb_flags |= ACB_F_IOP_INITED;
1354 }
1355
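/*
** arcmsr_iop_reset: abort everything the IOP still owns - tell the
** firmware to abort outstanding commands, drain the outbound post
** queue, and complete any CCB still marked started or aborted with
** DID_ABORT before zeroing the outstanding count.
*/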
1356 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
1357 {
1358         struct MessageUnit __iomem *reg = acb->pmu;
1359         struct CommandControlBlock *ccb;
1360         uint32_t intmask_org;
1361         int i = 0;
1362
1363         if (atomic_read(&acb->ccboutstandingcount) != 0) {
1364                 /* tell the iop 331 to abort all outstanding commands */
1365                 arcmsr_abort_allcmd(acb);
1366                 /* wait 3 seconds for all commands to be aborted */
1367                 msleep_interruptible(3000);
1368                 /* disable all outbound interrupt */
1369                 intmask_org = arcmsr_disable_outbound_ints(acb);
1370                 /* clear all outbound posted Q */
1371                 for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
1372                         readl(&reg->outbound_queueport);
1373                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1374                         ccb = acb->pccb_pool[i];
1375                         if ((ccb->startdone == ARCMSR_CCB_START) ||
1376                                 (ccb->startdone == ARCMSR_CCB_ABORTED)) {
1377                                 ccb->startdone = ARCMSR_CCB_ABORTED;
1378                                 ccb->pcmd->result = DID_ABORT << 16;
1379                                 arcmsr_ccb_complete(ccb, 1);
1380                         }
1381                 }
1382                 /* enable all outbound interrupt */
1383                 arcmsr_enable_outbound_ints(acb, intmask_org);
1384         }
1385         atomic_set(&acb->ccboutstandingcount, 0);
1386 }
1387
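/*
** arcmsr_bus_reset: SCSI error-handler bus reset callback.  Polls the
** interrupt handler for up to 10 seconds waiting for outstanding
** commands to drain, then forces an IOP reset.
*/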
1388 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
1389 {
1390         struct AdapterControlBlock *acb =
1391                 (struct AdapterControlBlock *)cmd->device->host->hostdata;
1392         int i;
1393
1394         acb->num_resets++;
1395         acb->acb_flags |= ACB_F_BUS_RESET;
1396         for (i = 0; i < 400; i++) {
1397                 if (!atomic_read(&acb->ccboutstandingcount))
1398                         break;
1399                 arcmsr_interrupt(acb);
1400                 msleep(25);
1401         }
1402         arcmsr_iop_reset(acb);
1403         acb->acb_flags &= ~ACB_F_BUS_RESET;
1404         return SUCCESS;
1405 }
1406
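/*
** arcmsr_abort_one_cmd: mark a single CCB aborted, give the firmware
** time to finish it, then poll the outbound queue with outbound
** interrupts disabled to reap the completion.
*/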
1407 static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
1408                 struct CommandControlBlock *ccb)
1409 {
1410         u32 intmask;
1411
1412         ccb->startdone = ARCMSR_CCB_ABORTED;
1413
1414         /*
1415         ** Wait 3 seconds for all outstanding commands to complete.
1416         */
1417         msleep_interruptible(3000);
1418
1419         intmask = arcmsr_disable_outbound_ints(acb);
1420         arcmsr_polling_ccbdone(acb, ccb);
1421         arcmsr_enable_outbound_ints(acb, intmask);
1422 }
1423
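/*
** arcmsr_abort: SCSI error-handler abort callback.  Looks up the CCB
** that carries the failing command and tries to abort just that one.
*/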
1424 static int arcmsr_abort(struct scsi_cmnd *cmd)
1425 {
1426         struct AdapterControlBlock *acb =
1427                 (struct AdapterControlBlock *)cmd->device->host->hostdata;
1428         int i = 0;
1429
1430         printk(KERN_NOTICE
1431                 "arcmsr%d: abort device command of scsi id=%d lun=%d\n",
1432                 acb->host->host_no, cmd->device->id, cmd->device->lun);
1433         acb->num_aborts++;
1434
1435         /*
1436         ************************************************
1437         ** the whole interrupt service routine is locked out here,
1438         ** so we need to handle this as quickly as possible and exit
1439         ************************************************
1440         */
1441         if (!atomic_read(&acb->ccboutstandingcount))
1442                 return SUCCESS;
1443
1444         for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1445                 struct CommandControlBlock *ccb = acb->pccb_pool[i];
1446                 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
1447                         arcmsr_abort_one_cmd(acb, ccb);
1448                         break;
1449                 }
1450         }
1451
1452         return SUCCESS;
1453 }
1454
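/*
** arcmsr_info: return a human readable description of the adapter
** (SATA or SAS family, RAID6 capability and driver version) for the
** SCSI midlayer.
*/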
1455 static const char *arcmsr_info(struct Scsi_Host *host)
1456 {
1457         struct AdapterControlBlock *acb =
1458                 (struct AdapterControlBlock *) host->hostdata;
1459         static char buf[256];
1460         char *type;
1461         int raid6 = 1;
1462
1463         switch (acb->pdev->device) {
1464         case PCI_DEVICE_ID_ARECA_1110:
1465         case PCI_DEVICE_ID_ARECA_1210:
1466                 raid6 = 0;
1467                 /*FALLTHRU*/
1468         case PCI_DEVICE_ID_ARECA_1120:
1469         case PCI_DEVICE_ID_ARECA_1130:
1470         case PCI_DEVICE_ID_ARECA_1160:
1471         case PCI_DEVICE_ID_ARECA_1170:
1472         case PCI_DEVICE_ID_ARECA_1220:
1473         case PCI_DEVICE_ID_ARECA_1230:
1474         case PCI_DEVICE_ID_ARECA_1260:
1475         case PCI_DEVICE_ID_ARECA_1270:
1476         case PCI_DEVICE_ID_ARECA_1280:
1477                 type = "SATA";
1478                 break;
1479         case PCI_DEVICE_ID_ARECA_1380:
1480         case PCI_DEVICE_ID_ARECA_1381:
1481         case PCI_DEVICE_ID_ARECA_1680:
1482         case PCI_DEVICE_ID_ARECA_1681:
1483                 type = "SAS";
1484                 break;
1485         default:
1486                 type = "X-TYPE";
1487                 break;
1488         }
1489         sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n        %s",
1490                         type, raid6 ? " (RAID6 capable)" : "",
1491                         ARCMSR_DRIVER_VERSION);
1492         return buf;
1493 }
1494
1495