/*
 * HighPoint RR3xxx controller driver for Linux
 * Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"
MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
static const char driver_ver[] = "v1.0 (060426)";

static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
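/*
 * Read a register back to force any posted PCI writes to reach the
 * controller before execution continues (standard posting-flush idiom).
 */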
static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
{
        readl(&iop->outbound_intstatus);
}
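/*
 * Poll the inbound queue until the firmware posts a request or the
 * timeout (in milliseconds) expires; the posted request appears to be a
 * ready handshake and is simply bounced back on the outbound queue.
 */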
static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
{
        u32 req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = readl(&iop->inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                msleep(1);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                writel(req, &iop->outbound_queue);
                hptiop_pci_posting_flush(iop);
                return 0;
        }

        return -1;
}
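/*
 * Dispatch a completed request by tag: tags carrying the host-address
 * bit refer to host-allocated SCSI requests; everything else is an
 * IOP-resident (ioctl) request.
 */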
static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
{
        if ((tag & IOPMU_QUEUE_MASK_HOST_BITS) == IOPMU_QUEUE_ADDR_HOST_BIT)
                return hptiop_host_request_callback(hba,
                                tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
        else
                return hptiop_iop_request_callback(hba, tag);
}
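/*
 * Drain the outbound post queue.  Requests flagged
 * IOP_REQUEST_FLAG_SYNC_REQUEST are completed through their context
 * word so that a polling sender (iop_send_sync_request) can see them.
 */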
static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
{
        u32 req;

        while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {

                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback(hba, req);
                else {
                        struct hpt_iop_request_header __iomem *p;

                        p = (struct hpt_iop_request_header __iomem *)
                                ((char __iomem *)hba->iop + req);

                        if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                if (readl(&p->context))
                                        hptiop_request_callback(hba, req);
                                else
                                        writel(1, &p->context);
                        }
                        else
                                hptiop_request_callback(hba, req);
                }
        }
}
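/*
 * Core interrupt service routine; the caller must hold host_lock.
 * Returns nonzero if an interrupt source was serviced.
 */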
static int __iop_intr(struct hptiop_hba *hba)
{
        struct hpt_iopmu __iomem *iop = hba->iop;
        u32 status;
        int ret = 0;

        status = readl(&iop->outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u32 msg = readl(&iop->outbound_msgaddr0);
                dprintk("received outbound msg %x\n", msg);
                writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
                hptiop_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue(hba);
                ret = 1;
        }

        return ret;
}
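/*
 * Post a request and busy-wait, polling the interrupt status, until the
 * firmware writes a nonzero value into the request's context word or
 * the timeout expires.
 */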
static int iop_send_sync_request(struct hptiop_hba *hba,
                                        void __iomem *_req, u32 millisec)
{
        struct hpt_iop_request_header __iomem *req = _req;
        u32 i;

        writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
                        &req->flags);

        writel(0, &req->context);

        writel((unsigned long)req - (unsigned long)hba->iop,
                        &hba->iop->inbound_queue);

        hptiop_pci_posting_flush(hba->iop);

        for (i = 0; i < millisec; i++) {
                __iop_intr(hba);
                if (readl(&req->context))
                        return 0;
                msleep(1);
        }

        return -1;
}
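/*
 * Send a message through inbound_msgaddr0 and poll, taking host_lock
 * around each service pass, until hptiop_message_callback() sets
 * msg_done or the timeout expires.
 */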
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
        u32 i;

        hba->msg_done = 0;

        writel(msg, &hba->iop->inbound_msgaddr0);

        hptiop_pci_posting_flush(hba->iop);

        for (i = 0; i < millisec; i++) {
                spin_lock_irq(hba->host->host_lock);
                __iop_intr(hba);
                spin_unlock_irq(hba->host->host_lock);
                if (hba->msg_done)
                        break;
                msleep(1);
        }

        return hba->msg_done ? 0 : -1;
}
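/*
 * iop_get_config()/iop_set_config() borrow a firmware-resident request
 * slot from the inbound queue, fill it through MMIO and complete it
 * synchronously; both are used only during probe.
 */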
static int iop_get_config(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u32 req32;
        struct hpt_iop_request_get_config __iomem *req;

        req32 = readl(&hba->iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_get_config __iomem *)
                        ((unsigned long)hba->iop + req32);

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request(hba, req, 20000)) {
                dprintk("Get config send cmd failed\n");
                return -1;
        }

        memcpy_fromio(config, req, sizeof(*config));
        writel(req32, &hba->iop->outbound_queue);
        return 0;
}
static int iop_set_config(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u32 req32;
        struct hpt_iop_request_set_config __iomem *req;

        req32 = readl(&hba->iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_set_config __iomem *)
                        ((unsigned long)hba->iop + req32);

        memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
                (u8 *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request(hba, req, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }

        writel(req32, &hba->iop->outbound_queue);
        return 0;
}
static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
        struct hpt_iopmu __iomem *iop = hba->iop;

        /* enable interrupts */
        writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
                        &iop->outbound_intmask);

        hba->initialized = 1;

        /* start background tasks */
        if (iop_send_sync_msg(hba,
                        IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                printk(KERN_ERR "scsi%d: fail to start background task\n",
                                hba->host->host_no);
                return -1;
        }
        return 0;
}
static int hptiop_map_pci_bar(struct hptiop_hba *hba)
{
        u32 mem_base_phy, length;
        void __iomem *mem_base_virt;
        struct pci_dev *pcidev = hba->pcidev;

        if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR "scsi%d: pci resource invalid\n",
                                hba->host->host_no);
                return -1;
        }

        mem_base_phy = pci_resource_start(pcidev, 0);
        length = pci_resource_len(pcidev, 0);
        mem_base_virt = ioremap(mem_base_phy, length);

        if (!mem_base_virt) {
                printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
                                hba->host->host_no);
                return -1;
        }

        hba->iop = mem_base_virt;
        dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
        return 0;
}
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
        dprintk("iop message 0x%x\n", msg);

        if (!hba->initialized)
                return;

        if (msg == IOPMU_INBOUND_MSG0_RESET) {
                atomic_set(&hba->resetting, 0);
                wake_up(&hba->reset_wq);
        }
        else if (msg <= IOPMU_INBOUND_MSG0_MAX)
                hba->msg_done = 1;
}
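/* Simple singly-linked free list over the pre-allocated request slots. */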
static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
{
        struct hptiop_request *ret;

        dprintk("get_req : req=%p\n", hba->req_list);

        ret = hba->req_list;
        if (ret)
                hba->req_list = ret->next;

        return ret;
}

static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
        dprintk("free_req(%d, %p)\n", req->index, req);
        req->next = hba->req_list;
        hba->req_list = req;
}
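/*
 * Completion handler for host-allocated SCSI requests: unmap DMA,
 * translate the IOP result code into a SCSI midlayer result, complete
 * the command and return the slot to the free list.
 */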
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
{
        struct hpt_iop_request_scsi_command *req;
        struct scsi_cmnd *scp;

        req = (struct hpt_iop_request_scsi_command *)hba->reqs[tag].req_virt;
        dprintk("hptiop_host_request_callback: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, req->header.type, req->header.result,
                        req->header.context, tag);

        BUG_ON(!req->header.result);
        BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

        scp = hba->reqs[tag].scp;

        if (HPT_SCP(scp)->mapped) {
                if (scp->use_sg)
                        pci_unmap_sg(hba->pcidev,
                                (struct scatterlist *)scp->request_buffer,
                                scp->use_sg,
                                scp->sc_data_direction);
                else
                        pci_unmap_single(hba->pcidev,
                                HPT_SCP(scp)->dma_handle,
                                scp->request_bufflen,
                                scp->sc_data_direction);
        }

        switch (le32_to_cpu(req->header.result)) {
        case IOP_RESULT_SUCCESS:
                scp->result = (DID_OK<<16);
                break;
        case IOP_RESULT_BAD_TARGET:
                scp->result = (DID_BAD_TARGET<<16);
                break;
        case IOP_RESULT_BUSY:
                scp->result = (DID_BUS_BUSY<<16);
                break;
        case IOP_RESULT_RESET:
                scp->result = (DID_RESET<<16);
                break;
        case IOP_RESULT_FAIL:
                scp->result = (DID_ERROR<<16);
                break;
        case IOP_RESULT_INVALID_REQUEST:
                scp->result = (DID_ABORT<<16);
                break;
        case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
                scp->result = SAM_STAT_CHECK_CONDITION;
                memset(&scp->sense_buffer,
                                0, sizeof(scp->sense_buffer));
                memcpy(&scp->sense_buffer,
                        &req->sg_list, le32_to_cpu(req->dataxfer_length));
                break;

        default:
                scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
                                        (DID_ABORT<<16);
                break;
        }

        dprintk("scsi_done(%p)\n", scp);
        scp->scsi_done(scp);
        free_req(hba, &hba->reqs[tag]);
}
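/*
 * Completion handler for IOP-resident ioctl requests.  The caller's
 * hpt_ioctl_k is recovered from the 64-bit context stored in the
 * request; output data is copied out and the slot is handed back to
 * the firmware through the outbound queue.
 */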
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
{
        struct hpt_iop_request_header __iomem *req;
        struct hpt_iop_request_ioctl_command __iomem *p;
        struct hpt_ioctl_k *arg;

        req = (struct hpt_iop_request_header __iomem *)
                        ((unsigned long)hba->iop + tag);
        dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, readl(&req->type), readl(&req->result),
                        readl(&req->context), tag);

        BUG_ON(!readl(&req->result));
        BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

        p = (struct hpt_iop_request_ioctl_command __iomem *)req;
        arg = (struct hpt_ioctl_k *)(unsigned long)
                (readl(&req->context) |
                        ((u64)readl(&req->context_hi32)<<32));

        if (readl(&req->result) == IOP_RESULT_SUCCESS) {
                arg->result = HPT_IOCTL_RESULT_OK;

                if (arg->outbuf_size)
                        memcpy_fromio(arg->outbuf,
                                &p->buf[(readl(&p->inbuf_size) + 3) & ~3],
                                arg->outbuf_size);

                if (arg->bytes_returned)
                        *arg->bytes_returned = arg->outbuf_size;
        } else
                arg->result = HPT_IOCTL_RESULT_FAILED;

        arg->done(arg);
        writel(tag, &hba->iop->outbound_queue);
}
static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
        struct hptiop_hba *hba = dev_id;
        int handled;
        unsigned long flags;

        spin_lock_irqsave(hba->host->host_lock, flags);
        handled = __iop_intr(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        return handled;
}
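/*
 * Build the hardware S/G list for a command and mark the final entry
 * with the eot (end-of-table) flag; returns the number of entries used.
 */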
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct scatterlist *sglist = (struct scatterlist *)scp->request_buffer;

        /*
         * though we'll not get non-use_sg fields anymore,
         * keep use_sg checking anyway
         */
        if (scp->use_sg) {
                int idx;

                HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev,
                                sglist, scp->use_sg,
                                scp->sc_data_direction);
                HPT_SCP(scp)->mapped = 1;
                BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

                for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) {
                        psg[idx].pci_address =
                                cpu_to_le64(sg_dma_address(&sglist[idx]));
                        psg[idx].size = cpu_to_le32(sg_dma_len(&sglist[idx]));
                        psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
                                cpu_to_le32(1) : 0;
                }

                return HPT_SCP(scp)->sgcnt;
        }

        HPT_SCP(scp)->dma_handle = pci_map_single(
                        hba->pcidev,
                        scp->request_buffer,
                        scp->request_bufflen,
                        scp->sc_data_direction);
        HPT_SCP(scp)->mapped = 1;
        psg->pci_address = cpu_to_le64(HPT_SCP(scp)->dma_handle);
        psg->size = cpu_to_le32(scp->request_bufflen);
        psg->eot = cpu_to_le32(1);
        return 1;
}
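/*
 * queuecommand: take a free request slot, build the request (header,
 * CDB and S/G list) in coherent DMA memory, and post its shifted
 * physical address to the inbound queue.  Completion arrives later via
 * hptiop_host_request_callback().
 */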
static int hptiop_queuecommand(struct scsi_cmnd *scp,
                                void (*done)(struct scsi_cmnd *))
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct hpt_iop_request_scsi_command *req;
        int sg_count = 0;
        struct hptiop_request *_req;

        BUG_ON(!done);
        scp->scsi_done = done;

        _req = get_req(hba);
        if (_req == NULL) {
                dprintk("hptiop_queuecmd : no free req\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        _req->scp = scp;

        dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
                        "req_index=%d, req=%p\n",
                        scp,
                        host->host_no, scp->device->channel,
                        scp->device->id, scp->device->lun,
                        *((u32 *)&scp->cmnd),
                        *((u32 *)&scp->cmnd + 1),
                        *((u32 *)&scp->cmnd + 2),
                        _req->index, _req->req_virt);

        scp->result = 0;

        if (scp->device->channel || scp->device->lun ||
                        scp->device->id > hba->max_devices) {
                scp->result = DID_BAD_TARGET << 16;
                free_req(hba, _req);
                goto cmd_done;
        }

        req = (struct hpt_iop_request_scsi_command *)_req->req_virt;

        /* build S/G table */
        if (scp->request_bufflen)
                sg_count = hptiop_buildsgl(scp, req->sg_list);
        else
                HPT_SCP(scp)->mapped = 0;

        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
                                                (u32)_req->index);
        req->header.context_hi32 = 0;
        req->dataxfer_length = cpu_to_le32(scp->request_bufflen);
        req->channel = scp->device->channel;
        req->target = scp->device->id;
        req->lun = scp->device->lun;
        req->header.size = cpu_to_le32(
                                sizeof(struct hpt_iop_request_scsi_command)
                                 - sizeof(struct hpt_iopsg)
                                 + sg_count * sizeof(struct hpt_iopsg));

        memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));

        writel(IOPMU_QUEUE_ADDR_HOST_BIT | _req->req_shifted_phy,
                        &hba->iop->inbound_queue);

        return 0;

cmd_done:
        dprintk("scsi_done(scp=%p)\n", scp);
        scp->scsi_done(scp);
        return 0;
}
static const char *hptiop_info(struct Scsi_Host *host)
{
        return driver_name_long;
}
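/*
 * Reset the IOP: post IOPMU_INBOUND_MSG0_RESET and sleep until the
 * firmware echoes the reset message back (hptiop_message_callback()
 * clears `resetting` and wakes reset_wq), with a 60 second timeout.
 */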
static int hptiop_reset_hba(struct hptiop_hba *hba)
{
        if (atomic_xchg(&hba->resetting, 1) == 0) {
                atomic_inc(&hba->reset_count);
                writel(IOPMU_INBOUND_MSG0_RESET,
                                &hba->iop->inbound_msgaddr0);
                hptiop_pci_posting_flush(hba->iop);
        }

        wait_event_timeout(hba->reset_wq,
                        atomic_read(&hba->resetting) == 0, 60 * HZ);

        if (atomic_read(&hba->resetting)) {
                /* IOP is in unknown state, abort reset */
                printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
                return -1;
        }

        if (iop_send_sync_msg(hba,
                        IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                dprintk("scsi%d: fail to start background task\n",
                                hba->host->host_no);
        }

        return 0;
}
static int hptiop_reset(struct scsi_cmnd *scp)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
                        scp->device->host->host_no, scp->device->channel,
                        scp->device->id, scp);

        return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
                                                int queue_depth)
{
        if (queue_depth > 256)
                queue_depth = 256;
        scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
        return queue_depth;
}
static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *host = class_to_shost(class_dev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
                        hba->firmware_version >> 24,
                        (hba->firmware_version >> 16) & 0xff,
                        (hba->firmware_version >> 8) & 0xff,
                        hba->firmware_version & 0xff);
}

static struct class_device_attribute hptiop_attr_version = {
        .attr = {
                .name = "driver-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_version,
};

static struct class_device_attribute hptiop_attr_fw_version = {
        .attr = {
                .name = "firmware-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_fw_version,
};

static struct class_device_attribute *hptiop_attrs[] = {
        &hptiop_attr_version,
        &hptiop_attr_fw_version,
        NULL
};
static struct scsi_host_template driver_template = {
        .module                     = THIS_MODULE,
        .name                       = driver_name,
        .queuecommand               = hptiop_queuecommand,
        .eh_device_reset_handler    = hptiop_reset,
        .eh_bus_reset_handler       = hptiop_reset,
        .info                       = hptiop_info,
        .unchecked_isa_dma          = 0,
        .emulated                   = 0,
        .use_clustering             = ENABLE_CLUSTERING,
        .proc_name                  = driver_name,
        .shost_attrs                = hptiop_attrs,
        .this_id                    = -1,
        .change_queue_depth         = hptiop_adjust_disk_queue_depth,
};
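/*
 * One-time adapter initialization: enable the PCI device, map BAR 0,
 * wait for firmware, exchange configuration, hook the IRQ, carve out
 * the coherent request pool, then register with the SCSI midlayer.
 * Error paths unwind in reverse order through the labels at the end.
 */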
static int __devinit hptiop_probe(struct pci_dev *pcidev,
                                        const struct pci_device_id *id)
{
        struct Scsi_Host *host = NULL;
        struct hptiop_hba *hba;
        struct hpt_iop_request_get_config iop_config;
        struct hpt_iop_request_set_config set_config;
        dma_addr_t start_phy;
        void *start_virt;
        u32 offset, i, req_size;

        dprintk("hptiop_probe(%p)\n", pcidev);

        if (pci_enable_device(pcidev)) {
                printk(KERN_ERR "hptiop: fail to enable pci device\n");
                return -ENODEV;
        }

        printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
                pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
                pcidev->irq);

        pci_set_master(pcidev);

        /* Enable 64bit DMA if possible */
        if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
                if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
                        printk(KERN_ERR "hptiop: fail to set dma_mask\n");
                        goto disable_pci_device;
                }
        }

        if (pci_request_regions(pcidev, driver_name)) {
                printk(KERN_ERR "hptiop: pci_request_regions failed\n");
                goto disable_pci_device;
        }

        host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
        if (!host) {
                printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
                goto free_pci_regions;
        }

        hba = (struct hptiop_hba *)host->hostdata;

        hba->pcidev = pcidev;
        hba->host = host;
        hba->initialized = 0;

        atomic_set(&hba->resetting, 0);
        atomic_set(&hba->reset_count, 0);

        init_waitqueue_head(&hba->reset_wq);
        init_waitqueue_head(&hba->ioctl_wq);

        host->max_lun = 1;
        host->max_channel = 0;
        host->io_port = 0;
        host->n_io_port = 0;
        host->irq = pcidev->irq;

        if (hptiop_map_pci_bar(hba))
                goto free_scsi_host;

        if (iop_wait_ready(hba->iop, 20000)) {
                printk(KERN_ERR "scsi%d: firmware not ready\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        if (iop_get_config(hba, &iop_config)) {
                printk(KERN_ERR "scsi%d: get config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
                                HPTIOP_MAX_REQUESTS);
        hba->max_devices = le32_to_cpu(iop_config.max_devices);
        hba->max_request_size = le32_to_cpu(iop_config.request_size);
        hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
        hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
        hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

        host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
        host->max_id = le32_to_cpu(iop_config.max_devices);
        host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
        host->can_queue = le32_to_cpu(iop_config.max_requests);
        host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
        host->max_cmd_len = 16;

        set_config.vbus_id = cpu_to_le32(host->host_no);
        set_config.iop_id = cpu_to_le32(host->host_no);

        if (iop_set_config(hba, &set_config)) {
                printk(KERN_ERR "scsi%d: set config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        pci_set_drvdata(pcidev, host);

        if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
                                        driver_name, hba)) {
                printk(KERN_ERR "scsi%d: request irq %d failed\n",
                                        hba->host->host_no, pcidev->irq);
                goto unmap_pci_bar;
        }
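        /*
         * The controller addresses host requests as (physical address >> 5),
         * so every request apparently must start on a 32-byte boundary; the
         * per-request size is rounded up and the pool over-allocated by 0x20
         * bytes so the first slot can be aligned below.
         */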
        /* Allocate request mem */
        req_size = sizeof(struct hpt_iop_request_scsi_command)
                + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
        if ((req_size & 0x1f) != 0)
                req_size = (req_size + 0x1f) & ~0x1f;

        dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

        hba->req_size = req_size;
        start_virt = dma_alloc_coherent(&pcidev->dev,
                                hba->req_size*hba->max_requests + 0x20,
                                &start_phy, GFP_KERNEL);

        if (!start_virt) {
                printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
                                        hba->host->host_no);
                goto free_request_irq;
        }

        hba->dma_coherent = start_virt;
        hba->dma_coherent_handle = start_phy;

        if ((start_phy & 0x1f) != 0) {
                /* round up to a 32-byte boundary */
                offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
                start_phy += offset;
                start_virt += offset;
        }

        hba->req_list = start_virt;
        for (i = 0; i < hba->max_requests; i++) {
                hba->reqs[i].next = NULL;
                hba->reqs[i].req_virt = start_virt;
                hba->reqs[i].req_shifted_phy = start_phy >> 5;
                hba->reqs[i].index = i;
                free_req(hba, &hba->reqs[i]);
                start_virt = (char *)start_virt + hba->req_size;
                start_phy = start_phy + hba->req_size;
        }

        /* Enable Interrupt and start background task */
        if (hptiop_initialize_iop(hba))
                goto free_request_mem;

        if (scsi_add_host(host, &pcidev->dev)) {
                printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
                                        hba->host->host_no);
                goto free_request_mem;
        }

        scsi_scan_host(host);

        dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
        return 0;

free_request_mem:
        dma_free_coherent(&hba->pcidev->dev,
                        hba->req_size*hba->max_requests + 0x20,
                        hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
        free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
        iounmap(hba->iop);

free_scsi_host:
        scsi_host_put(host);

free_pci_regions:
        pci_release_regions(pcidev);

disable_pci_device:
        pci_disable_device(pcidev);

        dprintk("scsi%d: hptiop_probe fail\n", host->host_no);
        return -ENODEV;
}
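/*
 * Shutdown: ask the firmware to flush and stop (waiting up to 60
 * seconds), then mask all outbound interrupts.
 */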
static void hptiop_shutdown(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct hpt_iopmu __iomem *iop = hba->iop;
        u32 int_mask;

        dprintk("hptiop_shutdown(%p)\n", hba);

        /* stop the iop */
        if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
                printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
                                hba->host->host_no);

        /* disable all outbound interrupts */
        int_mask = readl(&iop->outbound_intmask);
        writel(int_mask |
                IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
                &iop->outbound_intmask);
        hptiop_pci_posting_flush(iop);
}
static void hptiop_remove(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

        scsi_remove_host(host);

        hptiop_shutdown(pcidev);

        free_irq(hba->pcidev->irq, hba);

        dma_free_coherent(&hba->pcidev->dev,
                        hba->req_size * hba->max_requests + 0x20,
                        hba->dma_coherent,
                        hba->dma_coherent_handle);

        iounmap(hba->iop);

        pci_release_regions(hba->pcidev);
        pci_set_drvdata(hba->pcidev, NULL);
        pci_disable_device(hba->pcidev);

        scsi_host_put(host);
}
static struct pci_device_id hptiop_id_table[] = {
        { PCI_DEVICE(0x1103, 0x3220) },
        { PCI_DEVICE(0x1103, 0x3320) },
        {},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);
static struct pci_driver hptiop_pci_driver = {
        .name       = driver_name,
        .id_table   = hptiop_id_table,
        .probe      = hptiop_probe,
        .remove     = hptiop_remove,
        .shutdown   = hptiop_shutdown,
};
static int __init hptiop_module_init(void)
{
        printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
        return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
        pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");