/*
 * HighPoint RR3xxx controller driver for Linux
 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"
MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
static const char driver_ver[] = "v1.2 (070830)";

static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
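
/*
 * Read back a register so that preceding posted PCI writes are forced
 * out to the adapter before execution continues.
 */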
static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
{
	readl(&iop->outbound_intstatus);
}
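
/*
 * Poll the inbound queue until the IOP firmware posts a request,
 * indicating it is ready; the request is handed straight back via the
 * outbound queue.  Gives up after roughly @millisec milliseconds.
 */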
static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &iop->outbound_queue);
		hptiop_pci_posting_flush(iop);
		return 0;
	}

	return -1;
}
static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		return hptiop_host_request_callback(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		return hptiop_iop_request_callback(hba, tag);
}
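
/*
 * Pop completed requests off the outbound queue.  Host-allocated
 * requests are dispatched by tag; IOP-resident synchronous requests
 * are either completed here or marked done through their context word
 * for the poller in iop_send_sync_request().
 */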
static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback(hba, req);
		else {
			struct hpt_iop_request_header __iomem * p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback(hba, req);
		}
	}
}
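
/*
 * Core interrupt handling: acknowledge message-unit interrupts and
 * drain the outbound post queue.  Returns nonzero if any interrupt
 * was serviced.  Caller must hold the host lock.
 */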
static int __iop_intr(struct hptiop_hba *hba)
{
	struct hpt_iopmu __iomem *iop = hba->iop;
	u32 status;
	int ret = 0;

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);
		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue(hba);
		ret = 1;
	}

	return ret;
}
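
/*
 * Post an IOP-resident request and busy-wait, polling __iop_intr(),
 * until the firmware flips the request's context word or the timeout
 * expires.  Used for synchronous configuration requests at probe time.
 */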
static int iop_send_sync_request(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
			&req->flags);

	writel(0, &req->context);

	writel((unsigned long)req - (unsigned long)hba->iop,
			&hba->iop->inbound_queue);

	hptiop_pci_posting_flush(hba->iop);

	for (i = 0; i < millisec; i++) {
		__iop_intr(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}
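
/*
 * Post a message to the IOP's inbound message register and poll until
 * hptiop_message_callback() sets hba->msg_done or the timeout expires.
 * Returns 0 on success, -1 on timeout.
 */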
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;

	writel(msg, &hba->iop->inbound_msgaddr0);

	hptiop_pci_posting_flush(hba->iop);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		__iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done ? 0 : -1;
}
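
/*
 * Borrow a request slot from the IOP's inbound queue, fill in a
 * GET_CONFIG header, and execute it synchronously; on success the
 * result is copied out of IOP memory into @config.
 */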
static int iop_get_config(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->iop->outbound_queue);
	return 0;
}
static int iop_set_config(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->iop->outbound_queue);
	return 0;
}
static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	struct hpt_iopmu __iomem *iop = hba->iop;

	/* enable interrupts */
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
			&iop->outbound_intmask);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
					hba->host->host_no);
		return -1;
	}
	return 0;
}
static int hptiop_map_pci_bar(struct hptiop_hba *hba)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;
	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return -1;
	}

	mem_base_phy = pci_resource_start(pcidev, 0);
	length = pci_resource_len(pcidev, 0);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return -1;
	}

	hba->iop = mem_base_virt;
	dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
	return 0;
}
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}
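
/*
 * Request slots live on a singly-linked free list threaded through
 * the preallocated DMA-coherent request area; callers serialize
 * get_req()/free_req() via the SCSI host lock.
 */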
static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}
static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}
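
/*
 * Completion path for host-allocated SCSI requests: decode the tag,
 * unmap DMA, translate the IOP result code into a SCSI midlayer
 * result, and return the request slot to the free list.
 */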
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	struct scsi_cmnd *scp;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = IOP_RESULT_SUCCESS;
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	dprintk("hptiop_host_request_callback: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
		scp->result = SAM_STAT_CHECK_CONDITION;
		memset(&scp->sense_buffer,
				0, sizeof(scp->sense_buffer));
		memcpy(&scp->sense_buffer, &req->sg_list,
				min(sizeof(scp->sense_buffer),
					le32_to_cpu(req->dataxfer_length)));
		break;

	default:
		scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
					(DID_ABORT<<16);
		break;
	}

	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}
void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->iop + tag);
	dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3)& ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->iop->outbound_queue);
}
static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = __iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}
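
/*
 * Map the command's scatterlist for DMA and translate it into the
 * adapter's hpt_iopsg format, flagging the final element as
 * end-of-table.  Returns the number of SG entries, or 0 for commands
 * with no data transfer.
 */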
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}
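
/*
 * queuecommand: validate the target, grab a free request slot, build
 * the SCSI command request in host memory, and post its shifted
 * physical address (plus size-hint bits on v2 interfaces) to the
 * inbound queue.
 */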
static int hptiop_queuecommand(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			*((u32 *)&scp->cmnd),
			*((u32 *)&scp->cmnd + 1),
			*((u32 *)&scp->cmnd + 2),
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel || scp->device->lun ||
			scp->device->id > hba->max_devices) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
							(u32)_req->index);
	req->header.context_hi32 = 0;
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				 - sizeof(struct hpt_iopsg)
				 + sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));

	if (hba->iopintf_v2) {
		u32 size_bits;
		if (req->header.size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (req->header.size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
					&hba->iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
					&hba->iop->inbound_queue);

	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}
static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}
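
/*
 * Ask the IOP to reset itself, then wait (up to 60s) for the reset
 * message to come back through hptiop_message_callback().  Only one
 * reset is issued at a time; concurrent callers just wait on it.
 */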
static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		writel(IOPMU_INBOUND_MSG0_RESET,
				&hba->iop->inbound_msgaddr0);
		hptiop_pci_posting_flush(hba->iop);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}
static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
			scp->device->host->host_no, scp->device->channel,
			scp->device->id, scp);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}
static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
						int queue_depth)
{
	if (queue_depth > 256)
		queue_depth = 256;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}
static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}
static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *host = class_to_shost(class_dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
			hba->firmware_version >> 24,
			(hba->firmware_version >> 16) & 0xff,
			(hba->firmware_version >> 8) & 0xff,
			hba->firmware_version & 0xff);
}
static struct class_device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct class_device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct class_device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= driver_name,
	.queuecommand		= hptiop_queuecommand,
	.eh_device_reset_handler = hptiop_reset,
	.eh_bus_reset_handler	= hptiop_reset,
	.info			= hptiop_info,
	.unchecked_isa_dma	= 0,
	.emulated		= 0,
	.use_clustering		= ENABLE_CLUSTERING,
	.use_sg_chaining	= ENABLE_SG_CHAINING,
	.proc_name		= driver_name,
	.shost_attrs		= hptiop_attrs,
	.this_id		= -1,
	.change_queue_depth	= hptiop_adjust_disk_queue_depth,
};
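
/*
 * One-time adapter bring-up: enable and map the PCI device, wait for
 * firmware, exchange get/set config requests, wire up the IRQ, carve
 * the DMA-coherent area into 32-byte-aligned request slots, then
 * register with the SCSI midlayer.
 */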
static int __devinit hptiop_probe(struct pci_dev *pcidev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;

	dprintk("hptiop_probe(%p)\n", pcidev);
	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
		if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
			goto disable_pci_device;
		}
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;

	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);
	host->max_lun = 1;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hptiop_map_pci_bar(hba))
		goto free_scsi_host;

	if (iop_wait_ready(hba->iop, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (iop_get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;
	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (iop_set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}
	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	start_virt = dma_alloc_coherent(&pcidev->dev,
				hba->req_size*hba->max_requests + 0x20,
				&start_phy, GFP_KERNEL);

	if (!start_virt) {
		printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
					hba->host->host_no);
		goto free_request_irq;
	}

	hba->dma_coherent = start_virt;
	hba->dma_coherent_handle = start_phy;

	if ((start_phy & 0x1f) != 0)
	{
		/* align requests to a 32-byte boundary */
		offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
		start_phy += offset;
		start_virt += offset;
	}

	hba->req_list = start_virt;
	for (i = 0; i < hba->max_requests; i++) {
		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
		start_virt = (char *)start_virt + hba->req_size;
		start_phy = start_phy + hba->req_size;
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;
free_request_mem:
	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size*hba->max_requests + 0x20,
			hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	iounmap(hba->iop);

free_pci_regions:
	pci_release_regions(pcidev);

free_scsi_host:
	scsi_host_put(host);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host->host_no);
	return -ENODEV;
}
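
/*
 * Tell the IOP to shut down (allowing up to 60s) and mask its
 * outbound interrupts; used both for system shutdown and as the
 * first step of device removal.
 */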
static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iopmu __iomem *iop = hba->iop;
	u32 int_mask;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	int_mask = readl(&iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&iop->outbound_intmask);
	hptiop_pci_posting_flush(iop);
}
static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent,
			hba->dma_coherent_handle);

	iounmap(hba->iop);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}
static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220) },
	{ PCI_VDEVICE(TTI, 0x3320) },
	{ PCI_VDEVICE(TTI, 0x3520) },
	{ PCI_VDEVICE(TTI, 0x4320) },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);
static struct pci_driver hptiop_pci_driver = {
	.name		= driver_name,
	.id_table	= hptiop_id_table,
	.probe		= hptiop_probe,
	.remove		= hptiop_remove,
	.shutdown	= hptiop_shutdown,
};
static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}
static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");