/*
 * HighPoint RR3xxx controller driver for Linux
 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"
MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
static const char driver_ver[] = "v1.2 (070830)";
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
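/*
 * Read back a hardware register so that preceding posted PCI writes
 * are flushed out to the adapter before we continue.
 */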
static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
{
        readl(&iop->outbound_intstatus);
}
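/*
 * Poll the inbound queue until the firmware posts its handshake
 * request (or the timeout expires), then hand the request back through
 * the outbound queue to acknowledge that the host has seen it.
 */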
static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
{
        u32 req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = readl(&iop->inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                msleep(1);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                writel(req, &iop->outbound_queue);
                hptiop_pci_posting_flush(iop);
                return 0;
        }

        return -1;
}
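/*
 * Dispatch a completed request by its tag: requests with
 * IOPMU_QUEUE_ADDR_HOST_BIT set live in host memory, all others are
 * offsets into the IOP's own memory window.
 */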
static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
{
        if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
                return hptiop_host_request_callback(hba,
                                tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
        else
                return hptiop_iop_request_callback(hba, tag);
}
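/*
 * Pop completed requests off the outbound queue.  A request flagged
 * IOP_REQUEST_FLAG_SYNC_REQUEST whose context is still zero is
 * completed by writing 1 to its context field, which the polling loop
 * in iop_send_sync_request() is waiting on.
 */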
static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
{
        u32 req;

        while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {

                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback(hba, req);
                else {
                        struct hpt_iop_request_header __iomem *p;

                        p = (struct hpt_iop_request_header __iomem *)
                                ((char __iomem *)hba->iop + req);

                        if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                if (readl(&p->context))
                                        hptiop_request_callback(hba, req);
                                else
                                        writel(1, &p->context);
                        }
                        else
                                hptiop_request_callback(hba, req);
                }
        }
}
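/*
 * Core interrupt service routine; must be called with the host lock
 * held.  Returns non-zero if the adapter had any work for us.
 */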
static int __iop_intr(struct hptiop_hba *hba)
{
        struct hpt_iopmu __iomem *iop = hba->iop;
        u32 status;
        int ret = 0;

        status = readl(&iop->outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u32 msg = readl(&iop->outbound_msgaddr0);
                dprintk("received outbound msg %x\n", msg);
                writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
                hptiop_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue(hba);
                ret = 1;
        }

        return ret;
}
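/*
 * Post a request and busy-wait for its completion: mark it as a
 * synchronous request, clear its context, then poll the context field
 * (serviced via __iop_intr) for up to 'millisec' milliseconds.
 */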
static int iop_send_sync_request(struct hptiop_hba *hba,
                                        void __iomem *_req, u32 millisec)
{
        struct hpt_iop_request_header __iomem *req = _req;
        u32 i;

        writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
                        &req->flags);

        writel(0, &req->context);

        writel((unsigned long)req - (unsigned long)hba->iop,
                        &hba->iop->inbound_queue);

        hptiop_pci_posting_flush(hba->iop);

        for (i = 0; i < millisec; i++) {
                __iop_intr(hba);
                if (readl(&req->context))
                        return 0;
                msleep(1);
        }

        return -1;
}
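/*
 * Send a message through the inbound message register and poll for the
 * acknowledgement (hba->msg_done, set by hptiop_message_callback()).
 * Returns 0 on success, -1 on timeout.
 */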
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
        u32 i;

        hba->msg_done = 0;

        writel(msg, &hba->iop->inbound_msgaddr0);

        hptiop_pci_posting_flush(hba->iop);

        for (i = 0; i < millisec; i++) {
                spin_lock_irq(hba->host->host_lock);
                __iop_intr(hba);
                spin_unlock_irq(hba->host->host_lock);
                if (hba->msg_done)
                        break;
                msleep(1);
        }

        return hba->msg_done ? 0 : -1;
}
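/*
 * GET_CONFIG/SET_CONFIG use a request that lives in the IOP's own
 * memory: a free slot is popped from the inbound queue, filled in with
 * readl/writel, sent synchronously, and finally returned to the
 * firmware through the outbound queue.
 */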
static int iop_get_config(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u32 req32;
        struct hpt_iop_request_get_config __iomem *req;

        req32 = readl(&hba->iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_get_config __iomem *)
                        ((unsigned long)hba->iop + req32);

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request(hba, req, 20000)) {
                dprintk("Get config send cmd failed\n");
                return -1;
        }

        memcpy_fromio(config, req, sizeof(*config));
        writel(req32, &hba->iop->outbound_queue);
        return 0;
}
static int iop_set_config(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u32 req32;
        struct hpt_iop_request_set_config __iomem *req;

        req32 = readl(&hba->iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_set_config __iomem *)
                        ((unsigned long)hba->iop + req32);

        memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
                (u8 *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request(hba, req, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }

        writel(req32, &hba->iop->outbound_queue);
        return 0;
}
static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
        struct hpt_iopmu __iomem *iop = hba->iop;

        /* enable interrupts */
        writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
                        &iop->outbound_intmask);

        hba->initialized = 1;

        /* start background tasks */
        if (iop_send_sync_msg(hba,
                        IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                printk(KERN_ERR "scsi%d: fail to start background task\n",
                                hba->host->host_no);
                return -1;
        }
        return 0;
}
static int hptiop_map_pci_bar(struct hptiop_hba *hba)
{
        u32 mem_base_phy, length;
        void __iomem *mem_base_virt;
        struct pci_dev *pcidev = hba->pcidev;

        if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR "scsi%d: pci resource invalid\n",
                                hba->host->host_no);
                return -1;
        }

        mem_base_phy = pci_resource_start(pcidev, 0);
        length = pci_resource_len(pcidev, 0);
        mem_base_virt = ioremap(mem_base_phy, length);

        if (!mem_base_virt) {
                printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
                                hba->host->host_no);
                return -1;
        }

        hba->iop = mem_base_virt;
        dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
        return 0;
}
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
        dprintk("iop message 0x%x\n", msg);

        if (!hba->initialized)
                return;

        if (msg == IOPMU_INBOUND_MSG0_RESET) {
                atomic_set(&hba->resetting, 0);
                wake_up(&hba->reset_wq);
        }
        else if (msg <= IOPMU_INBOUND_MSG0_MAX)
                hba->msg_done = 1;
}
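/*
 * The pre-allocated host requests are kept on a singly linked free
 * list; callers of both helpers are serialized by the host lock.
 */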
static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
{
        struct hptiop_request *ret;

        dprintk("get_req : req=%p\n", hba->req_list);
        ret = hba->req_list;
        if (ret)
                hba->req_list = ret->next;
        return ret;
}
static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
        dprintk("free_req(%d, %p)\n", req->index, req);
        req->next = hba->req_list;
        hba->req_list = req;
}
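/*
 * Completion path for SCSI commands: translate the IOP result code
 * into a SCSI midlayer result and finish the command.  With the v2
 * interface the firmware can report success in the tag itself
 * (IOPMU_QUEUE_REQUEST_RESULT_BIT) instead of updating the request.
 */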
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
{
        struct hpt_iop_request_scsi_command *req;
        struct scsi_cmnd *scp;
        u32 tag;

        if (hba->iopintf_v2) {
                tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
                req = hba->reqs[tag].req_virt;
                if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
                        req->header.result = IOP_RESULT_SUCCESS;
        } else {
                tag = _tag;
                req = hba->reqs[tag].req_virt;
        }

        dprintk("hptiop_host_request_callback: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, req->header.type, req->header.result,
                        req->header.context, tag);

        BUG_ON(!req->header.result);
        BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

        scp = hba->reqs[tag].scp;

        if (HPT_SCP(scp)->mapped)
                scsi_dma_unmap(scp);

        switch (le32_to_cpu(req->header.result)) {
        case IOP_RESULT_SUCCESS:
                scp->result = (DID_OK<<16);
                break;
        case IOP_RESULT_BAD_TARGET:
                scp->result = (DID_BAD_TARGET<<16);
                break;
        case IOP_RESULT_BUSY:
                scp->result = (DID_BUS_BUSY<<16);
                break;
        case IOP_RESULT_RESET:
                scp->result = (DID_RESET<<16);
                break;
        case IOP_RESULT_FAIL:
                scp->result = (DID_ERROR<<16);
                break;
        case IOP_RESULT_INVALID_REQUEST:
                scp->result = (DID_ABORT<<16);
                break;
        case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
                scp->result = SAM_STAT_CHECK_CONDITION;
                memset(&scp->sense_buffer,
                                0, sizeof(scp->sense_buffer));
                memcpy(&scp->sense_buffer,
                        &req->sg_list, le32_to_cpu(req->dataxfer_length));
                break;

        default:
                scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
                                        (DID_ABORT<<16);
                break;
        }

        dprintk("scsi_done(%p)\n", scp);
        scp->scsi_done(scp);
        free_req(hba, &hba->reqs[tag]);
}
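/*
 * Completion path for ioctl requests that execute out of IOP memory:
 * recover the hpt_ioctl_k pointer stashed in the request context,
 * copy out the result, and hand the slot back to the firmware.
 */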
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
{
        struct hpt_iop_request_header __iomem *req;
        struct hpt_iop_request_ioctl_command __iomem *p;
        struct hpt_ioctl_k *arg;

        req = (struct hpt_iop_request_header __iomem *)
                        ((unsigned long)hba->iop + tag);
        dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, readl(&req->type), readl(&req->result),
                        readl(&req->context), tag);

        BUG_ON(!readl(&req->result));
        BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

        p = (struct hpt_iop_request_ioctl_command __iomem *)req;
        arg = (struct hpt_ioctl_k *)(unsigned long)
                (readl(&req->context) |
                        ((u64)readl(&req->context_hi32)<<32));

        if (readl(&req->result) == IOP_RESULT_SUCCESS) {
                arg->result = HPT_IOCTL_RESULT_OK;

                if (arg->outbuf_size)
                        memcpy_fromio(arg->outbuf,
                                &p->buf[(readl(&p->inbuf_size) + 3) & ~3],
                                arg->outbuf_size);

                if (arg->bytes_returned)
                        *arg->bytes_returned = arg->outbuf_size;
        }
        else
                arg->result = HPT_IOCTL_RESULT_FAILED;

        arg->done(arg);
        writel(tag, &hba->iop->outbound_queue);
}
static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
        struct hptiop_hba *hba = dev_id;
        int handled;
        unsigned long flags;

        spin_lock_irqsave(hba->host->host_lock, flags);
        handled = __iop_intr(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        return handled;
}
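/*
 * Map the command's scatterlist for DMA and translate it into the
 * adapter's S/G descriptor format; the final descriptor carries the
 * end-of-table flag.  Returns the number of S/G entries, or 0 for
 * commands with no data transfer.
 */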
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct scatterlist *sg;
        int idx, nseg;

        nseg = scsi_dma_map(scp);
        BUG_ON(nseg < 0);
        if (!nseg)
                return 0;

        HPT_SCP(scp)->sgcnt = nseg;
        HPT_SCP(scp)->mapped = 1;

        BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

        scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
                psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
                psg[idx].size = cpu_to_le32(sg_dma_len(sg));
                psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
                        cpu_to_le32(1) : 0;
        }

        return HPT_SCP(scp)->sgcnt;
}
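/*
 * Old-style queuecommand, called with the host lock held: grab a free
 * request slot, build an hpt_iop_request_scsi_command in host memory
 * and post its 32-byte-shifted physical address to the inbound queue.
 * With the v2 interface the low bits of the posted value also encode
 * the request size.
 */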
static int hptiop_queuecommand(struct scsi_cmnd *scp,
                                void (*done)(struct scsi_cmnd *))
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct hpt_iop_request_scsi_command *req;
        int sg_count = 0;
        struct hptiop_request *_req;

        BUG_ON(!done);
        scp->scsi_done = done;

        _req = get_req(hba);
        if (_req == NULL) {
                dprintk("hptiop_queuecmd : no free req\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        _req->scp = scp;

        dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
                        "req_index=%d, req=%p\n",
                        scp,
                        host->host_no, scp->device->channel,
                        scp->device->id, scp->device->lun,
                        *((u32 *)&scp->cmnd),
                        *((u32 *)&scp->cmnd + 1),
                        *((u32 *)&scp->cmnd + 2),
                        _req->index, _req->req_virt);

        scp->result = 0;

        if (scp->device->channel || scp->device->lun ||
                        scp->device->id > hba->max_devices) {
                scp->result = DID_BAD_TARGET << 16;
                free_req(hba, _req);
                goto cmd_done;
        }

        req = _req->req_virt;

        /* build S/G table */
        sg_count = hptiop_buildsgl(scp, req->sg_list);
        if (!sg_count)
                HPT_SCP(scp)->mapped = 0;

        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
                                                        (u32)_req->index);
        req->header.context_hi32 = 0;
        req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
        req->channel = scp->device->channel;
        req->target = scp->device->id;
        req->lun = scp->device->lun;
        req->header.size = cpu_to_le32(
                                sizeof(struct hpt_iop_request_scsi_command)
                                - sizeof(struct hpt_iopsg)
                                + sg_count * sizeof(struct hpt_iopsg));

        memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));

        if (hba->iopintf_v2) {
                u32 size_bits;
                if (req->header.size < 256)
                        size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
                else if (req->header.size < 512)
                        size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
                else
                        size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
                                                IOPMU_QUEUE_ADDR_HOST_BIT;
                writel(_req->req_shifted_phy | size_bits,
                                &hba->iop->inbound_queue);
        } else
                writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
                                &hba->iop->inbound_queue);
        return 0;

cmd_done:
        dprintk("scsi_done(scp=%p)\n", scp);
        scp->scsi_done(scp);
        return 0;
}
static const char *hptiop_info(struct Scsi_Host *host)
{
        return driver_name_long;
}
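/*
 * Ask the IOP to reset itself: post IOPMU_INBOUND_MSG0_RESET and wait
 * (up to 60 seconds) for the acknowledgement message that clears
 * hba->resetting, then restart the firmware's background tasks.
 */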
static int hptiop_reset_hba(struct hptiop_hba *hba)
{
        if (atomic_xchg(&hba->resetting, 1) == 0) {
                atomic_inc(&hba->reset_count);
                writel(IOPMU_INBOUND_MSG0_RESET,
                                &hba->iop->inbound_msgaddr0);
                hptiop_pci_posting_flush(hba->iop);
        }

        wait_event_timeout(hba->reset_wq,
                        atomic_read(&hba->resetting) == 0, 60 * HZ);

        if (atomic_read(&hba->resetting)) {
                /* IOP is in unknown state, abort reset */
                printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
                return -1;
        }

        if (iop_send_sync_msg(hba,
                IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                dprintk("scsi%d: fail to start background task\n",
                                hba->host->host_no);
        }

        return 0;
}
static int hptiop_reset(struct scsi_cmnd *scp)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
                        scp->device->host->host_no, scp->device->channel,
                        scp->device->id, scp);

        return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}
static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
                                                int queue_depth)
{
        if (queue_depth > 256)
                queue_depth = 256;
        scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
        return queue_depth;
}
static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}
static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *host = class_to_shost(class_dev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
                        hba->firmware_version >> 24,
                        (hba->firmware_version >> 16) & 0xff,
                        (hba->firmware_version >> 8) & 0xff,
                        hba->firmware_version & 0xff);
}
static struct class_device_attribute hptiop_attr_version = {
        .attr = {
                .name = "driver-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_version,
};

static struct class_device_attribute hptiop_attr_fw_version = {
        .attr = {
                .name = "firmware-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_fw_version,
};

static struct class_device_attribute *hptiop_attrs[] = {
        &hptiop_attr_version,
        &hptiop_attr_fw_version,
        NULL
};
static struct scsi_host_template driver_template = {
        .module                     = THIS_MODULE,
        .name                       = driver_name,
        .queuecommand               = hptiop_queuecommand,
        .eh_device_reset_handler    = hptiop_reset,
        .eh_bus_reset_handler       = hptiop_reset,
        .info                       = hptiop_info,
        .unchecked_isa_dma          = 0,
        .emulated                   = 0,
        .use_clustering             = ENABLE_CLUSTERING,
        .proc_name                  = driver_name,
        .shost_attrs                = hptiop_attrs,
        .this_id                    = -1,
        .change_queue_depth         = hptiop_adjust_disk_queue_depth,
};
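/*
 * PCI probe: enable and map the adapter, wait for the firmware
 * handshake, exchange GET_CONFIG/SET_CONFIG, then carve a DMA-coherent
 * region into 32-byte-aligned request slots before registering the
 * SCSI host.  Error paths unwind in reverse order via the labels at
 * the end of the function.
 */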
static int __devinit hptiop_probe(struct pci_dev *pcidev,
                                        const struct pci_device_id *id)
{
        struct Scsi_Host *host = NULL;
        struct hptiop_hba *hba;
        struct hpt_iop_request_get_config iop_config;
        struct hpt_iop_request_set_config set_config;
        dma_addr_t start_phy;
        void *start_virt;
        u32 offset, i, req_size;

        dprintk("hptiop_probe(%p)\n", pcidev);

        if (pci_enable_device(pcidev)) {
                printk(KERN_ERR "hptiop: fail to enable pci device\n");
                return -ENODEV;
        }

        printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
                pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
                pcidev->irq);

        pci_set_master(pcidev);

        /* Enable 64bit DMA if possible */
        if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
                if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
                        printk(KERN_ERR "hptiop: fail to set dma_mask\n");
                        goto disable_pci_device;
                }
        }

        if (pci_request_regions(pcidev, driver_name)) {
                printk(KERN_ERR "hptiop: pci_request_regions failed\n");
                goto disable_pci_device;
        }

        host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
        if (!host) {
                printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
                goto free_pci_regions;
        }

        hba = (struct hptiop_hba *)host->hostdata;

        hba->pcidev = pcidev;
        hba->host = host;
        hba->initialized = 0;
        hba->iopintf_v2 = 0;

        atomic_set(&hba->resetting, 0);
        atomic_set(&hba->reset_count, 0);

        init_waitqueue_head(&hba->reset_wq);
        init_waitqueue_head(&hba->ioctl_wq);

        host->max_lun = 1;
        host->max_channel = 0;
        host->io_port = 0;
        host->n_io_port = 0;
        host->irq = pcidev->irq;

        if (hptiop_map_pci_bar(hba))
                goto free_scsi_host;

        if (iop_wait_ready(hba->iop, 20000)) {
                printk(KERN_ERR "scsi%d: firmware not ready\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        if (iop_get_config(hba, &iop_config)) {
                printk(KERN_ERR "scsi%d: get config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
                                HPTIOP_MAX_REQUESTS);
        hba->max_devices = le32_to_cpu(iop_config.max_devices);
        hba->max_request_size = le32_to_cpu(iop_config.request_size);
        hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
        hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
        hba->interface_version = le32_to_cpu(iop_config.interface_version);
        hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

        if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000)
                hba->iopintf_v2 = 1;

        host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
        host->max_id = le32_to_cpu(iop_config.max_devices);
        host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
        host->can_queue = le32_to_cpu(iop_config.max_requests);
        host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
        host->max_cmd_len = 16;

        req_size = sizeof(struct hpt_iop_request_scsi_command)
                + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
        if ((req_size & 0x1f) != 0)
                req_size = (req_size + 0x1f) & ~0x1f;

        memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
        set_config.iop_id = cpu_to_le32(host->host_no);
        set_config.vbus_id = cpu_to_le16(host->host_no);
        set_config.max_host_request_size = cpu_to_le16(req_size);

        if (iop_set_config(hba, &set_config)) {
                printk(KERN_ERR "scsi%d: set config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        pci_set_drvdata(pcidev, host);

        if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
                                        driver_name, hba)) {
                printk(KERN_ERR "scsi%d: request irq %d failed\n",
                                hba->host->host_no, pcidev->irq);
                goto unmap_pci_bar;
        }

        /* Allocate request mem */

        dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

        hba->req_size = req_size;
        start_virt = dma_alloc_coherent(&pcidev->dev,
                                hba->req_size*hba->max_requests + 0x20,
                                &start_phy, GFP_KERNEL);

        if (!start_virt) {
                printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
                                hba->host->host_no);
                goto free_request_irq;
        }

        hba->dma_coherent = start_virt;
        hba->dma_coherent_handle = start_phy;

        if ((start_phy & 0x1f) != 0) {
                /* align request memory to a 32-byte boundary */
                offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
                start_phy += offset;
                start_virt += offset;
        }
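        /*
         * Each slot's bus address is stored pre-shifted by 5 bits
         * (req_shifted_phy): the slots are 32-byte aligned, so the low
         * bits of the value posted to the inbound queue are free to
         * carry the IOPMU_QUEUE_* flag bits.
         */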
        hba->req_list = start_virt;
        for (i = 0; i < hba->max_requests; i++) {
                hba->reqs[i].next = NULL;
                hba->reqs[i].req_virt = start_virt;
                hba->reqs[i].req_shifted_phy = start_phy >> 5;
                hba->reqs[i].index = i;
                free_req(hba, &hba->reqs[i]);
                start_virt = (char *)start_virt + hba->req_size;
                start_phy = start_phy + hba->req_size;
        }

        /* Enable Interrupt and start background task */
        if (hptiop_initialize_iop(hba))
                goto free_request_mem;

        if (scsi_add_host(host, &pcidev->dev)) {
                printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
                                hba->host->host_no);
                goto free_request_mem;
        }

        scsi_scan_host(host);

        dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
        return 0;

free_request_mem:
        dma_free_coherent(&hba->pcidev->dev,
                        hba->req_size*hba->max_requests + 0x20,
                        hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
        free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
        iounmap(hba->iop);

free_scsi_host:
        scsi_host_put(host);

free_pci_regions:
        pci_release_regions(pcidev);

disable_pci_device:
        pci_disable_device(pcidev);

        dprintk("scsi%d: hptiop_probe fail\n", host->host_no);
        return -ENODEV;
}
static void hptiop_shutdown(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct hpt_iopmu __iomem *iop = hba->iop;
        u32 int_mask;

        dprintk("hptiop_shutdown(%p)\n", hba);

        /* stop the iop */
        if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
                printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
                                hba->host->host_no);

        /* disable all outbound interrupts */
        int_mask = readl(&iop->outbound_intmask);
        writel(int_mask |
                IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
                &iop->outbound_intmask);
        hptiop_pci_posting_flush(iop);
}
static void hptiop_remove(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

        scsi_remove_host(host);

        hptiop_shutdown(pcidev);

        free_irq(hba->pcidev->irq, hba);

        dma_free_coherent(&hba->pcidev->dev,
                        hba->req_size * hba->max_requests + 0x20,
                        hba->dma_coherent,
                        hba->dma_coherent_handle);

        iounmap(hba->iop);

        pci_release_regions(hba->pcidev);
        pci_set_drvdata(hba->pcidev, NULL);
        pci_disable_device(hba->pcidev);

        scsi_host_put(host);
}
static struct pci_device_id hptiop_id_table[] = {
        { PCI_VDEVICE(TTI, 0x3220) },
        { PCI_VDEVICE(TTI, 0x3320) },
        { PCI_VDEVICE(TTI, 0x3520) },
        { PCI_VDEVICE(TTI, 0x4320) },
        {},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);
static struct pci_driver hptiop_pci_driver = {
        .name     = driver_name,
        .id_table = hptiop_id_table,
        .probe    = hptiop_probe,
        .remove   = hptiop_remove,
        .shutdown = hptiop_shutdown,
};
static int __init hptiop_module_init(void)
{
        printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
        return pci_register_driver(&hptiop_pci_driver);
}
static void __exit hptiop_module_exit(void)
{
        pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");