/*
 * SuperTrak EX Series Storage Controller driver for Linux
 *
 * Copyright (C) 2005, 2006 Promise Technology Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Ed Lin <promise_linux@promise.com>
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#define DRV_NAME "stex"
#define ST_DRIVER_VERSION "3.1.0.1"
#define ST_VER_MAJOR		3
#define ST_VER_MINOR		1
#define ST_BUILD_VER		1
enum {
	/* MU register offset */
	IMR0	= 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
	IMR1	= 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
	OMR0	= 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
	OMR1	= 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
	IDBL	= 0x20,	/* MU_INBOUND_DOORBELL */
	IIS	= 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
	IIM	= 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
	ODBL	= 0x2c,	/* MU_OUTBOUND_DOORBELL */
	OIS	= 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
	OIM	= 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */

	/* MU register value */
	MU_INBOUND_DOORBELL_HANDSHAKE		= 1,
	MU_INBOUND_DOORBELL_REQHEADCHANGED	= 2,
	MU_INBOUND_DOORBELL_STATUSTAILCHANGED	= 4,
	MU_INBOUND_DOORBELL_HMUSTOPPED		= 8,
	MU_INBOUND_DOORBELL_RESET		= 16,

	MU_OUTBOUND_DOORBELL_HANDSHAKE		= 1,
	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED	= 2,
	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED	= 4,
	MU_OUTBOUND_DOORBELL_BUSCHANGE		= 8,
	MU_OUTBOUND_DOORBELL_HASEVENT		= 16,

	MU_STATE_STARTING			= 1,
	MU_STATE_FMU_READY_FOR_HANDSHAKE	= 2,
	MU_STATE_SEND_HANDSHAKE_FRAME		= 3,
	MU_STATE_RESETTING			= 5,

	MU_HANDSHAKE_SIGNATURE			= 0x55aaaa55,
	MU_HANDSHAKE_SIGNATURE_HALF		= 0x5a5a0000,
	MU_HARD_RESET_WAIT			= 30000,

	/* firmware returned values */
	SRB_STATUS_SUCCESS		= 0x01,
	SRB_STATUS_ERROR		= 0x04,
	SRB_STATUS_BUSY			= 0x05,
	SRB_STATUS_INVALID_REQUEST	= 0x06,
	SRB_STATUS_SELECTION_TIMEOUT	= 0x0A,

	TASK_ATTRIBUTE_SIMPLE		= 0x0,
	TASK_ATTRIBUTE_HEADOFQUEUE	= 0x1,
	TASK_ATTRIBUTE_ORDERED		= 0x2,
	TASK_ATTRIBUTE_ACA		= 0x4,

	/* request count, etc. */
	/* one message wasted, use MU_MAX_REQUEST+1
	   to handle MU_MAX_REQUEST messages */
	MU_REQ_COUNT		= (MU_MAX_REQUEST + 1),
	MU_STATUS_COUNT		= (MU_MAX_REQUEST + 1),

	STEX_CDB_LENGTH		= MAX_COMMAND_SIZE,
	REQ_VARIABLE_LEN	= 1024,
	STATUS_VAR_LEN		= 128,
	ST_CAN_QUEUE		= MU_MAX_REQUEST,
	ST_CMD_PER_LUN		= MU_MAX_REQUEST,

	SG_CF_EOT		= 0x80,	/* end of table */
	SG_CF_64B		= 0x40,	/* 64 bit item */
	SG_CF_HOST		= 0x20,	/* sg in host memory */

	PASSTHRU_REQ_TYPE	= 0x00000001,
	PASSTHRU_REQ_NO_WAKEUP	= 0x00000100,
	ST_INTERNAL_TIMEOUT	= 30,

	/* vendor specific commands of Promise */
	SINBAND_MGT_CMD		= 0xd9,
	CONTROLLER_CMD		= 0xe1,
	DEBUGGING_CMD		= 0xe2,

	PASSTHRU_GET_ADAPTER	= 0x05,
	PASSTHRU_GET_DRVVER	= 0x10,

	CTLR_CONFIG_CMD		= 0x03,
	CTLR_SHUTDOWN		= 0x0d,

	CTLR_POWER_STATE_CHANGE	= 0x0e,
	CTLR_POWER_SAVING	= 0x01,

	PASSTHRU_SIGNATURE	= 0x4e415041,
	MGT_CMD_SIGNATURE	= 0xba,

	ST_ADDITIONAL_MEM	= 0x200000,
};
/* SCSI inquiry data */
typedef struct st_inq {
	u8 DeviceTypeQualifier		:3;
	u8 DeviceTypeModifier		:7;
	u8 RemovableMedia		:1;
	u8 ResponseDataFormat		:4;
	u8 LinkedCommands		:1;
	u8 RelativeAddressing		:1;
	u8 ProductRevisionLevel[4];
	u8 VendorSpecific[20];

	u8 ctrl;	/* SG_CF_xxx */

	struct st_sgitem table[ST_MAX_SG];
struct handshake_frame {
	__le32 rb_phy;		/* request payload queue physical address */
	__le16 req_sz;		/* size of each request payload */
	__le16 req_cnt;		/* count of reqs the buffer can hold */
	__le16 status_sz;	/* size of each status payload */
	__le16 status_cnt;	/* count of status the buffer can hold */
	__le32 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
	u8 partner_type;	/* who sends this frame */
	__le32 partner_ver_major;
	__le32 partner_ver_minor;
	__le32 partner_ver_oem;
	__le32 partner_ver_build;
	__le32 extra_offset;	/* NEW */
	__le32 extra_size;	/* NEW */
	u8 payload_sz;		/* payload size in 4-byte units, not used */
	u8 cdb[STEX_CDB_LENGTH];
	u8 variable[REQ_VARIABLE_LEN];

	u8 payload_sz;		/* payload size in 4-byte units */
	u8 variable[STATUS_VAR_LEN];
	struct ver_info drv_ver;
	struct ver_info bios_ver;

#define MU_REQ_BUFFER_SIZE	(MU_REQ_COUNT * sizeof(struct req_msg))
#define MU_STATUS_BUFFER_SIZE	(MU_STATUS_COUNT * sizeof(struct status_msg))
#define MU_BUFFER_SIZE		(MU_REQ_BUFFER_SIZE + MU_STATUS_BUFFER_SIZE)
#define STEX_EXTRA_SIZE		max(sizeof(struct st_frame), sizeof(ST_INQ))
#define STEX_BUFFER_SIZE	(MU_BUFFER_SIZE + STEX_EXTRA_SIZE)
	struct scsi_cmnd *cmd;
	unsigned int sense_bufflen;

	void __iomem *mmio_base;	/* iomapped PCI memory space */
	dma_addr_t dma_handle;

	struct Scsi_Host *host;
	struct pci_dev *pdev;

	struct status_msg *status_buffer;
	void *copy_buffer;		/* temp buffer for driver-handled commands */
	struct st_ccb ccb[MU_MAX_REQUEST];
	struct st_ccb *wait_ccb;
	wait_queue_head_t waitq;

	unsigned int mu_status;
	unsigned int cardtype;
static const char console_inq_page[] =
{
	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
};
MODULE_AUTHOR("Ed Lin");
MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
MODULE_LICENSE("GPL");
MODULE_VERSION(ST_DRIVER_VERSION);
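/*
 * Note on 64-bit values: the firmware takes 64-bit quantities (host time,
 * DMA addresses) as two little-endian 32-bit halves.  The repeated
 * "(x >> 16) >> 16" pattern used below extracts the high half without
 * doing a single 32-bit shift on a type that may be only 32 bits wide
 * (time_t or dma_addr_t on 32-bit builds), which would be undefined.
 */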
static void stex_gettime(__le32 *time)
{
	do_gettimeofday(&tv);

	*time = cpu_to_le32(tv.tv_sec & 0xffffffff);
	*(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16);
static struct status_msg *stex_get_status(struct st_hba *hba)
{
	struct status_msg *status =
		hba->status_buffer + hba->status_tail;

	hba->status_tail %= MU_STATUS_COUNT;

static void stex_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	cmd->sense_buffer[0] = 0x70;	/* fixed format, current */
	cmd->sense_buffer[2] = sk;
	cmd->sense_buffer[7] = 18 - 8;	/* additional sense length */
	cmd->sense_buffer[12] = asc;
	cmd->sense_buffer[13] = ascq;
static void stex_invalid_field(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	/* "Invalid field in cdb" */
	stex_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);

static struct req_msg *stex_alloc_req(struct st_hba *hba)
{
	struct req_msg *req = ((struct req_msg *)hba->dma_mem) +
		hba->req_head;

	hba->req_head %= MU_REQ_COUNT;
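/*
 * stex_map_sg builds the firmware scatter/gather table inside the request's
 * variable-length area: each entry carries a 32-bit length plus the DMA
 * address split into low/high halves and is flagged SG_CF_64B | SG_CF_HOST;
 * the last entry is marked with SG_CF_EOT.  Commands without a scatterlist
 * are mapped as a single contiguous buffer via pci_map_single().
 */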
static int stex_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct pci_dev *pdev = hba->pdev;
	struct scsi_cmnd *cmd;
	dma_addr_t dma_handle;
	struct scatterlist *src;
	struct st_sgtable *dst;

	dst = (struct st_sgtable *)req->variable;
	dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
	dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen);

	src = (struct scatterlist *) cmd->request_buffer;
	n_elem = pci_map_sg(pdev, src,
		cmd->use_sg, cmd->sc_data_direction);

	ccb->sg_count = n_elem;
	dst->sg_count = cpu_to_le16((u16)n_elem);

	for (i = 0; i < n_elem; i++, src++) {
		dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
		dst->table[i].addr =
			cpu_to_le32(sg_dma_address(src) & 0xffffffff);
		dst->table[i].addr_hi =
			cpu_to_le32((sg_dma_address(src) >> 16) >> 16);
		dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
	}
	dst->table[--i].ctrl |= SG_CF_EOT;

	dma_handle = pci_map_single(pdev, cmd->request_buffer,
		cmd->request_bufflen, cmd->sc_data_direction);
	cmd->SCp.dma_handle = dma_handle;

	dst->sg_count = cpu_to_le16(1);
	dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
	dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
	dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
	dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
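/*
 * stex_internal_copy moves up to *count bytes between a driver buffer and
 * the command's data buffer; direction (ST_TO_CMD/ST_FROM_CMD) selects
 * which side is the destination.  Scatter/gather buffers are accessed
 * through scsi_kmap_atomic_sg()/scsi_kunmap_atomic_sg(), otherwise the
 * flat request_buffer is used directly.
 */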
static void stex_internal_copy(struct scsi_cmnd *cmd,
	const void *src, size_t *count, int sg_count, int direction)
{
	void *s, *d, *base = NULL;

	if (*count > cmd->request_bufflen)
		*count = cmd->request_bufflen;

	size_t offset = *count - lcount;

	base = scsi_kmap_atomic_sg(cmd->request_buffer,
		sg_count, &offset, &len);

	d = cmd->request_buffer;

	if (direction == ST_TO_CMD)

	scsi_kunmap_atomic_sg(base);
static int stex_direct_copy(struct scsi_cmnd *cmd,
	const void *src, size_t count)
{
	struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
	size_t cp_len = count;

	n_elem = pci_map_sg(hba->pdev, cmd->request_buffer,
		cmd->use_sg, cmd->sc_data_direction);

	stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);

	pci_unmap_sg(hba->pdev, cmd->request_buffer,
		cmd->use_sg, cmd->sc_data_direction);

	return cp_len == count;
static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
{
	size_t count = sizeof(struct st_frame);

	p = hba->copy_buffer;
	stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD);
	memset(p->base, 0, sizeof(u32)*6);
	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);

	p->drv_ver.major = ST_VER_MAJOR;
	p->drv_ver.minor = ST_VER_MINOR;
	p->drv_ver.oem = ST_OEM;
	p->drv_ver.build = ST_BUILD_VER;

	p->bus = hba->pdev->bus->number;
	p->slot = hba->pdev->devfn;
	p->irq_vec = hba->pdev->irq;
	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
	p->subid =
		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;

	stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_TO_CMD);
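/*
 * stex_send_cmd publishes a request to the firmware: the ccb slot for the
 * tag is bound to the request, the new request ring head is written to
 * inbound message register 0, and the REQHEADCHANGED bit is set in the
 * inbound doorbell.  The trailing readl() flushes the posted MMIO write.
 */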
stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	req->tag = cpu_to_le16(tag);
	req->task_attr = TASK_ATTRIBUTE_SIMPLE;
	req->task_manage = 0; /* not supported yet */

	hba->ccb[tag].req = req;

	writel(hba->req_head, hba->mmio_base + IMR0);
	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */
stex_slave_alloc(struct scsi_device *sdev)
{
	/* Cheat: usually extracted from Inquiry data */
	sdev->tagged_supported = 1;

	scsi_activate_tcq(sdev, sdev->host->can_queue);

stex_slave_config(struct scsi_device *sdev)
{
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;
	sdev->timeout = 60 * HZ;
	sdev->tagged_supported = 1;

stex_slave_destroy(struct scsi_device *sdev)
{
	scsi_deactivate_tcq(sdev, 1);
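/*
 * stex_queuecommand completes a few commands directly in the driver
 * (a cached MODE SENSE page, INQUIRY and driver-version queries for the
 * virtual console device at id == host->max_id - 1) without touching the
 * hardware.  Everything else is copied into a req_msg, tagged with the
 * block layer tag, mapped for DMA if needed, and handed to the firmware
 * through stex_send_cmd().
 */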
stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host;

	host = cmd->device->host;
	id = cmd->device->id;
	lun = cmd->device->lun;
	hba = (struct st_hba *) &host->hostdata[0];

	switch (cmd->cmnd[0]) {

		static char ms10_caching_page[12] =
			{ 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };

		page = cmd->cmnd[2] & 0x3f;
		if (page == 0x8 || page == 0x3f) {
			stex_direct_copy(cmd, ms10_caching_page,
				sizeof(ms10_caching_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;

			stex_invalid_field(cmd, done);

		/*
		 * The shasta firmware does not report actual LUNs in the
		 * target, so fail the command to force a sequential LUN scan.
		 * Also, the console device does not support this command.
		 */
		if (hba->cardtype == st_shasta || id == host->max_id - 1) {
			stex_invalid_field(cmd, done);

	case TEST_UNIT_READY:
		if (id == host->max_id - 1) {
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;

		if (id != host->max_id - 1)

		if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
			stex_direct_copy(cmd, console_inq_page,
				sizeof(console_inq_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;

			stex_invalid_field(cmd, done);

		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
			struct st_drvver ver;
			ver.major = ST_VER_MAJOR;
			ver.minor = ST_VER_MINOR;
			ver.build = ST_BUILD_VER;
			ver.signature[0] = PASSTHRU_SIGNATURE;
			ver.console_id = host->max_id - 1;
			ver.host_no = hba->host->host_no;
			cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ?
				DID_OK << 16 | COMMAND_COMPLETE << 8 :
				DID_ERROR << 16 | COMMAND_COMPLETE << 8;

	cmd->scsi_done = done;

	tag = cmd->request->tag;

	if (unlikely(tag >= host->can_queue))
		return SCSI_MLQUEUE_HOST_BUSY;

	req = stex_alloc_req(hba);

	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);

	hba->ccb[tag].cmd = cmd;
	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	hba->ccb[tag].sense_buffer = cmd->sense_buffer;
	hba->ccb[tag].req_type = 0;

	if (cmd->sc_data_direction != DMA_NONE)
		stex_map_sg(hba, req, &hba->ccb[tag]);

	stex_send_cmd(hba, req, tag);
static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
{
	if (cmd->sc_data_direction != DMA_NONE) {
		pci_unmap_sg(hba->pdev, cmd->request_buffer,
			cmd->use_sg, cmd->sc_data_direction);

		pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
			cmd->request_bufflen, cmd->sc_data_direction);

static void stex_scsi_done(struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd = ccb->cmd;

	if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
		result = ccb->scsi_status;
		switch (ccb->scsi_status) {
			result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
		case SAM_STAT_CHECK_CONDITION:
			result |= DRIVER_SENSE << 24;

			result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;

			result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;

	else if (ccb->srb_status & SRB_SEE_SENSE)
		result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
	else switch (ccb->srb_status) {
	case SRB_STATUS_SELECTION_TIMEOUT:
		result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;

	case SRB_STATUS_BUSY:
		result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;

	case SRB_STATUS_INVALID_REQUEST:
	case SRB_STATUS_ERROR:
		result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;

	cmd->result = result;
static void stex_copy_data(struct st_ccb *ccb,
	struct status_msg *resp, unsigned int variable)
{
	size_t count = variable;

	if (resp->scsi_status != SAM_STAT_GOOD) {
		if (ccb->sense_buffer != NULL)
			memcpy(ccb->sense_buffer, resp->variable,
				min(variable, ccb->sense_bufflen));

	if (ccb->cmd == NULL)

	stex_internal_copy(ccb->cmd,
		resp->variable, &count, ccb->sg_count, ST_TO_CMD);
static void stex_ys_commands(struct st_hba *hba,
	struct st_ccb *ccb, struct status_msg *resp)
{
	if (ccb->cmd->cmnd[0] == MGT_CMD &&
		resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
		ccb->cmd->request_bufflen =
			le32_to_cpu(*(__le32 *)&resp->variable[0]);

	if (resp->srb_status != 0)

	/* determine inquiry command status by DeviceTypeQualifier */
	if (ccb->cmd->cmnd[0] == INQUIRY &&
		resp->scsi_status == SAM_STAT_GOOD) {
		count = STEX_EXTRA_SIZE;
		stex_internal_copy(ccb->cmd, hba->copy_buffer,
			&count, ccb->sg_count, ST_FROM_CMD);
		inq_data = (ST_INQ *)hba->copy_buffer;
		if (inq_data->DeviceTypeQualifier != 0)
			ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT;

			ccb->srb_status = SRB_STATUS_SUCCESS;
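/*
 * stex_mu_intr consumes the outbound status ring: on a STATUSHEADCHANGED
 * doorbell it reads the new head from OMR1, walks the entries between
 * status_tail and status_head, matches each status_msg to its ccb by tag,
 * copies back sense/extra data, then unmaps the data buffer and finishes
 * the SCSI command, or wakes a passthrough waiter.  The consumed head is
 * acknowledged by writing it to inbound message register 1.
 */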
static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
{
	void __iomem *base = hba->mmio_base;
	struct status_msg *resp;

	if (!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED))

	/* status payloads */
	hba->status_head = readl(base + OMR1);
	if (unlikely(hba->status_head >= MU_STATUS_COUNT)) {
		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
			pci_name(hba->pdev));

	/*
	 * It's not a valid status payload if:
	 * 1. there are no pending requests (e.g. during the init stage)
	 * 2. there are pending requests, but the controller is resetting
	 *    and its type is not st_yosemite
	 * The st_yosemite firmware returns pending requests to the driver
	 * while resetting, so those are allowed to pass.
	 */
	if (unlikely(hba->out_req_cnt <= 0 ||
		(hba->mu_status == MU_STATE_RESETTING &&
		hba->cardtype != st_yosemite))) {
		hba->status_tail = hba->status_head;

	while (hba->status_tail != hba->status_head) {
		resp = stex_get_status(hba);
		tag = le16_to_cpu(resp->tag);
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));

		ccb = &hba->ccb[tag];
		if (hba->wait_ccb == ccb)
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));

		size = resp->payload_sz * sizeof(u32);	/* payload size */
		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
			size > sizeof(*resp))) {
			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
				pci_name(hba->pdev));

		size -= sizeof(*resp) - STATUS_VAR_LEN;	/* copy size */

		stex_copy_data(ccb, resp, size);

		ccb->srb_status = resp->srb_status;
		ccb->scsi_status = resp->scsi_status;

		if (likely(ccb->cmd != NULL)) {
			if (hba->cardtype == st_yosemite)
				stex_ys_commands(hba, ccb, resp);

			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
				stex_controller_info(hba, ccb);

			stex_unmap_sg(hba, ccb->cmd);

		} else if (ccb->req_type & PASSTHRU_REQ_TYPE) {

			if (ccb->req_type & PASSTHRU_REQ_NO_WAKEUP) {

			if (waitqueue_active(&hba->waitq))
				wake_up(&hba->waitq);

	writel(hba->status_head, base + IMR1);
	readl(base + IMR1); /* flush */
static irqreturn_t stex_intr(int irq, void *__hba)
{
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + ODBL);

	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */
		stex_mu_intr(hba, data);

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return IRQ_RETVAL(handled);
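/*
 * stex_handshake synchronizes with the firmware: wait for the handshake
 * signature in OMR0 (ringing the HANDSHAKE doorbell first if needed),
 * optionally accept a smaller queue depth advertised in OMR1, then fill a
 * handshake_frame with the request ring DMA address, element sizes and
 * counts, host time and driver version, publish the status ring address
 * through IMR0/IMR1, ring the HANDSHAKE doorbell again and wait for the
 * firmware to echo the signature before clearing the message registers.
 */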
static int stex_handshake(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	struct handshake_frame *h;
	dma_addr_t status_phys;
	unsigned long before;

	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);

		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): no handshake signature\n",
					pci_name(hba->pdev));

	data = readl(base + OMR1);
	if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
		if (hba->host->can_queue > data)
			hba->host->can_queue = data;

	h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
	h->rb_phy = cpu_to_le32(hba->dma_handle);
	h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16);
	h->req_sz = cpu_to_le16(sizeof(struct req_msg));
	h->req_cnt = cpu_to_le16(MU_REQ_COUNT);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(MU_STATUS_COUNT);
	stex_gettime(&h->hosttime);
	h->partner_type = HMU_PARTNER_TYPE;
	if (hba->dma_size > STEX_BUFFER_SIZE) {
		h->extra_offset = cpu_to_le32(STEX_BUFFER_SIZE);
		h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM);

		h->extra_offset = h->extra_size = 0;

	status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE;
	writel(status_phys, base + IMR0);
	writel((status_phys >> 16) >> 16, base + IMR1);

	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */

	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
	readl(base + IDBL); /* flush */

	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));

	writel(0, base + IMR0);
	writel(0, base + OMR0);
	writel(0, base + IMR1);
	writel(0, base + OMR1);
	readl(base + OMR1); /* flush */
	hba->mu_status = MU_STATE_STARTED;
static int stex_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct st_hba *hba = (struct st_hba *)host->hostdata;
	u16 tag = cmd->request->tag;
	int result = SUCCESS;

	base = hba->mmio_base;
	spin_lock_irqsave(host->host_lock, flags);
	if (tag < host->can_queue && hba->ccb[tag].cmd == cmd)
		hba->wait_ccb = &hba->ccb[tag];

		for (tag = 0; tag < host->can_queue; tag++)
			if (hba->ccb[tag].cmd == cmd) {
				hba->wait_ccb = &hba->ccb[tag];

		if (tag >= host->can_queue)

	data = readl(base + ODBL);
	if (data == 0 || data == 0xffffffff)

	writel(data, base + ODBL);
	readl(base + ODBL); /* flush */

	stex_mu_intr(hba, data);

	if (hba->wait_ccb == NULL) {
		printk(KERN_WARNING DRV_NAME
			"(%s): lost interrupt\n", pci_name(hba->pdev));

	stex_unmap_sg(hba, cmd);
	hba->wait_ccb->req = NULL; /* nullify the req's future return */
	hba->wait_ccb = NULL;

	spin_unlock_irqrestore(host->host_lock, flags);
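/*
 * stex_hard_reset saves the controller's PCI config space, pulses the
 * secondary bus reset bit in the parent bridge, waits for the device to
 * respond on PCI_COMMAND with bus mastering enabled again, and then
 * restores the saved config space.
 */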
static void stex_hard_reset(struct st_hba *hba)
{
	struct pci_bus *bus;

	for (i = 0; i < 16; i++)
		pci_read_config_dword(hba->pdev, i * 4,
			&hba->pdev->saved_config_space[i]);

	/* Reset the secondary bus. Our controller (MU/ATU) is the only device
	   on the secondary bus. Consult the Intel 80331/3 developer's manual
	   for details. */
	bus = hba->pdev->bus;
	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/*
	 * 1 ms may be enough for 8-port controllers, but 16-port controllers
	 * need more time to finish the bus reset. Use 100 ms here to be safe.
	 */
	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))

	for (i = 0; i < 16; i++)
		pci_write_config_dword(hba->pdev, i * 4,
			hba->pdev->saved_config_space[i]);
static int stex_reset(struct scsi_cmnd *cmd)
{
	unsigned long flags;
	unsigned long before;

	hba = (struct st_hba *) &cmd->device->host->hostdata[0];

	hba->mu_status = MU_STATE_RESETTING;

	if (hba->cardtype == st_shasta)
		stex_hard_reset(hba);

	if (hba->cardtype != st_yosemite) {
		if (stex_handshake(hba)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): resetting: handshake failed\n",
				pci_name(hba->pdev));

		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->status_head = 0;
		hba->status_tail = 0;
		hba->out_req_cnt = 0;
		spin_unlock_irqrestore(hba->host->host_lock, flags);

	writel(MU_INBOUND_DOORBELL_RESET, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */

	while (hba->out_req_cnt > 0) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): reset timeout\n", pci_name(hba->pdev));

	hba->mu_status = MU_STATE_STARTED;
static int stex_biosparam(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads = 255, sectors = 63;

	if (capacity < 0x200000) {

	sector_div(capacity, heads * sectors);
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.proc_name		= DRV_NAME,
	.bios_param		= stex_biosparam,
	.queuecommand		= stex_queuecommand,
	.slave_alloc		= stex_slave_alloc,
	.slave_configure	= stex_slave_config,
	.slave_destroy		= stex_slave_destroy,
	.eh_abort_handler	= stex_abort,
	.eh_host_reset_handler	= stex_reset,
	.can_queue		= ST_CAN_QUEUE,
	.sg_tablesize		= ST_MAX_SG,
	.cmd_per_lun		= ST_CMD_PER_LUN,
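/*
 * stex_set_dma_mask prefers a 64-bit DMA mask for both streaming and
 * coherent mappings and falls back to 32-bit masks if the 64-bit setup
 * fails.
 */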
static int stex_set_dma_mask(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)
		&& !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))

	ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);

	ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
static int __devinit
stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;

	err = pci_enable_device(pdev);

	pci_set_master(pdev);

	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));

		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",

	hba = (struct st_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct st_hba));

	err = pci_request_regions(pdev, DRV_NAME);

		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",

		goto out_scsi_host_put;

	hba->mmio_base = ioremap(pci_resource_start(pdev, 0),
		pci_resource_len(pdev, 0));
	if (!hba->mmio_base) {
		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",

		goto out_release_regions;

	err = stex_set_dma_mask(pdev);

		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",

	hba->cardtype = (unsigned int) id->driver_data;
	if (hba->cardtype == st_vsc && (pdev->subsystem_device & 0xf) == 0x1)
		hba->cardtype = st_vsc1;
	hba->dma_size = (hba->cardtype == st_vsc1) ?
		(STEX_BUFFER_SIZE + ST_ADDITIONAL_MEM) : (STEX_BUFFER_SIZE);
	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
	if (!hba->dma_mem) {
		printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",

	hba->status_buffer =
		(struct status_msg *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
	hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
	hba->mu_status = MU_STATE_STARTING;
	if (hba->cardtype == st_shasta) {
		host->max_id = 16 + 1;
	} else if (hba->cardtype == st_yosemite) {
		host->max_lun = 128;
		host->max_id = 1 + 1;

		/* st_vsc and st_vsc1 */
		host->max_id = 128 + 1;

	host->max_channel = 0;
	host->unique_id = host->host_no;
	host->max_cmd_len = STEX_CDB_LENGTH;

	init_waitqueue_head(&hba->waitq);

	err = request_irq(pdev->irq, stex_intr, IRQF_SHARED, DRV_NAME, hba);

		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",

	err = stex_handshake(hba);

	err = scsi_init_shared_tag_map(host, host->can_queue);

		printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",

	pci_set_drvdata(pdev, hba);

	err = scsi_add_host(host, &pdev->dev);

		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",

	scsi_scan_host(host);

	free_irq(pdev->irq, hba);

	dma_free_coherent(&pdev->dev, hba->dma_size,
		hba->dma_mem, hba->dma_handle);

	iounmap(hba->mmio_base);
out_release_regions:
	pci_release_regions(pdev);

	scsi_host_put(host);

	pci_disable_device(pdev);
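/*
 * stex_hba_stop quiesces the firmware before unload/shutdown: it builds an
 * internal request (a CTLR_SHUTDOWN management command on st_yosemite, a
 * power-saving controller command otherwise), flags the ccb as a
 * passthrough request, sends it, and then polls up to ST_INTERNAL_TIMEOUT
 * seconds for the completion path to clear the PASSTHRU_REQ_TYPE flag.
 */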
static void stex_hba_stop(struct st_hba *hba)
{
	struct req_msg *req;
	unsigned long flags;
	unsigned long before;

	spin_lock_irqsave(hba->host->host_lock, flags);
	req = stex_alloc_req(hba);
	memset(req->cdb, 0, STEX_CDB_LENGTH);

	if (hba->cardtype == st_yosemite) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
		req->cdb[3] = CTLR_SHUTDOWN;

		req->cdb[0] = CONTROLLER_CMD;
		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
		req->cdb[2] = CTLR_POWER_SAVING;

	hba->ccb[tag].cmd = NULL;
	hba->ccb[tag].sg_count = 0;
	hba->ccb[tag].sense_bufflen = 0;
	hba->ccb[tag].sense_buffer = NULL;
	hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE;

	stex_send_cmd(hba, req, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ))
static void stex_hba_free(struct st_hba *hba)
{
	free_irq(hba->pdev->irq, hba);

	iounmap(hba->mmio_base);

	pci_release_regions(hba->pdev);

	dma_free_coherent(&hba->pdev->dev, hba->dma_size,
		hba->dma_mem, hba->dma_handle);

static void stex_remove(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	scsi_remove_host(hba->host);

	pci_set_drvdata(pdev, NULL);

	scsi_host_put(hba->host);

	pci_disable_device(pdev);
static void stex_shutdown(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);
static struct pci_device_id stex_pci_tbl[] = {
	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX12350 */
	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX4350 */
	{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX24350 */

	{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },

	{ 0x105a, 0x8650, PCI_ANY_ID, 0x4600, 0, 0,
		st_yosemite }, /* SuperTrak EX4650 */
	{ 0x105a, 0x8650, PCI_ANY_ID, 0x4610, 0, 0,
		st_yosemite }, /* SuperTrak EX4650o */
	{ 0x105a, 0x8650, PCI_ANY_ID, 0x8600, 0, 0,
		st_yosemite }, /* SuperTrak EX8650EL */
	{ 0x105a, 0x8650, PCI_ANY_ID, 0x8601, 0, 0,
		st_yosemite }, /* SuperTrak EX8650 */
	{ 0x105a, 0x8650, PCI_ANY_ID, 0x8602, 0, 0,
		st_yosemite }, /* SuperTrak EX8654 */
	{ 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_yosemite }, /* generic st_yosemite */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
static struct pci_driver stex_pci_driver = {
	.id_table	= stex_pci_tbl,
	.probe		= stex_probe,
	.remove		= __devexit_p(stex_remove),
	.shutdown	= stex_shutdown,
static int __init stex_init(void)
{
	printk(KERN_INFO DRV_NAME
		": Promise SuperTrak EX Driver version: %s\n",
		ST_DRIVER_VERSION);

	return pci_register_driver(&stex_pci_driver);

static void __exit stex_exit(void)
{
	pci_unregister_driver(&stex_pci_driver);

module_init(stex_init);
module_exit(stex_exit);