2 * Disk Array driver for HP Smart Array controllers.
3 * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <linux/blktrace_api.h>
42 #include <asm/uaccess.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/blkdev.h>
47 #include <linux/genhd.h>
48 #include <linux/completion.h>
49 #include <scsi/scsi.h>
51 #include <scsi/scsi_ioctl.h>
52 #include <linux/cdrom.h>
53 #include <linux/scatterlist.h>
55 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
56 #define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
57 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
59 /* Embedded module documentation macros - see modules.h */
60 MODULE_AUTHOR("Hewlett-Packard Company");
61 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
62 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
63 " SA6i P600 P800 P400 P400i E200 E200i E500");
64 MODULE_VERSION("3.6.14");
65 MODULE_LICENSE("GPL");
67 #include "cciss_cmd.h"
69 #include <linux/cciss_ioctl.h>
71 /* define the PCI info for the cards we can control */
72 static const struct pci_device_id cciss_pci_device_id[] = {
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
76 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
77 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
78 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
79 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
80 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
81 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
93 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
94 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
98 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
100 /* board_id = Subsystem Device ID & Vendor ID
101 * product = Marketing Name for the board
102 * access = Address of the struct of function pointers
103 * nr_cmds = Number of commands supported by controller
105 static struct board_type products[] = {
106 {0x40700E11, "Smart Array 5300", &SA5_access, 512},
107 {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
108 {0x40820E11, "Smart Array 532", &SA5B_access, 512},
109 {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
110 {0x409A0E11, "Smart Array 641", &SA5_access, 512},
111 {0x409B0E11, "Smart Array 642", &SA5_access, 512},
112 {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
113 {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
114 {0x40910E11, "Smart Array 6i", &SA5_access, 512},
115 {0x3225103C, "Smart Array P600", &SA5_access, 512},
116 {0x3223103C, "Smart Array P800", &SA5_access, 512},
117 {0x3234103C, "Smart Array P400", &SA5_access, 512},
118 {0x3235103C, "Smart Array P400i", &SA5_access, 512},
119 {0x3211103C, "Smart Array E200i", &SA5_access, 120},
120 {0x3212103C, "Smart Array E200", &SA5_access, 120},
121 {0x3213103C, "Smart Array E200i", &SA5_access, 120},
122 {0x3214103C, "Smart Array E200i", &SA5_access, 120},
123 {0x3215103C, "Smart Array E200i", &SA5_access, 120},
124 {0x3237103C, "Smart Array E500", &SA5_access, 512},
125 {0x323D103C, "Smart Array P700m", &SA5_access, 512},
126 {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
129 /* How long to wait (in milliseconds) for board to go into simple mode */
130 #define MAX_CONFIG_WAIT 30000
131 #define MAX_IOCTL_CONFIG_WAIT 1000
133 /*define how many times we will try a command because of bus resets */
134 #define MAX_CMD_RETRIES 3
138 /* Originally cciss driver only supports 8 major numbers */
139 #define MAX_CTLR_ORIG 8
141 static ctlr_info_t *hba[MAX_CTLR];
143 static void do_cciss_request(struct request_queue *q);
144 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
145 static int cciss_open(struct inode *inode, struct file *filep);
146 static int cciss_release(struct inode *inode, struct file *filep);
147 static int cciss_ioctl(struct inode *inode, struct file *filep,
148 unsigned int cmd, unsigned long arg);
149 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
151 static int cciss_revalidate(struct gendisk *disk);
152 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
153 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
156 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
157 sector_t *total_size, unsigned int *block_size);
158 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
159 sector_t *total_size, unsigned int *block_size);
160 static void cciss_geometry_inquiry(int ctlr, int logvol,
161 int withirq, sector_t total_size,
162 unsigned int block_size, InquiryData_struct *inq_buff,
163 drive_info_struct *drv);
164 static void cciss_getgeometry(int cntl_num);
165 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
167 static void start_io(ctlr_info_t *h);
168 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
169 unsigned int use_unit_num, unsigned int log_unit,
170 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
171 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
172 unsigned int use_unit_num, unsigned int log_unit,
173 __u8 page_code, int cmd_type);
175 static void fail_all_cmds(unsigned long ctlr);
177 #ifdef CONFIG_PROC_FS
178 static void cciss_procinit(int i);
180 static void cciss_procinit(int i)
183 #endif /* CONFIG_PROC_FS */
186 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
189 static struct block_device_operations cciss_fops = {
190 .owner = THIS_MODULE,
192 .release = cciss_release,
193 .ioctl = cciss_ioctl,
194 .getgeo = cciss_getgeo,
196 .compat_ioctl = cciss_compat_ioctl,
198 .revalidate_disk = cciss_revalidate,
202 * Enqueuing and dequeuing functions for cmdlists.
204 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
208 c->next = c->prev = c;
210 c->prev = (*Qptr)->prev;
212 (*Qptr)->prev->next = c;
217 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
218 CommandList_struct *c)
220 if (c && c->next != c) {
223 c->prev->next = c->next;
224 c->next->prev = c->prev;
231 #include "cciss_scsi.c" /* For SCSI tape support */
233 #define RAID_UNKNOWN 6
235 #ifdef CONFIG_PROC_FS
238 * Report information about this controller.
240 #define ENG_GIG 1000000000
241 #define ENG_GIG_FACTOR (ENG_GIG/512)
242 #define ENGAGE_SCSI "engage scsi"
243 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
247 static struct proc_dir_entry *proc_cciss;
249 static void cciss_seq_show_header(struct seq_file *seq)
251 ctlr_info_t *h = seq->private;
253 seq_printf(seq, "%s: HP %s Controller\n"
254 "Board ID: 0x%08lx\n"
255 "Firmware Version: %c%c%c%c\n"
257 "Logical drives: %d\n"
258 "Current Q depth: %d\n"
259 "Current # commands on controller: %d\n"
260 "Max Q depth since init: %d\n"
261 "Max # commands on controller since init: %d\n"
262 "Max SG entries since init: %d\n",
265 (unsigned long)h->board_id,
266 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
267 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
269 h->Qdepth, h->commands_outstanding,
270 h->maxQsinceinit, h->max_outstanding, h->maxSG);
272 #ifdef CONFIG_CISS_SCSI_TAPE
273 cciss_seq_tape_report(seq, h->ctlr);
274 #endif /* CONFIG_CISS_SCSI_TAPE */
277 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
279 ctlr_info_t *h = seq->private;
280 unsigned ctlr = h->ctlr;
283 /* prevent displaying bogus info during configuration
284 * or deconfiguration of a logical volume
286 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
287 if (h->busy_configuring) {
288 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
289 return ERR_PTR(-EBUSY);
291 h->busy_configuring = 1;
292 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
295 cciss_seq_show_header(seq);
300 static int cciss_seq_show(struct seq_file *seq, void *v)
302 sector_t vol_sz, vol_sz_frac;
303 ctlr_info_t *h = seq->private;
304 unsigned ctlr = h->ctlr;
306 drive_info_struct *drv = &h->drv[*pos];
308 if (*pos > h->highest_lun)
314 vol_sz = drv->nr_blocks;
315 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
317 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
319 if (drv->raid_level > 5)
320 drv->raid_level = RAID_UNKNOWN;
321 seq_printf(seq, "cciss/c%dd%d:"
322 "\t%4u.%02uGB\tRAID %s\n",
323 ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
324 raid_label[drv->raid_level]);
328 static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
330 ctlr_info_t *h = seq->private;
332 if (*pos > h->highest_lun)
339 static void cciss_seq_stop(struct seq_file *seq, void *v)
341 ctlr_info_t *h = seq->private;
343 /* Only reset h->busy_configuring if we succeeded in setting
344 * it during cciss_seq_start. */
345 if (v == ERR_PTR(-EBUSY))
348 h->busy_configuring = 0;
351 static struct seq_operations cciss_seq_ops = {
352 .start = cciss_seq_start,
353 .show = cciss_seq_show,
354 .next = cciss_seq_next,
355 .stop = cciss_seq_stop,
358 static int cciss_seq_open(struct inode *inode, struct file *file)
360 int ret = seq_open(file, &cciss_seq_ops);
361 struct seq_file *seq = file->private_data;
364 seq->private = PDE(inode)->data;
370 cciss_proc_write(struct file *file, const char __user *buf,
371 size_t length, loff_t *ppos)
376 #ifndef CONFIG_CISS_SCSI_TAPE
380 if (!buf || length > PAGE_SIZE - 1)
383 buffer = (char *)__get_free_page(GFP_KERNEL);
388 if (copy_from_user(buffer, buf, length))
390 buffer[length] = '\0';
392 #ifdef CONFIG_CISS_SCSI_TAPE
393 if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
394 struct seq_file *seq = file->private_data;
395 ctlr_info_t *h = seq->private;
398 rc = cciss_engage_scsi(h->ctlr);
404 #endif /* CONFIG_CISS_SCSI_TAPE */
406 /* might be nice to have "disengage" too, but it's not
407 safely possible. (only 1 module use count, lock issues.) */
410 free_page((unsigned long)buffer);
414 static struct file_operations cciss_proc_fops = {
415 .owner = THIS_MODULE,
416 .open = cciss_seq_open,
419 .release = seq_release,
420 .write = cciss_proc_write,
423 static void __devinit cciss_procinit(int i)
425 struct proc_dir_entry *pde;
427 if (proc_cciss == NULL)
428 proc_cciss = proc_mkdir("cciss", proc_root_driver);
431 pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
439 #endif /* CONFIG_PROC_FS */
442 * For operations that cannot sleep, a command block is allocated at init,
443 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
444 * which ones are free or in use. For operations that can wait for kmalloc
445 * to possible sleep, this routine can be called with get_from_pool set to 0.
446 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
448 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
450 CommandList_struct *c;
453 dma_addr_t cmd_dma_handle, err_dma_handle;
455 if (!get_from_pool) {
456 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
457 sizeof(CommandList_struct), &cmd_dma_handle);
460 memset(c, 0, sizeof(CommandList_struct));
464 c->err_info = (ErrorInfo_struct *)
465 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
468 if (c->err_info == NULL) {
469 pci_free_consistent(h->pdev,
470 sizeof(CommandList_struct), c, cmd_dma_handle);
473 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
474 } else { /* get it out of the controllers pool */
477 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
480 } while (test_and_set_bit
481 (i & (BITS_PER_LONG - 1),
482 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
484 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
487 memset(c, 0, sizeof(CommandList_struct));
488 cmd_dma_handle = h->cmd_pool_dhandle
489 + i * sizeof(CommandList_struct);
490 c->err_info = h->errinfo_pool + i;
491 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
492 err_dma_handle = h->errinfo_pool_dhandle
493 + i * sizeof(ErrorInfo_struct);
499 c->busaddr = (__u32) cmd_dma_handle;
500 temp64.val = (__u64) err_dma_handle;
501 c->ErrDesc.Addr.lower = temp64.val32.lower;
502 c->ErrDesc.Addr.upper = temp64.val32.upper;
503 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
510 * Frees a command block that was previously allocated with cmd_alloc().
512 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
517 if (!got_from_pool) {
518 temp64.val32.lower = c->ErrDesc.Addr.lower;
519 temp64.val32.upper = c->ErrDesc.Addr.upper;
520 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
521 c->err_info, (dma_addr_t) temp64.val);
522 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
523 c, (dma_addr_t) c->busaddr);
526 clear_bit(i & (BITS_PER_LONG - 1),
527 h->cmd_pool_bits + (i / BITS_PER_LONG));
532 static inline ctlr_info_t *get_host(struct gendisk *disk)
534 return disk->queue->queuedata;
537 static inline drive_info_struct *get_drv(struct gendisk *disk)
539 return disk->private_data;
543 * Open. Make sure the device is really there.
545 static int cciss_open(struct inode *inode, struct file *filep)
547 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
548 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
551 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
552 #endif /* CCISS_DEBUG */
554 if (host->busy_initializing || drv->busy_configuring)
557 * Root is allowed to open raw volume zero even if it's not configured
558 * so array config can still work. Root is also allowed to open any
559 * volume that has a LUN ID, so it can issue IOCTL to reread the
560 * disk information. I don't think I really like this
561 * but I'm already using way to many device nodes to claim another one
562 * for "raw controller".
564 if (drv->heads == 0) {
565 if (iminor(inode) != 0) { /* not node 0? */
566 /* if not node 0 make sure it is a partition = 0 */
567 if (iminor(inode) & 0x0f) {
569 /* if it is, make sure we have a LUN ID */
570 } else if (drv->LunID == 0) {
574 if (!capable(CAP_SYS_ADMIN))
585 static int cciss_release(struct inode *inode, struct file *filep)
587 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
588 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
591 printk(KERN_DEBUG "cciss_release %s\n",
592 inode->i_bdev->bd_disk->disk_name);
593 #endif /* CCISS_DEBUG */
602 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
606 ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
611 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
613 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
616 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
619 case CCISS_GETPCIINFO:
620 case CCISS_GETINTINFO:
621 case CCISS_SETINTINFO:
622 case CCISS_GETNODENAME:
623 case CCISS_SETNODENAME:
624 case CCISS_GETHEARTBEAT:
625 case CCISS_GETBUSTYPES:
626 case CCISS_GETFIRMVER:
627 case CCISS_GETDRIVVER:
628 case CCISS_REVALIDVOLS:
629 case CCISS_DEREGDISK:
630 case CCISS_REGNEWDISK:
632 case CCISS_RESCANDISK:
633 case CCISS_GETLUNINFO:
634 return do_ioctl(f, cmd, arg);
636 case CCISS_PASSTHRU32:
637 return cciss_ioctl32_passthru(f, cmd, arg);
638 case CCISS_BIG_PASSTHRU32:
639 return cciss_ioctl32_big_passthru(f, cmd, arg);
646 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
649 IOCTL32_Command_struct __user *arg32 =
650 (IOCTL32_Command_struct __user *) arg;
651 IOCTL_Command_struct arg64;
652 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
658 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
659 sizeof(arg64.LUN_info));
661 copy_from_user(&arg64.Request, &arg32->Request,
662 sizeof(arg64.Request));
664 copy_from_user(&arg64.error_info, &arg32->error_info,
665 sizeof(arg64.error_info));
666 err |= get_user(arg64.buf_size, &arg32->buf_size);
667 err |= get_user(cp, &arg32->buf);
668 arg64.buf = compat_ptr(cp);
669 err |= copy_to_user(p, &arg64, sizeof(arg64));
674 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
678 copy_in_user(&arg32->error_info, &p->error_info,
679 sizeof(arg32->error_info));
685 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
688 BIG_IOCTL32_Command_struct __user *arg32 =
689 (BIG_IOCTL32_Command_struct __user *) arg;
690 BIG_IOCTL_Command_struct arg64;
691 BIG_IOCTL_Command_struct __user *p =
692 compat_alloc_user_space(sizeof(arg64));
698 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
699 sizeof(arg64.LUN_info));
701 copy_from_user(&arg64.Request, &arg32->Request,
702 sizeof(arg64.Request));
704 copy_from_user(&arg64.error_info, &arg32->error_info,
705 sizeof(arg64.error_info));
706 err |= get_user(arg64.buf_size, &arg32->buf_size);
707 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
708 err |= get_user(cp, &arg32->buf);
709 arg64.buf = compat_ptr(cp);
710 err |= copy_to_user(p, &arg64, sizeof(arg64));
715 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
719 copy_in_user(&arg32->error_info, &p->error_info,
720 sizeof(arg32->error_info));
727 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
729 drive_info_struct *drv = get_drv(bdev->bd_disk);
734 geo->heads = drv->heads;
735 geo->sectors = drv->sectors;
736 geo->cylinders = drv->cylinders;
743 static int cciss_ioctl(struct inode *inode, struct file *filep,
744 unsigned int cmd, unsigned long arg)
746 struct block_device *bdev = inode->i_bdev;
747 struct gendisk *disk = bdev->bd_disk;
748 ctlr_info_t *host = get_host(disk);
749 drive_info_struct *drv = get_drv(disk);
750 int ctlr = host->ctlr;
751 void __user *argp = (void __user *)arg;
754 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
755 #endif /* CCISS_DEBUG */
758 case CCISS_GETPCIINFO:
760 cciss_pci_info_struct pciinfo;
764 pciinfo.domain = pci_domain_nr(host->pdev->bus);
765 pciinfo.bus = host->pdev->bus->number;
766 pciinfo.dev_fn = host->pdev->devfn;
767 pciinfo.board_id = host->board_id;
769 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
773 case CCISS_GETINTINFO:
775 cciss_coalint_struct intinfo;
779 readl(&host->cfgtable->HostWrite.CoalIntDelay);
781 readl(&host->cfgtable->HostWrite.CoalIntCount);
783 (argp, &intinfo, sizeof(cciss_coalint_struct)))
787 case CCISS_SETINTINFO:
789 cciss_coalint_struct intinfo;
795 if (!capable(CAP_SYS_ADMIN))
798 (&intinfo, argp, sizeof(cciss_coalint_struct)))
800 if ((intinfo.delay == 0) && (intinfo.count == 0))
802 // printk("cciss_ioctl: delay and count cannot be 0\n");
805 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
806 /* Update the field, and then ring the doorbell */
807 writel(intinfo.delay,
808 &(host->cfgtable->HostWrite.CoalIntDelay));
809 writel(intinfo.count,
810 &(host->cfgtable->HostWrite.CoalIntCount));
811 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
813 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
814 if (!(readl(host->vaddr + SA5_DOORBELL)
817 /* delay and try again */
820 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
821 if (i >= MAX_IOCTL_CONFIG_WAIT)
825 case CCISS_GETNODENAME:
827 NodeName_type NodeName;
832 for (i = 0; i < 16; i++)
834 readb(&host->cfgtable->ServerName[i]);
835 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
839 case CCISS_SETNODENAME:
841 NodeName_type NodeName;
847 if (!capable(CAP_SYS_ADMIN))
851 (NodeName, argp, sizeof(NodeName_type)))
854 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
856 /* Update the field, and then ring the doorbell */
857 for (i = 0; i < 16; i++)
859 &host->cfgtable->ServerName[i]);
861 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
863 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
864 if (!(readl(host->vaddr + SA5_DOORBELL)
867 /* delay and try again */
870 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
871 if (i >= MAX_IOCTL_CONFIG_WAIT)
876 case CCISS_GETHEARTBEAT:
878 Heartbeat_type heartbeat;
882 heartbeat = readl(&host->cfgtable->HeartBeat);
884 (argp, &heartbeat, sizeof(Heartbeat_type)))
888 case CCISS_GETBUSTYPES:
890 BusTypes_type BusTypes;
894 BusTypes = readl(&host->cfgtable->BusTypes);
896 (argp, &BusTypes, sizeof(BusTypes_type)))
900 case CCISS_GETFIRMVER:
902 FirmwareVer_type firmware;
906 memcpy(firmware, host->firm_ver, 4);
909 (argp, firmware, sizeof(FirmwareVer_type)))
913 case CCISS_GETDRIVVER:
915 DriverVer_type DriverVer = DRIVER_VERSION;
921 (argp, &DriverVer, sizeof(DriverVer_type)))
926 case CCISS_REVALIDVOLS:
927 return rebuild_lun_table(host, NULL);
929 case CCISS_GETLUNINFO:{
930 LogvolInfo_struct luninfo;
932 luninfo.LunID = drv->LunID;
933 luninfo.num_opens = drv->usage_count;
934 luninfo.num_parts = 0;
935 if (copy_to_user(argp, &luninfo,
936 sizeof(LogvolInfo_struct)))
940 case CCISS_DEREGDISK:
941 return rebuild_lun_table(host, disk);
944 return rebuild_lun_table(host, NULL);
948 IOCTL_Command_struct iocommand;
949 CommandList_struct *c;
953 DECLARE_COMPLETION_ONSTACK(wait);
958 if (!capable(CAP_SYS_RAWIO))
962 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
964 if ((iocommand.buf_size < 1) &&
965 (iocommand.Request.Type.Direction != XFER_NONE)) {
968 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
969 /* Check kmalloc limits */
970 if (iocommand.buf_size > 128000)
973 if (iocommand.buf_size > 0) {
974 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
978 if (iocommand.Request.Type.Direction == XFER_WRITE) {
979 /* Copy the data into the buffer we created */
981 (buff, iocommand.buf, iocommand.buf_size)) {
986 memset(buff, 0, iocommand.buf_size);
988 if ((c = cmd_alloc(host, 0)) == NULL) {
992 // Fill in the command type
993 c->cmd_type = CMD_IOCTL_PEND;
994 // Fill in Command Header
995 c->Header.ReplyQueue = 0; // unused in simple mode
996 if (iocommand.buf_size > 0) // buffer to fill
998 c->Header.SGList = 1;
999 c->Header.SGTotal = 1;
1000 } else // no buffers to fill
1002 c->Header.SGList = 0;
1003 c->Header.SGTotal = 0;
1005 c->Header.LUN = iocommand.LUN_info;
1006 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
1008 // Fill in Request block
1009 c->Request = iocommand.Request;
1011 // Fill in the scatter gather information
1012 if (iocommand.buf_size > 0) {
1013 temp64.val = pci_map_single(host->pdev, buff,
1015 PCI_DMA_BIDIRECTIONAL);
1016 c->SG[0].Addr.lower = temp64.val32.lower;
1017 c->SG[0].Addr.upper = temp64.val32.upper;
1018 c->SG[0].Len = iocommand.buf_size;
1019 c->SG[0].Ext = 0; // we are not chaining
1023 /* Put the request on the tail of the request queue */
1024 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1025 addQ(&host->reqQ, c);
1028 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1030 wait_for_completion(&wait);
1032 /* unlock the buffers from DMA */
1033 temp64.val32.lower = c->SG[0].Addr.lower;
1034 temp64.val32.upper = c->SG[0].Addr.upper;
1035 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
1037 PCI_DMA_BIDIRECTIONAL);
1039 /* Copy the error information out */
1040 iocommand.error_info = *(c->err_info);
1042 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
1044 cmd_free(host, c, 0);
1048 if (iocommand.Request.Type.Direction == XFER_READ) {
1049 /* Copy the data out of the buffer we created */
1051 (iocommand.buf, buff, iocommand.buf_size)) {
1053 cmd_free(host, c, 0);
1058 cmd_free(host, c, 0);
1061 case CCISS_BIG_PASSTHRU:{
1062 BIG_IOCTL_Command_struct *ioc;
1063 CommandList_struct *c;
1064 unsigned char **buff = NULL;
1065 int *buff_size = NULL;
1067 unsigned long flags;
1071 DECLARE_COMPLETION_ONSTACK(wait);
1074 BYTE __user *data_ptr;
1078 if (!capable(CAP_SYS_RAWIO))
1080 ioc = (BIG_IOCTL_Command_struct *)
1081 kmalloc(sizeof(*ioc), GFP_KERNEL);
1086 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1090 if ((ioc->buf_size < 1) &&
1091 (ioc->Request.Type.Direction != XFER_NONE)) {
1095 /* Check kmalloc limits using all SGs */
1096 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1100 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1105 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1110 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1116 left = ioc->buf_size;
1117 data_ptr = ioc->buf;
1120 ioc->malloc_size) ? ioc->
1122 buff_size[sg_used] = sz;
1123 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1124 if (buff[sg_used] == NULL) {
1128 if (ioc->Request.Type.Direction == XFER_WRITE) {
1130 (buff[sg_used], data_ptr, sz)) {
1135 memset(buff[sg_used], 0, sz);
1141 if ((c = cmd_alloc(host, 0)) == NULL) {
1145 c->cmd_type = CMD_IOCTL_PEND;
1146 c->Header.ReplyQueue = 0;
1148 if (ioc->buf_size > 0) {
1149 c->Header.SGList = sg_used;
1150 c->Header.SGTotal = sg_used;
1152 c->Header.SGList = 0;
1153 c->Header.SGTotal = 0;
1155 c->Header.LUN = ioc->LUN_info;
1156 c->Header.Tag.lower = c->busaddr;
1158 c->Request = ioc->Request;
1159 if (ioc->buf_size > 0) {
1161 for (i = 0; i < sg_used; i++) {
1163 pci_map_single(host->pdev, buff[i],
1165 PCI_DMA_BIDIRECTIONAL);
1166 c->SG[i].Addr.lower =
1168 c->SG[i].Addr.upper =
1170 c->SG[i].Len = buff_size[i];
1171 c->SG[i].Ext = 0; /* we are not chaining */
1175 /* Put the request on the tail of the request queue */
1176 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1177 addQ(&host->reqQ, c);
1180 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1181 wait_for_completion(&wait);
1182 /* unlock the buffers from DMA */
1183 for (i = 0; i < sg_used; i++) {
1184 temp64.val32.lower = c->SG[i].Addr.lower;
1185 temp64.val32.upper = c->SG[i].Addr.upper;
1186 pci_unmap_single(host->pdev,
1187 (dma_addr_t) temp64.val, buff_size[i],
1188 PCI_DMA_BIDIRECTIONAL);
1190 /* Copy the error information out */
1191 ioc->error_info = *(c->err_info);
1192 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1193 cmd_free(host, c, 0);
1197 if (ioc->Request.Type.Direction == XFER_READ) {
1198 /* Copy the data out of the buffer we created */
1199 BYTE __user *ptr = ioc->buf;
1200 for (i = 0; i < sg_used; i++) {
1202 (ptr, buff[i], buff_size[i])) {
1203 cmd_free(host, c, 0);
1207 ptr += buff_size[i];
1210 cmd_free(host, c, 0);
1214 for (i = 0; i < sg_used; i++)
1223 /* scsi_cmd_ioctl handles these, below, though some are not */
1224 /* very meaningful for cciss. SG_IO is the main one people want. */
1226 case SG_GET_VERSION_NUM:
1227 case SG_SET_TIMEOUT:
1228 case SG_GET_TIMEOUT:
1229 case SG_GET_RESERVED_SIZE:
1230 case SG_SET_RESERVED_SIZE:
1231 case SG_EMULATED_HOST:
1233 case SCSI_IOCTL_SEND_COMMAND:
1234 return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
1236 /* scsi_cmd_ioctl would normally handle these, below, but */
1237 /* they aren't a good fit for cciss, as CD-ROMs are */
1238 /* not supported, and we don't have any bus/target/lun */
1239 /* which we present to the kernel. */
1241 case CDROM_SEND_PACKET:
1242 case CDROMCLOSETRAY:
1244 case SCSI_IOCTL_GET_IDLUN:
1245 case SCSI_IOCTL_GET_BUS_NUMBER:
1251 static void cciss_check_queues(ctlr_info_t *h)
1253 int start_queue = h->next_to_run;
1256 /* check to see if we have maxed out the number of commands that can
1257 * be placed on the queue. If so then exit. We do this check here
1258 * in case the interrupt we serviced was from an ioctl and did not
1259 * free any new commands.
1261 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1264 /* We have room on the queue for more commands. Now we need to queue
1265 * them up. We will also keep track of the next queue to run so
1266 * that every queue gets a chance to be started first.
1268 for (i = 0; i < h->highest_lun + 1; i++) {
1269 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1270 /* make sure the disk has been added and the drive is real
1271 * because this can be called from the middle of init_one.
1273 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1275 blk_start_queue(h->gendisk[curr_queue]->queue);
1277 /* check to see if we have maxed out the number of commands
1278 * that can be placed on the queue.
1280 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1281 if (curr_queue == start_queue) {
1283 (start_queue + 1) % (h->highest_lun + 1);
1286 h->next_to_run = curr_queue;
1290 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1295 static void cciss_softirq_done(struct request *rq)
1297 CommandList_struct *cmd = rq->completion_data;
1298 ctlr_info_t *h = hba[cmd->ctlr];
1299 unsigned long flags;
1303 if (cmd->Request.Type.Direction == XFER_READ)
1304 ddir = PCI_DMA_FROMDEVICE;
1306 ddir = PCI_DMA_TODEVICE;
1308 /* command did not need to be retried */
1309 /* unmap the DMA mapping for all the scatter gather elements */
1310 for (i = 0; i < cmd->Header.SGList; i++) {
1311 temp64.val32.lower = cmd->SG[i].Addr.lower;
1312 temp64.val32.upper = cmd->SG[i].Addr.upper;
1313 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1317 printk("Done with %p\n", rq);
1318 #endif /* CCISS_DEBUG */
1320 if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
1323 spin_lock_irqsave(&h->lock, flags);
1324 cmd_free(h, cmd, 1);
1325 cciss_check_queues(h);
1326 spin_unlock_irqrestore(&h->lock, flags);
1329 /* This function will check the usage_count of the drive to be updated/added.
1330 * If the usage_count is zero then the drive information will be updated and
1331 * the disk will be re-registered with the kernel. If not then it will be
1332 * left alone for the next reboot. The exception to this is disk 0 which
1333 * will always be left registered with the kernel since it is also the
1334 * controller node. Any changes to disk 0 will show up on the next
/*
 * Re-read capacity/geometry for the logical drive at drv_index and
 * (re)register its gendisk with the kernel.  An existing drive is first
 * deregistered under busy_configuring protection; a read-capacity result
 * of all F's switches the controller to 16-byte CDBs for volumes > 2TB.
 */
1337 static void cciss_update_drive_info(int ctlr, int drv_index)
1339 ctlr_info_t *h = hba[ctlr];
1340 struct gendisk *disk;
1341 InquiryData_struct *inq_buff = NULL;
1342 unsigned int block_size;
1343 sector_t total_size;
1344 unsigned long flags = 0;
1347 /* if the disk already exists then deregister it before proceeding */
1348 if (h->drv[drv_index].raid_level != -1) {
1349 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1350 h->drv[drv_index].busy_configuring = 1;
1351 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1353 /* deregister_disk sets h->drv[drv_index].queue = NULL */
1354 /* which keeps the interrupt handler from starting */
1356 ret = deregister_disk(h->gendisk[drv_index],
1357 &h->drv[drv_index], 0);
1358 h->drv[drv_index].busy_configuring = 0;
1361 /* If the disk is in use return */
1365 /* Get information about the disk and modify the driver structure */
1366 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1367 if (inq_buff == NULL)
1370 /* testing to see if 16-byte CDBs are already being used */
1371 if (h->cciss_read == CCISS_READ_16) {
1372 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1373 &total_size, &block_size);
1377 cciss_read_capacity(ctlr, drv_index, 1,
1378 &total_size, &block_size);
1380 /* if read_capacity returns all F's this volume is >2TB in size */
1381 /* so we switch to 16-byte CDB's for all read/write ops */
1382 if (total_size == 0xFFFFFFFFULL) {
1383 cciss_read_capacity_16(ctlr, drv_index, 1,
1384 &total_size, &block_size);
1385 h->cciss_read = CCISS_READ_16;
1386 h->cciss_write = CCISS_WRITE_16;
1388 h->cciss_read = CCISS_READ_10;
1389 h->cciss_write = CCISS_WRITE_10;
1392 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1393 inq_buff, &h->drv[drv_index]);
1396 disk = h->gendisk[drv_index];
1397 set_capacity(disk, h->drv[drv_index].nr_blocks);
1399 /* if it's the controller it's already added */
/* Fresh gendisk: allocate a request queue and fill in identity/fops. */
1401 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1402 sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
1403 disk->major = h->major;
1404 disk->first_minor = drv_index << NWD_SHIFT;
1405 disk->fops = &cciss_fops;
1406 disk->private_data = &h->drv[drv_index];
1408 /* Set up queue information */
1409 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1411 /* This is a hardware imposed limit. */
1412 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1414 /* This is a limit in the driver and could be eliminated. */
1415 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1417 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1419 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1421 disk->queue->queuedata = hba[ctlr];
1423 blk_queue_hardsect_size(disk->queue,
1424 hba[ctlr]->drv[drv_index].block_size);
1426 /* Make sure all queue data is written out before */
1427 /* setting h->drv[drv_index].queue, as setting this */
1428 /* allows the interrupt handler to start the queue */
1430 h->drv[drv_index].queue = disk->queue;
1438 printk(KERN_ERR "cciss: out of memory\n");
1442 /* This function will find the first index of the controllers drive array
1443 * that has a -1 for the raid_level and will return that index. This is
1444 * where new drives will be added. If the index to be returned is greater
1445 * than the highest_lun index for the controller then highest_lun is set
1446 * to this new index. If there are no available indexes then -1 is returned.
/*
 * Return the first drive-array slot whose raid_level is -1 (free),
 * bumping highest_lun if the new slot extends past it.  Per the comment
 * above, -1 is returned when no slot is free (return lines not visible
 * in this excerpt).
 */
1448 static int cciss_find_free_drive_index(int ctlr)
1452 for (i = 0; i < CISS_MAX_LUN; i++) {
1453 if (hba[ctlr]->drv[i].raid_level == -1) {
1454 if (i > hba[ctlr]->highest_lun)
1455 hba[ctlr]->highest_lun = i;
1462 /* This function will add and remove logical drives from the Logical
1463 * drive array of the controller and maintain persistency of ordering
1464 * so that mount points are preserved until the next reboot. This allows
1465 * for the removal of logical drives in the middle of the drive array
1466 * without a re-ordering of those drives.
1468 * h = The controller to perform the operations on
1469 * del_disk = The disk to remove if specified. If the value given
1470 * is NULL then no disk is removed.
/*
 * Synchronize the driver's logical-drive array with the controller's
 * REPORT LOGICAL LUNS data, preserving existing slot positions so mount
 * points survive.  If del_disk is non-NULL, only that disk is
 * deregistered.  Serialized by h->busy_configuring.
 */
1472 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1476 ReportLunData_struct *ld_buff = NULL;
1477 drive_info_struct *drv = NULL;
1484 unsigned long flags;
1486 /* Set busy_configuring flag for this operation */
1487 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1488 if (h->busy_configuring) {
1489 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1492 h->busy_configuring = 1;
1494 /* if del_disk is NULL then we are being called to add a new disk
1495 * and update the logical drive table. If it is not NULL then
1496 * we will check if the disk is in use or not.
1498 if (del_disk != NULL) {
1499 drv = get_drv(del_disk);
1500 drv->busy_configuring = 1;
1501 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1502 return_code = deregister_disk(del_disk, drv, 1);
1503 drv->busy_configuring = 0;
1504 h->busy_configuring = 0;
1507 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1508 if (!capable(CAP_SYS_RAWIO))
1511 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1512 if (ld_buff == NULL)
/* Ask the controller for its current logical-volume list. */
1515 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1516 sizeof(ReportLunData_struct), 0,
1519 if (return_code == IO_OK) {
1521 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1522 } else { /* reading number of logical volumes failed */
1523 printk(KERN_WARNING "cciss: report logical volume"
1524 " command failed\n");
1529 num_luns = listlength / 8; /* 8 bytes per entry */
1530 if (num_luns > CISS_MAX_LUN) {
1531 num_luns = CISS_MAX_LUN;
1532 printk(KERN_WARNING "cciss: more luns configured"
1533 " on controller than can be handled by"
1537 /* Compare controller drive array to drivers drive array.
1538 * Check for updates in the drive information and any new drives
1539 * on the controller.
1541 for (i = 0; i < num_luns; i++) {
/* Assemble the 32-bit LUN id from the 4 little-endian LUN bytes. */
1547 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1549 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1551 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1552 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1554 /* Find if the LUN is already in the drive array
1555 * of the controller. If so then update its info
1556 * if not in use. If it does not exist then find
1557 * the first free index and add it.
1559 for (j = 0; j <= h->highest_lun; j++) {
1560 if (h->drv[j].LunID == lunid) {
1566 /* check if the drive was found already in the array */
1568 drv_index = cciss_find_free_drive_index(ctlr);
1569 if (drv_index == -1)
1572 /*Check if the gendisk needs to be allocated */
1573 if (!h->gendisk[drv_index]){
1574 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1575 if (!h->gendisk[drv_index]){
1576 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1581 h->drv[drv_index].LunID = lunid;
1582 cciss_update_drive_info(ctlr, drv_index);
1588 h->busy_configuring = 0;
1589 /* We return -1 here to tell the ACU that we have registered/updated
1590 * all of the drives that we can and to keep it from calling us
1595 printk(KERN_ERR "cciss: out of memory\n");
1599 /* This function will deregister the disk and its queue from the
1600 * kernel. It must be called with the controller lock held and the
1601 * drv structure's busy_configuring flag set. Its parameters are:
1603 * disk = This is the disk to be deregistered
1604 * drv = This is the drive_info_struct associated with the disk to be
1605 * deregistered. It contains information about the disk used
1607 * clear_all = This flag determines whether or not the disk information
1608 * is going to be completely cleared out and the highest_lun
1609 * reset. Sometimes we want to clear out information about
1610 * the disk in preparation for re-adding it. In this case
1611 * the highest_lun should be left unchanged and the LunID
1612 * should not be cleared.
/*
 * Remove a disk (and, for non-controller disks, its request queue) from
 * the kernel.  Caller holds the controller lock and has set the drive's
 * busy_configuring flag.  clear_all additionally wipes the drive slot
 * and recomputes highest_lun; disk 0 is never fully deregistered since
 * it doubles as the controller node.
 */
1614 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1618 ctlr_info_t *h = get_host(disk);
1620 if (!capable(CAP_SYS_RAWIO))
1623 /* make sure logical volume is NOT in use */
1624 if (clear_all || (h->gendisk[0] == disk)) {
1625 if (drv->usage_count > 1)
1627 } else if (drv->usage_count > 0)
1630 /* invalidate the devices and deregister the disk. If it is disk
1631 * zero do not deregister it but just zero out its values. This
1632 * allows us to delete disk zero but keep the controller registered.
1634 if (h->gendisk[0] != disk) {
1635 struct request_queue *q = disk->queue;
1636 if (disk->flags & GENHD_FL_UP)
1639 blk_cleanup_queue(q);
1640 /* Set drv->queue to NULL so that we do not try
1641 * to call blk_start_queue on this queue in the
1646 /* If clear_all is set then we are deleting the logical
1647 * drive, not just refreshing its info. For drives
1648 * other than disk 0 we will call put_disk. We do not
1649 * do this for disk 0 as we need it to be able to
1650 * configure the controller.
1653 /* This isn't pretty, but we need to find the
1654 * disk in our array and NULL out the pointer.
1655 * This is so that we will call alloc_disk if
1656 * this index is used again later.
1658 for (i=0; i < CISS_MAX_LUN; i++){
1659 if(h->gendisk[i] == disk){
1660 h->gendisk[i] = NULL;
1667 set_capacity(disk, 0);
1671 /* zero out the disk size info */
1673 drv->block_size = 0;
1677 drv->raid_level = -1; /* This can be used as a flag variable to
1678 * indicate that this element of the drive
1683 /* check to see if it was the last disk */
1684 if (drv == h->drv + h->highest_lun) {
1685 /* if so, find the new highest lun */
1686 int i, newhighest = -1;
1687 for (i = 0; i < h->highest_lun; i++) {
1688 /* if the disk has size > 0, it is available */
1689 if (h->drv[i].heads)
1692 h->highest_lun = newhighest;
/*
 * Populate a CommandList_struct for an ioctl-style command (TYPE_CMD:
 * inquiry, report luns, read capacity, cache flush) or a message
 * (TYPE_MSG: abort/reset/no-op), then DMA-map buff as the single S/G
 * element.  Returns IO_OK on success (return statements not visible in
 * this excerpt).
 */
1700 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1701 1: address logical volume log_unit,
1702 2: periph device address is scsi3addr */
1703 unsigned int log_unit, __u8 page_code,
1704 unsigned char *scsi3addr, int cmd_type)
1706 ctlr_info_t *h = hba[ctlr];
1707 u64bit buff_dma_handle;
1710 c->cmd_type = CMD_IOCTL_PEND;
1711 c->Header.ReplyQueue = 0;
/* One S/G entry when a data buffer is supplied, else none. */
1713 c->Header.SGList = 1;
1714 c->Header.SGTotal = 1;
1716 c->Header.SGList = 0;
1717 c->Header.SGTotal = 0;
/* Tag the command with its bus address so the completion can find it. */
1719 c->Header.Tag.lower = c->busaddr;
1721 c->Request.Type.Type = cmd_type;
1722 if (cmd_type == TYPE_CMD) {
1725 /* If the logical unit number is 0 then, this is going
1726 to controller so It's a physical command
1727 mode = 0 target = 0. So we have nothing to write.
1728 otherwise, if use_unit_num == 1,
1729 mode = 1(volume set addressing) target = LUNID
1730 otherwise, if use_unit_num == 2,
1731 mode = 0(periph dev addr) target = scsi3addr */
1732 if (use_unit_num == 1) {
1733 c->Header.LUN.LogDev.VolId =
1734 h->drv[log_unit].LunID;
1735 c->Header.LUN.LogDev.Mode = 1;
1736 } else if (use_unit_num == 2) {
1737 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1739 c->Header.LUN.LogDev.Mode = 0;
1741 /* are we trying to read a vital product page */
1742 if (page_code != 0) {
1743 c->Request.CDB[1] = 0x01;
1744 c->Request.CDB[2] = page_code;
1746 c->Request.CDBLen = 6;
1747 c->Request.Type.Attribute = ATTR_SIMPLE;
1748 c->Request.Type.Direction = XFER_READ;
1749 c->Request.Timeout = 0;
1750 c->Request.CDB[0] = CISS_INQUIRY;
1751 c->Request.CDB[4] = size & 0xFF;
1753 case CISS_REPORT_LOG:
1754 case CISS_REPORT_PHYS:
1755 /* Talking to controller so It's a physical command
1756 mode = 00 target = 0. Nothing to write.
/* Allocation length goes in CDB bytes 6..9, big-endian. */
1758 c->Request.CDBLen = 12;
1759 c->Request.Type.Attribute = ATTR_SIMPLE;
1760 c->Request.Type.Direction = XFER_READ;
1761 c->Request.Timeout = 0;
1762 c->Request.CDB[0] = cmd;
1763 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1764 c->Request.CDB[7] = (size >> 16) & 0xFF;
1765 c->Request.CDB[8] = (size >> 8) & 0xFF;
1766 c->Request.CDB[9] = size & 0xFF;
1769 case CCISS_READ_CAPACITY:
1770 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1771 c->Header.LUN.LogDev.Mode = 1;
1772 c->Request.CDBLen = 10;
1773 c->Request.Type.Attribute = ATTR_SIMPLE;
1774 c->Request.Type.Direction = XFER_READ;
1775 c->Request.Timeout = 0;
1776 c->Request.CDB[0] = cmd;
1778 case CCISS_READ_CAPACITY_16:
1779 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1780 c->Header.LUN.LogDev.Mode = 1;
1781 c->Request.CDBLen = 16;
1782 c->Request.Type.Attribute = ATTR_SIMPLE;
1783 c->Request.Type.Direction = XFER_READ;
1784 c->Request.Timeout = 0;
1785 c->Request.CDB[0] = cmd;
1786 c->Request.CDB[1] = 0x10;
1787 c->Request.CDB[10] = (size >> 24) & 0xFF;
1788 c->Request.CDB[11] = (size >> 16) & 0xFF;
1789 c->Request.CDB[12] = (size >> 8) & 0xFF;
1790 c->Request.CDB[13] = size & 0xFF;
/* NOTE(review): the next two lines redundantly repeat the Timeout and
 * CDB[0] assignments from above; harmless, but should be removed. */
1791 c->Request.Timeout = 0;
1792 c->Request.CDB[0] = cmd;
1794 case CCISS_CACHE_FLUSH:
1795 c->Request.CDBLen = 12;
1796 c->Request.Type.Attribute = ATTR_SIMPLE;
1797 c->Request.Type.Direction = XFER_WRITE;
1798 c->Request.Timeout = 0;
1799 c->Request.CDB[0] = BMIC_WRITE;
1800 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
/* NOTE(review): %c prints the opcode as a character; %x would be clearer. */
1804 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1807 } else if (cmd_type == TYPE_MSG) {
1809 case 0: /* ABORT message */
1810 c->Request.CDBLen = 12;
1811 c->Request.Type.Attribute = ATTR_SIMPLE;
1812 c->Request.Type.Direction = XFER_WRITE;
1813 c->Request.Timeout = 0;
1814 c->Request.CDB[0] = cmd; /* abort */
1815 c->Request.CDB[1] = 0; /* abort a command */
1816 /* buff contains the tag of the command to abort */
1817 memcpy(&c->Request.CDB[4], buff, 8);
1819 case 1: /* RESET message */
1820 c->Request.CDBLen = 12;
1821 c->Request.Type.Attribute = ATTR_SIMPLE;
1822 c->Request.Type.Direction = XFER_WRITE;
1823 c->Request.Timeout = 0;
1824 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1825 c->Request.CDB[0] = cmd; /* reset */
1826 c->Request.CDB[1] = 0x04; /* reset a LUN */
1828 case 3: /* No-Op message */
1829 c->Request.CDBLen = 1;
1830 c->Request.Type.Attribute = ATTR_SIMPLE;
1831 c->Request.Type.Direction = XFER_WRITE;
1832 c->Request.Timeout = 0;
1833 c->Request.CDB[0] = cmd;
1837 "cciss%d: unknown message type %d\n", ctlr, cmd);
1842 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1845 /* Fill in the scatter gather information */
/* Map buff bidirectionally; the caller unmaps after completion. */
1847 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1849 PCI_DMA_BIDIRECTIONAL);
1850 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1851 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1852 c->SG[0].Len = size;
1853 c->SG[0].Ext = 0; /* we are not chaining */
/*
 * Build a command with fill_cmd(), queue it to the controller, and sleep
 * on a completion until the interrupt handler finishes it.  Unsolicited
 * aborts are retried up to MAX_CMD_RETRIES; other errors map to
 * IO_ERROR.  The data buffer DMA mapping made by fill_cmd() is undone
 * here before returning.
 */
1858 static int sendcmd_withirq(__u8 cmd,
1862 unsigned int use_unit_num,
1863 unsigned int log_unit, __u8 page_code, int cmd_type)
1865 ctlr_info_t *h = hba[ctlr];
1866 CommandList_struct *c;
1867 u64bit buff_dma_handle;
1868 unsigned long flags;
1870 DECLARE_COMPLETION_ONSTACK(wait);
1872 if ((c = cmd_alloc(h, 0)) == NULL)
1874 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1875 log_unit, page_code, NULL, cmd_type);
1876 if (return_status != IO_OK) {
/* NOTE(review): whether the command is freed before this early return
 * is not visible in this excerpt — confirm against the full source. */
1878 return return_status;
1883 /* Put the request on the tail of the queue and send it */
1884 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1888 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
/* Block until the interrupt path completes this command. */
1890 wait_for_completion(&wait);
1892 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1893 switch (c->err_info->CommandStatus) {
1894 case CMD_TARGET_STATUS:
1895 printk(KERN_WARNING "cciss: cmd %p has "
1896 " completed with errors\n", c);
1897 if (c->err_info->ScsiStatus) {
1898 printk(KERN_WARNING "cciss: cmd %p "
1899 "has SCSI Status = %x\n",
1900 c, c->err_info->ScsiStatus);
1904 case CMD_DATA_UNDERRUN:
1905 case CMD_DATA_OVERRUN:
1906 /* expected for inquire and report lun commands */
1909 printk(KERN_WARNING "cciss: Cmd %p is "
1910 "reported invalid\n", c);
1911 return_status = IO_ERROR;
1913 case CMD_PROTOCOL_ERR:
1914 printk(KERN_WARNING "cciss: cmd %p has "
1915 "protocol error \n", c);
1916 return_status = IO_ERROR;
1918 case CMD_HARDWARE_ERR:
1919 printk(KERN_WARNING "cciss: cmd %p had "
1920 " hardware error\n", c);
1921 return_status = IO_ERROR;
1923 case CMD_CONNECTION_LOST:
1924 printk(KERN_WARNING "cciss: cmd %p had "
1925 "connection lost\n", c);
1926 return_status = IO_ERROR;
1929 printk(KERN_WARNING "cciss: cmd %p was "
1931 return_status = IO_ERROR;
1933 case CMD_ABORT_FAILED:
1934 printk(KERN_WARNING "cciss: cmd %p reports "
1935 "abort failed\n", c);
1936 return_status = IO_ERROR;
1938 case CMD_UNSOLICITED_ABORT:
1940 "cciss%d: unsolicited abort %p\n", ctlr, c);
1941 if (c->retry_count < MAX_CMD_RETRIES) {
1943 "cciss%d: retrying %p\n", ctlr, c);
1945 /* erase the old error information */
1946 memset(c->err_info, 0,
1947 sizeof(ErrorInfo_struct));
1948 return_status = IO_OK;
/* Re-arm the on-stack completion so the retry can wait on it again. */
1949 INIT_COMPLETION(wait);
1952 return_status = IO_ERROR;
1955 printk(KERN_WARNING "cciss: cmd %p returned "
1956 "unknown status %x\n", c,
1957 c->err_info->CommandStatus);
1958 return_status = IO_ERROR;
1961 /* unlock the buffers from DMA */
1962 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1963 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1964 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1965 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1967 return return_status;
/*
 * Read vendor-specific inquiry page 0xC1 to obtain drive geometry
 * (heads/sectors/cylinders) and RAID level, filling *drv.  If the
 * volume does not report geometry (byte 8 == 0xFF), a synthetic
 * 32-sectors-per-track geometry is made up.  withirq selects
 * sendcmd_withirq() versus the polled init-time sendcmd().
 */
1970 static void cciss_geometry_inquiry(int ctlr, int logvol,
1971 int withirq, sector_t total_size,
1972 unsigned int block_size,
1973 InquiryData_struct *inq_buff,
1974 drive_info_struct *drv)
1979 memset(inq_buff, 0, sizeof(InquiryData_struct))2;
1981 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1982 inq_buff, sizeof(*inq_buff), 1,
1983 logvol, 0xC1, TYPE_CMD);
1985 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1986 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1988 if (return_code == IO_OK) {
1989 if (inq_buff->data_byte[8] == 0xFF) {
1991 "cciss: reading geometry failed, volume "
1992 "does not support reading geometry\n");
1994 drv->sectors = 32; // Sectors per track
1995 drv->cylinders = total_size + 1;
1996 drv->raid_level = RAID_UNKNOWN;
1998 drv->heads = inq_buff->data_byte[6];
1999 drv->sectors = inq_buff->data_byte[7];
2000 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
2001 drv->cylinders += inq_buff->data_byte[5];
2002 drv->raid_level = inq_buff->data_byte[8];
2004 drv->block_size = block_size;
/* total_size is the index of the last block, so count = size + 1. */
2005 drv->nr_blocks = total_size + 1;
2006 t = drv->heads * drv->sectors;
/* Recompute cylinders from actual size; sector_div rounds down. */
2008 sector_t real_size = total_size + 1;
2009 unsigned long rem = sector_div(real_size, t);
2012 drv->cylinders = real_size;
2014 } else { /* Get geometry failed */
2015 printk(KERN_WARNING "cciss: reading geometry failed\n");
2017 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2018 drv->heads, drv->sectors, drv->cylinders);
/*
 * Issue a 10-byte READ CAPACITY for logvol and return the (last-block)
 * total_size and block_size, both converted from big-endian wire order.
 * On failure block_size falls back to BLOCK_SIZE.  withirq selects the
 * interrupt-driven path over the init-time polled path.
 */
2022 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2023 unsigned int *block_size)
2025 ReadCapdata_struct *buf;
2028 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2030 printk(KERN_WARNING "cciss: out of memory\n");
2035 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2036 ctlr, buf, sizeof(ReadCapdata_struct),
2037 1, logvol, 0, TYPE_CMD);
2039 return_code = sendcmd(CCISS_READ_CAPACITY,
2040 ctlr, buf, sizeof(ReadCapdata_struct),
2041 1, logvol, 0, NULL, TYPE_CMD);
2042 if (return_code == IO_OK) {
2043 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2044 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2045 } else { /* read capacity command failed */
2046 printk(KERN_WARNING "cciss: read capacity failed\n");
2048 *block_size = BLOCK_SIZE;
2050 if (*total_size != 0)
2051 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2052 (unsigned long long)*total_size+1, *block_size);
/*
 * 16-byte READ CAPACITY variant for volumes > 2TB: total_size is a
 * 64-bit big-endian block count.  Otherwise mirrors
 * cciss_read_capacity() above.
 */
2057 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2059 ReadCapdata_struct_16 *buf;
2062 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2064 printk(KERN_WARNING "cciss: out of memory\n");
2069 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2070 ctlr, buf, sizeof(ReadCapdata_struct_16),
2071 1, logvol, 0, TYPE_CMD);
2074 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2075 ctlr, buf, sizeof(ReadCapdata_struct_16),
2076 1, logvol, 0, NULL, TYPE_CMD);
2078 if (return_code == IO_OK) {
2079 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2080 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2081 } else { /* read capacity command failed */
2082 printk(KERN_WARNING "cciss: read capacity failed\n");
2084 *block_size = BLOCK_SIZE;
2086 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2087 (unsigned long long)*total_size+1, *block_size);
/*
 * block_device_operations revalidate hook: re-read capacity and
 * geometry for the drive behind this gendisk and refresh the queue's
 * hardware sector size and the disk capacity.
 */
2091 static int cciss_revalidate(struct gendisk *disk)
2093 ctlr_info_t *h = get_host(disk);
2094 drive_info_struct *drv = get_drv(disk);
2097 unsigned int block_size;
2098 sector_t total_size;
2099 InquiryData_struct *inq_buff = NULL;
/* Locate this drive's logical-volume index by matching LunID. */
2101 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2102 if (h->drv[logvol].LunID == drv->LunID) {
2111 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2112 if (inq_buff == NULL) {
2113 printk(KERN_WARNING "cciss: out of memory\n");
/* Use 10-byte or 16-byte read-capacity per the controller's CDB mode. */
2116 if (h->cciss_read == CCISS_READ_10) {
2117 cciss_read_capacity(h->ctlr, logvol, 1,
2118 &total_size, &block_size);
2120 cciss_read_capacity_16(h->ctlr, logvol, 1,
2121 &total_size, &block_size);
2123 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2126 blk_queue_hardsect_size(drv->queue, drv->block_size);
2127 set_capacity(disk, drv->nr_blocks);
2134 * Wait polling for a command to complete.
2135 * The memory mapped FIFO is polled for the completion.
2136 * Used only at init time, interrupts from the HBA are disabled.
/*
 * Poll the controller's completion FIFO for up to 20 seconds, sleeping
 * one tick between reads.  Used only at init time with HBA interrupts
 * disabled; per the comment above, an invalid address is returned on
 * timeout (return lines not visible in this excerpt).
 */
2138 static unsigned long pollcomplete(int ctlr)
2143 /* Wait (up to 20 seconds) for a command to complete */
2145 for (i = 20 * HZ; i > 0; i--) {
2146 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2147 if (done == FIFO_EMPTY)
2148 schedule_timeout_uninterruptible(1);
2152 /* Invalid address to tell caller we ran out of time */
/*
 * Called when polled sendcmd() picks a completion off the FIFO that is
 * not the command it submitted (possible during SCSI tape error
 * handling).  With CONFIG_CISS_SCSI_TAPE the stray completion is saved
 * in the per-controller reject list for the interrupt handler to
 * process later; otherwise it can only be reported and dropped.
 */
2156 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2158 /* We get in here if sendcmd() is polling for completions
2159 and gets some command back that it wasn't expecting --
2160 something other than that which it just sent down.
2161 Ordinarily, that shouldn't happen, but it can happen when
2162 the scsi tape stuff gets into error handling mode, and
2163 starts using sendcmd() to try to abort commands and
2164 reset tape drives. In that case, sendcmd may pick up
2165 completions of commands that were sent to logical drives
2166 through the block i/o system, or cciss ioctls completing, etc.
2167 In that case, we need to save those completions for later
2168 processing by the interrupt handler.
2171 #ifdef CONFIG_CISS_SCSI_TAPE
2172 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2174 /* If it's not the scsi tape stuff doing error handling, (abort */
2175 /* or reset) then we don't expect anything weird. */
2176 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2178 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2179 "Invalid command list address returned! (%lx)\n",
2181 /* not much we can do. */
2182 #ifdef CONFIG_CISS_SCSI_TAPE
2186 /* We've sent down an abort or reset, but something else
/* Reject list sized nr_cmds + 2; overflow means the completion is lost. */
2188 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2189 /* Uh oh. No room to save it for later... */
2190 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2191 "reject list overflow, command lost!\n", ctlr);
2194 /* Save it for later */
2195 srl->complete[srl->ncompletions] = complete;
2196 srl->ncompletions++;
2202 * Send a command to the controller, and wait for it to complete.
2203 * Only used at init time.
/*
 * Init-time synchronous command path: interrupts off, submit to the
 * FIFO, and busy-poll for the completion with pollcomplete().  Stray
 * completions are parked via add_sendcmd_reject(); unsolicited aborts
 * are retried up to MAX_CMD_RETRIES.  The buffer DMA mapping made by
 * fill_cmd() is undone before returning.
 */
2205 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2206 1: address logical volume log_unit,
2207 2: periph device address is scsi3addr */
2208 unsigned int log_unit,
2209 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2211 CommandList_struct *c;
2213 unsigned long complete;
2214 ctlr_info_t *info_p = hba[ctlr];
2215 u64bit buff_dma_handle;
2216 int status, done = 0;
2218 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2219 printk(KERN_WARNING "cciss: unable to get memory");
2222 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2223 log_unit, page_code, scsi3addr, cmd_type);
2224 if (status != IO_OK) {
2225 cmd_free(info_p, c, 1);
2233 printk(KERN_DEBUG "cciss: turning intr off\n");
2234 #endif /* CCISS_DEBUG */
/* Disable HBA interrupts: completions are picked up by polling below. */
2235 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2237 /* Make sure there is room in the command FIFO */
2238 /* Actually it should be completely empty at this time */
2239 /* unless we are in here doing error handling for the scsi */
2240 /* tape side of the driver. */
2241 for (i = 200000; i > 0; i--) {
2242 /* if fifo isn't full go */
2243 if (!(info_p->access.fifo_full(info_p))) {
2248 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2249 " waiting!\n", ctlr);
2254 info_p->access.submit_command(info_p, c);
2257 complete = pollcomplete(ctlr);
2260 printk(KERN_DEBUG "cciss: command completed\n");
2261 #endif /* CCISS_DEBUG */
/* pollcomplete() returns 1 (an invalid address) on timeout. */
2263 if (complete == 1) {
2265 "cciss cciss%d: SendCmd Timeout out, "
2266 "No command list address returned!\n", ctlr);
2272 /* This will need to change for direct lookup completions */
2273 if ((complete & CISS_ERROR_BIT)
2274 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2275 /* if data overrun or underun on Report command
2278 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2279 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2280 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2281 ((c->err_info->CommandStatus ==
2282 CMD_DATA_OVERRUN) ||
2283 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2285 complete = c->busaddr;
2287 if (c->err_info->CommandStatus ==
2288 CMD_UNSOLICITED_ABORT) {
2289 printk(KERN_WARNING "cciss%d: "
2290 "unsolicited abort %p\n",
2292 if (c->retry_count < MAX_CMD_RETRIES) {
2294 "cciss%d: retrying %p\n",
2297 /* erase the old error */
2299 memset(c->err_info, 0,
2301 (ErrorInfo_struct));
2305 "cciss%d: retried %p too "
2306 "many times\n", ctlr, c);
2310 } else if (c->err_info->CommandStatus ==
2313 "cciss%d: command could not be aborted.\n",
2318 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2319 " Error %x \n", ctlr,
2320 c->err_info->CommandStatus);
2321 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2323 " size %x\n num %x value %x\n",
2325 c->err_info->MoreErrInfo.Invalid_Cmd.
2327 c->err_info->MoreErrInfo.Invalid_Cmd.
2329 c->err_info->MoreErrInfo.Invalid_Cmd.
2335 /* This will need changing for direct lookup completions */
2336 if (complete != c->busaddr) {
2337 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2338 BUG(); /* we are pretty much hosed if we get here. */
2346 /* unlock the data buffer from DMA */
2347 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2348 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2349 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2350 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2351 #ifdef CONFIG_CISS_SCSI_TAPE
2352 /* if we saved some commands for later, process them now. */
2353 if (info_p->scsi_rejects.ncompletions > 0)
2354 do_cciss_intr(0, info_p);
2356 cmd_free(info_p, c, 1);
2361 * Map (physical) PCI mem into (virtual) kernel space
/*
 * ioremap() a physical PCI memory range, page-aligning the base and
 * re-adding the intra-page offset to the returned virtual address.
 * Returns NULL if the mapping fails.
 */
2363 static void __iomem *remap_pci_mem(ulong base, ulong size)
2365 ulong page_base = ((ulong) base) & PAGE_MASK;
2366 ulong page_offs = ((ulong) base) - page_base;
2367 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2369 return page_remapped ? (page_remapped + page_offs) : NULL;
2373 * Takes jobs of the Q and sends them to the hardware, then puts it on
2374 * the Q to wait for completion.
/*
 * Drain the software request queue (h->reqQ) into the controller FIFO,
 * moving each submitted command onto the completion queue (h->cmpQ).
 * Stops early if the hardware FIFO is full.  Per the surrounding code,
 * callers hold the controller lock.
 */
2376 static void start_io(ctlr_info_t *h)
2378 CommandList_struct *c;
2380 while ((c = h->reqQ) != NULL) {
2381 /* can't do anything if fifo is full */
2382 if ((h->access.fifo_full(h))) {
2383 printk(KERN_WARNING "cciss: fifo full\n");
2387 /* Get the first entry from the Request Q */
2388 removeQ(&(h->reqQ), c);
2391 /* Tell the controller execute command */
2392 h->access.submit_command(h, c);
2394 /* Put job onto the completed Q */
2395 addQ(&(h->cmpQ), c);
2399 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2400 /* Zeros out the error record and then resends the command back */
2401 /* to the controller */
/*
 * Clear a failed command's error record and requeue it on h->reqQ for
 * resubmission, tracking the high-water queue depth.  Assumes
 * CCISS_LOCK(h->ctlr) is held (per the comment above).
 */
2402 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2404 /* erase the old error information */
2405 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2407 /* add it to software queue and then send it to the controller */
2408 addQ(&(h->reqQ), c);
2410 if (h->Qdepth > h->maxQsinceinit)
2411 h->maxQsinceinit = h->Qdepth;
/*
 * Pack the four SCSI result components into one 32-bit request->errors
 * value: status byte in bits 0-7, message byte 8-15, host byte 16-23,
 * driver byte 24-31 (inverse of the scsi.h extraction macros).
 */
2416 static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
2417 unsigned int msg_byte, unsigned int host_byte,
2418 unsigned int driver_byte)
2420 /* inverse of macros in scsi.h */
2421 return (scsi_status_byte & 0xff) |
2422 ((msg_byte & 0xff) << 8) |
2423 ((host_byte & 0xff) << 16) |
2424 ((driver_byte & 0xff) << 24);
/*
 * Translate a CMD_TARGET_STATUS completion into a request->errors value.
 * For SG_IO-style (blk_pc) requests the raw status is passed through and
 * sense data is copied back to the request; for filesystem requests,
 * CHECK CONDITION with sense key 0/1 (no sense / recovered error) is
 * treated as success.
 */
2427 static inline int evaluate_target_status(CommandList_struct *cmd)
2429 unsigned char sense_key;
2430 unsigned char status_byte, msg_byte, host_byte, driver_byte;
2433 /* If we get in here, it means we got "target status", that is, scsi status */
2434 status_byte = cmd->err_info->ScsiStatus;
2435 driver_byte = DRIVER_OK;
2436 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
2438 if (blk_pc_request(cmd->rq))
2439 host_byte = DID_PASSTHROUGH;
2443 error_value = make_status_bytes(status_byte, msg_byte,
2444 host_byte, driver_byte);
2446 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
2447 if (!blk_pc_request(cmd->rq))
2448 printk(KERN_WARNING "cciss: cmd %p "
2449 "has SCSI Status 0x%x\n",
2450 cmd, cmd->err_info->ScsiStatus);
2454 /* check the sense key */
2455 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2456 /* no status or recovered error */
2457 if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
2460 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2461 if (error_value != 0)
2462 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2463 " sense key = 0x%x\n", cmd, sense_key);
2467 /* SG_IO or similar, copy sense data back */
2468 if (cmd->rq->sense) {
/* Clamp copied length to what the controller actually returned. */
2469 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2470 cmd->rq->sense_len = cmd->err_info->SenseLen;
2471 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2472 cmd->rq->sense_len);
2474 cmd->rq->sense_len = 0;
2479 /* checks the status of the job and calls complete buffers to mark all
2480 * buffers for the completed job. Note that this function does not need
2481 * to hold the hba/queue lock.
/*
 * Map the controller's completion status for a block-layer command into
 * request->errors, retry unsolicited aborts (via resend_cciss_cmd), and
 * hand the request to the softirq completion path
 * (blk_complete_request -> cciss_softirq_done).  Does not require the
 * hba/queue lock (per the comment above).
 */
2483 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2487 struct request *rq = cmd->rq;
2492 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
2494 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2495 goto after_error_processing;
2497 switch (cmd->err_info->CommandStatus) {
2498 case CMD_TARGET_STATUS:
2499 rq->errors = evaluate_target_status(cmd);
2501 case CMD_DATA_UNDERRUN:
/* Underrun is normal for pc requests; only warn for fs requests. */
2502 if (blk_fs_request(cmd->rq)) {
2503 printk(KERN_WARNING "cciss: cmd %p has"
2504 " completed with data underrun "
2506 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2509 case CMD_DATA_OVERRUN:
2510 if (blk_fs_request(cmd->rq))
2511 printk(KERN_WARNING "cciss: cmd %p has"
2512 " completed with data overrun "
2516 printk(KERN_WARNING "cciss: cmd %p is "
2517 "reported invalid\n", cmd);
2518 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2519 cmd->err_info->CommandStatus, DRIVER_OK,
2520 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2522 case CMD_PROTOCOL_ERR:
2523 printk(KERN_WARNING "cciss: cmd %p has "
2524 "protocol error \n", cmd);
2525 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2526 cmd->err_info->CommandStatus, DRIVER_OK,
2527 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2529 case CMD_HARDWARE_ERR:
2530 printk(KERN_WARNING "cciss: cmd %p had "
2531 " hardware error\n", cmd);
2532 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2533 cmd->err_info->CommandStatus, DRIVER_OK,
2534 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2536 case CMD_CONNECTION_LOST:
2537 printk(KERN_WARNING "cciss: cmd %p had "
2538 "connection lost\n", cmd);
2539 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2540 cmd->err_info->CommandStatus, DRIVER_OK,
2541 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2544 printk(KERN_WARNING "cciss: cmd %p was "
2546 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2547 cmd->err_info->CommandStatus, DRIVER_OK,
2548 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2550 case CMD_ABORT_FAILED:
2551 printk(KERN_WARNING "cciss: cmd %p reports "
2552 "abort failed\n", cmd);
2553 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2554 cmd->err_info->CommandStatus, DRIVER_OK,
2555 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2557 case CMD_UNSOLICITED_ABORT:
2558 printk(KERN_WARNING "cciss%d: unsolicited "
2559 "abort %p\n", h->ctlr, cmd);
2560 if (cmd->retry_count < MAX_CMD_RETRIES) {
2563 "cciss%d: retrying %p\n", h->ctlr, cmd);
2567 "cciss%d: %p retried too "
2568 "many times\n", h->ctlr, cmd);
2569 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2570 cmd->err_info->CommandStatus, DRIVER_OK,
2571 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2574 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2575 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2576 cmd->err_info->CommandStatus, DRIVER_OK,
2577 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2580 printk(KERN_WARNING "cciss: cmd %p returned "
2581 "unknown status %x\n", cmd,
2582 cmd->err_info->CommandStatus);
2583 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2584 cmd->err_info->CommandStatus, DRIVER_OK,
2585 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2588 after_error_processing:
2590 /* We need to return this command */
2592 resend_cciss_cmd(h, cmd);
/* Stash the command for cciss_softirq_done() and defer completion. */
2595 cmd->rq->completion_data = cmd;
2596 blk_complete_request(cmd->rq);
2600 * Get a request and submit it to the controller.
/*
 * do_cciss_request() - block-layer request_fn for the cciss queues.
 *
 * Dequeues requests, allocates a command block from the per-controller
 * pool, builds either a 10-byte or 16-byte READ/WRITE CDB (fs requests)
 * or copies the caller's CDB (pc/passthrough requests), DMA-maps the
 * scatter-gather list, and queues the command to the controller.
 *
 * Runs with q->queue_lock held on entry (block-layer contract for
 * request functions); the lock is dropped around command construction
 * and re-taken before touching h->reqQ.
 *
 * NOTE(review): interior lines are elided in this extract (loop heads,
 * goto labels, start_io call); comments cover visible logic only.
 */
2602 static void do_cciss_request(struct request_queue *q)
2604 ctlr_info_t *h = q->queuedata;
2605 CommandList_struct *c;
2608 struct request *creq;
2610 struct scatterlist tmp_sg[MAXSGENTRIES];
2611 drive_info_struct *drv;
2614 /* We call start_io here in case there is a command waiting on the
2615 * queue that has not been sent.
/* Nothing to do while the queue is plugged; the unplug will re-enter us. */
2617 if (blk_queue_plugged(q))
2621 creq = elv_next_request(q);
/* The block layer must never hand us more segments than our SG table. */
2625 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2627 if ((c = cmd_alloc(h, 1)) == NULL)
2630 blkdev_dequeue_request(creq);
/* Drop the queue lock while we build the command (no queue state touched). */
2632 spin_unlock_irq(q->queue_lock);
2634 c->cmd_type = CMD_RWREQ;
2637 /* fill in the request */
2638 drv = creq->rq_disk->private_data;
2639 c->Header.ReplyQueue = 0; // unused in simple mode
2640 /* got command from pool, so use the command block index instead */
2641 /* for direct lookups. */
2642 /* The first 2 bits are reserved for controller error reporting. */
2643 c->Header.Tag.lower = (c->cmdindex << 3);
2644 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2645 c->Header.LUN.LogDev.VolId = drv->LunID;
2646 c->Header.LUN.LogDev.Mode = 1;
2647 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2648 c->Request.Type.Type = TYPE_CMD; // It is a command.
2649 c->Request.Type.Attribute = ATTR_SIMPLE;
2650 c->Request.Type.Direction =
2651 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2652 c->Request.Timeout = 0; // Don't time out
/* Opcode depends on whether this LUN was switched to 16-byte CDBs. */
2654 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2655 start_blk = creq->sector;
2657 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2658 (int)creq->nr_sectors);
2659 #endif /* CCISS_DEBUG */
2661 sg_init_table(tmp_sg, MAXSGENTRIES);
2662 seg = blk_rq_map_sg(q, creq, tmp_sg);
2664 /* get the DMA records for the setup */
2665 if (c->Request.Type.Direction == XFER_READ)
2666 dir = PCI_DMA_FROMDEVICE;
2668 dir = PCI_DMA_TODEVICE;
/* DMA-map each segment and record 64-bit bus addresses in the SG list. */
2670 for (i = 0; i < seg; i++) {
2671 c->SG[i].Len = tmp_sg[i].length;
2672 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
2674 tmp_sg[i].length, dir);
2675 c->SG[i].Addr.lower = temp64.val32.lower;
2676 c->SG[i].Addr.upper = temp64.val32.upper;
2677 c->SG[i].Ext = 0; // we are not chaining
2679 /* track how many SG entries we are using */
2684 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2685 creq->nr_sectors, seg);
2686 #endif /* CCISS_DEBUG */
2688 c->Header.SGList = c->Header.SGTotal = seg;
2689 if (likely(blk_fs_request(creq))) {
/* 10-byte CDB: 32-bit LBA in bytes 2-5, 16-bit count in bytes 7-8. */
2690 if(h->cciss_read == CCISS_READ_10) {
2691 c->Request.CDB[1] = 0;
2692 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2693 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2694 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2695 c->Request.CDB[5] = start_blk & 0xff;
2696 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2697 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2698 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2699 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
/* else: 16-byte CDB for >2TB volumes; 64-bit LBA in bytes 2-9. */
2701 u32 upper32 = upper_32_bits(start_blk);
2703 c->Request.CDBLen = 16;
2704 c->Request.CDB[1]= 0;
2705 c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
2706 c->Request.CDB[3]= (upper32 >> 16) & 0xff;
2707 c->Request.CDB[4]= (upper32 >> 8) & 0xff;
2708 c->Request.CDB[5]= upper32 & 0xff;
2709 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2710 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2711 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2712 c->Request.CDB[9]= start_blk & 0xff;
2713 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2714 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2715 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2716 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2717 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2719 } else if (blk_pc_request(creq)) {
/* SCSI passthrough: use the caller-supplied CDB verbatim. */
2720 c->Request.CDBLen = creq->cmd_len;
2721 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2723 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
/* Re-take the queue lock before touching the shared request queue. */
2727 spin_lock_irq(q->queue_lock);
2729 addQ(&(h->reqQ), c);
2731 if (h->Qdepth > h->maxQsinceinit)
2732 h->maxQsinceinit = h->Qdepth;
2738 /* We will already have the driver lock here so not need
/*
 * get_next_completion() - fetch the next completed-command tag.
 *
 * Normally reads the controller's completion FIFO via
 * h->access.command_completed().  With CONFIG_CISS_SCSI_TAPE, any
 * commands sendcmd() rejected and parked in h->scsi_rejects are drained
 * first (LIFO: ncompletions is decremented and that slot returned).
 *
 * NOTE(review): the #else/#endif lines of the ifdef are elided in this
 * extract.
 */
2744 static inline unsigned long get_next_completion(ctlr_info_t *h)
2746 #ifdef CONFIG_CISS_SCSI_TAPE
2747 /* Any rejects from sendcmd() lying around? Process them first */
2748 if (h->scsi_rejects.ncompletions == 0)
2749 return h->access.command_completed(h);
2751 struct sendcmd_reject_list *srl;
2753 srl = &h->scsi_rejects;
/* Pop the most recently saved reject. */
2754 n = --srl->ncompletions;
2755 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2757 return srl->complete[n];
2760 return h->access.command_completed(h);
/*
 * interrupt_pending() - is there work for the interrupt handler?
 *
 * True when the hardware signals a pending interrupt, or (tape config
 * only) when saved sendcmd() rejects are waiting to be drained.
 * NOTE(review): the #else/#endif of the ifdef are elided in this extract.
 */
2764 static inline int interrupt_pending(ctlr_info_t *h)
2766 #ifdef CONFIG_CISS_SCSI_TAPE
2767 return (h->access.intr_pending(h)
2768 || (h->scsi_rejects.ncompletions > 0));
2770 return h->access.intr_pending(h);
/*
 * interrupt_not_for_us() - shared-IRQ filter.
 *
 * Nonzero when this controller did not raise the interrupt: hardware
 * reports nothing pending or our interrupts are disabled — and (tape
 * config) there are also no saved rejects to process.
 * NOTE(review): the #else/#endif of the ifdef are elided in this extract.
 */
2774 static inline long interrupt_not_for_us(ctlr_info_t *h)
2776 #ifdef CONFIG_CISS_SCSI_TAPE
2777 return (((h->access.intr_pending(h) == 0) ||
2778 (h->interrupts_enabled == 0))
2779 && (h->scsi_rejects.ncompletions == 0));
2781 return (((h->access.intr_pending(h) == 0) ||
2782 (h->interrupts_enabled == 0)));
/*
 * do_cciss_intr() - interrupt handler.
 *
 * Drains the completion FIFO under CCISS_LOCK.  Tags with the direct-
 * lookup bit index straight into h->cmd_pool; otherwise the completion
 * queue is walked matching on busaddr.  A tag index beyond nr_cmds means
 * the controller is returning garbage, so everything is failed via
 * fail_all_cmds().  Completed commands are dispatched by cmd_type:
 * CMD_RWREQ -> complete_command(), CMD_IOCTL_PEND -> complete(), and
 * (tape config) CMD_SCSI -> complete_scsi_command().
 *
 * NOTE(review): list-walk advance lines and several braces are elided
 * in this extract; comments cover visible logic only.
 */
2786 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2788 ctlr_info_t *h = dev_id;
2789 CommandList_struct *c;
2790 unsigned long flags;
/* Shared IRQ line: bail out fast if this controller didn't interrupt. */
2793 if (interrupt_not_for_us(h))
2796 * If there are completed commands in the completion queue,
2797 * we had better do something about it.
2799 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2800 while (interrupt_pending(h)) {
2801 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
/* Direct-lookup tag: the command index must fit the pool. */
2805 if (a2 >= h->nr_cmds) {
2807 "cciss: controller cciss%d failed, stopping.\n",
2809 fail_all_cmds(h->ctlr);
2813 c = h->cmd_pool + a2;
/* Non-direct tag: search the completion queue by bus address. */
2818 if ((c = h->cmpQ) == NULL) {
2820 "cciss: Completion of %08x ignored\n",
2824 while (c->busaddr != a) {
2831 * If we've found the command, take it off the
2832 * completion Q and free it
2834 if (c->busaddr == a) {
2835 removeQ(&h->cmpQ, c);
2836 if (c->cmd_type == CMD_RWREQ) {
2837 complete_command(h, c, 0);
2838 } else if (c->cmd_type == CMD_IOCTL_PEND) {
/* Wake the ioctl path blocked in wait_for_completion(). */
2839 complete(c->waiting);
2841 # ifdef CONFIG_CISS_SCSI_TAPE
2842 else if (c->cmd_type == CMD_SCSI)
2843 complete_scsi_command(c, 0, a1);
2850 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2855 * We cannot read the structure directly, for portability we must use
2857 * This is for debug only.
/*
 * print_cfg_table() - dump the controller config table (debug aid).
 *
 * Every field is fetched with readb()/readl() because the table lives
 * in memory-mapped device space and must not be dereferenced directly.
 * NOTE(review): in the original this is compiled only under CCISS_DEBUG
 * (the surrounding #ifdef is elided here).
 */
2860 static void print_cfg_table(CfgTable_struct *tb)
2865 printk("Controller Configuration information\n");
2866 printk("------------------------------------\n");
/* 4-byte signature, read byte-wise and NUL-terminated for printing. */
2867 for (i = 0; i < 4; i++)
2868 temp_name[i] = readb(&(tb->Signature[i]));
2869 temp_name[4] = '\0';
2870 printk(" Signature = %s\n", temp_name);
2871 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2872 printk(" Transport methods supported = 0x%x\n",
2873 readl(&(tb->TransportSupport)));
2874 printk(" Transport methods active = 0x%x\n",
2875 readl(&(tb->TransportActive)));
2876 printk(" Requested transport Method = 0x%x\n",
2877 readl(&(tb->HostWrite.TransportRequest)));
2878 printk(" Coalesce Interrupt Delay = 0x%x\n",
2879 readl(&(tb->HostWrite.CoalIntDelay)));
2880 printk(" Coalesce Interrupt Count = 0x%x\n",
2881 readl(&(tb->HostWrite.CoalIntCount)));
2882 printk(" Max outstanding commands = 0x%d\n",
2883 readl(&(tb->CmdsOutMax)));
2884 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
/* 16-byte server name, read byte-wise as above. */
2885 for (i = 0; i < 16; i++)
2886 temp_name[i] = readb(&(tb->ServerName[i]));
2887 temp_name[16] = '\0';
2888 printk(" Server Name = %s\n", temp_name);
2889 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2891 #endif /* CCISS_DEBUG */
/*
 * find_PCI_BAR_index() - map a config-space BAR offset to a resource index.
 *
 * Walks the device's resources, accumulating the config-space offset
 * each BAR occupies (I/O and 32-bit memory BARs take 4 bytes, 64-bit
 * memory BARs take 8 — the 64-bit case lines are elided in this
 * extract), until the running offset matches pci_bar_addr.
 * Returns the matching resource index; presumably -1 when the offset is
 * not found or a reserved memory type is seen (the failure-return lines
 * are elided here — verify against the full source).
 */
2893 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2895 int i, offset, mem_type, bar_type;
2896 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2899 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2900 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2901 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2904 mem_type = pci_resource_flags(pdev, i) &
2905 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2907 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2908 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2909 offset += 4; /* 32 bit */
2911 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2914 default: /* reserved in PCI 2.2 */
2916 "Base address is invalid\n");
2921 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2927 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2928 * controllers that are capable. If not, we use IO-APIC mode.
/*
 * cciss_interrupt_mode() - pick MSI-X, MSI, or legacy interrupts.
 *
 * Tries MSI-X (4 vectors) first, then MSI, then falls back to the
 * legacy pdev->irq.  Boards on the quirk list below advertise MSI but
 * do not really support it, so they skip straight to legacy mode.
 * NOTE(review): the lines storing msix_vector/msi_vector flags and the
 * fallback gotos are elided in this extract.
 */
2931 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2932 struct pci_dev *pdev, __u32 board_id)
2934 #ifdef CONFIG_PCI_MSI
2936 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2940 /* Some boards advertise MSI but don't really support it */
2941 if ((board_id == 0x40700E11) ||
2942 (board_id == 0x40800E11) ||
2943 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2944 goto default_int_mode;
2946 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2947 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
/* Success: record all four allocated vectors. */
2949 c->intr[0] = cciss_msix_entries[0].vector;
2950 c->intr[1] = cciss_msix_entries[1].vector;
2951 c->intr[2] = cciss_msix_entries[2].vector;
2952 c->intr[3] = cciss_msix_entries[3].vector;
/* err > 0 means fewer vectors available than requested. */
2957 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2958 "available\n", err);
2959 goto default_int_mode;
2961 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2963 goto default_int_mode;
2966 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2967 if (!pci_enable_msi(pdev)) {
2970 printk(KERN_WARNING "cciss: MSI init failed\n");
2974 #endif /* CONFIG_PCI_MSI */
2975 /* if we get here we're going to use the default interrupt mode */
2976 c->intr[SIMPLE_MODE_INT] = pdev->irq;
/*
 * cciss_pci_init() - bring a controller's PCI side up and put the board
 * into simple transport mode.
 *
 * Sequence: verify the board is enabled, pci_enable_device() +
 * pci_request_regions(), derive board_id from subsystem IDs, choose an
 * interrupt mode, ioremap BAR0 and poll the scratchpad for firmware
 * readiness (up to ~120s), locate and map the config table, match the
 * board against the products[] table (unknown HP boards fall back to
 * the last entry), apply per-board quirks (6400 prefetch, P600 DMA
 * prefetch/refetch ASIC bug), then request CFGTBL_Trans_Simple and
 * ring the doorbell, waiting for the transition to take effect.
 *
 * Returns 0 on success or a negative errno; the error path releases
 * PCI regions but deliberately does not pci_disable_device() (see the
 * comment at the bottom).
 *
 * NOTE(review): many lines (returns, braces, some assignments) are
 * elided in this extract; comments describe visible logic only.
 */
2980 static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2982 ushort subsystem_vendor_id, subsystem_device_id, command;
2983 __u32 board_id, scratchpad = 0;
2985 __u32 cfg_base_addr;
2986 __u64 cfg_base_addr_index;
2989 /* check to see if controller has been disabled */
2990 /* BEFORE trying to enable it */
2991 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
/* Bit 1 of PCI_COMMAND is memory-space enable. */
2992 if (!(command & 0x02)) {
2994 "cciss: controller appears to be disabled\n");
2998 err = pci_enable_device(pdev);
3000 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
3004 err = pci_request_regions(pdev, "cciss");
3006 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
3011 subsystem_vendor_id = pdev->subsystem_vendor;
3012 subsystem_device_id = pdev->subsystem_device;
/* board_id = subsystem device ID in the high half, vendor ID low. */
3013 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3014 subsystem_vendor_id);
3017 printk("command = %x\n", command);
3018 printk("irq = %x\n", pdev->irq);
3019 printk("board_id = %x\n", board_id);
3020 #endif /* CCISS_DEBUG */
3022 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
3023 * else we use the IO-APIC interrupt assigned to us by system ROM.
3025 cciss_interrupt_mode(c, pdev, board_id);
3028 * Memory base addr is first addr , the second points to the config
3032 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
3034 printk("address 0 = %x\n", c->paddr);
3035 #endif /* CCISS_DEBUG */
3036 c->vaddr = remap_pci_mem(c->paddr, 0x250);
3038 /* Wait for the board to become ready. (PCI hotplug needs this.)
3039 * We poll for up to 120 secs, once per 100ms. */
3040 for (i = 0; i < 1200; i++) {
3041 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
3042 if (scratchpad == CCISS_FIRMWARE_READY)
3044 set_current_state(TASK_INTERRUPTIBLE);
3045 schedule_timeout(HZ / 10); /* wait 100ms */
3047 if (scratchpad != CCISS_FIRMWARE_READY) {
3048 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
3050 goto err_out_free_res;
3053 /* get the address index number */
3054 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
3055 cfg_base_addr &= (__u32) 0x0000ffff;
3057 printk("cfg base address = %x\n", cfg_base_addr);
3058 #endif /* CCISS_DEBUG */
3059 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3061 printk("cfg base address index = %x\n", cfg_base_addr_index);
3062 #endif /* CCISS_DEBUG */
3063 if (cfg_base_addr_index == -1) {
3064 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
3066 goto err_out_free_res;
3069 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3071 printk("cfg offset = %x\n", cfg_offset);
3072 #endif /* CCISS_DEBUG */
/* Map the config table itself (BAR picked out above + offset). */
3073 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3074 cfg_base_addr_index) +
3075 cfg_offset, sizeof(CfgTable_struct));
3076 c->board_id = board_id;
3079 print_cfg_table(c->cfgtable);
3080 #endif /* CCISS_DEBUG */
/* Look the board up in the static product table. */
3082 for (i = 0; i < ARRAY_SIZE(products); i++) {
3083 if (board_id == products[i].board_id) {
3084 c->product_name = products[i].product_name;
3085 c->access = *(products[i].access);
3086 c->nr_cmds = products[i].nr_cmds;
/* Sanity-check the CISS signature before trusting the table at all. */
3090 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3091 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3092 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3093 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3094 printk("Does not appear to be a valid CISS config table\n");
3096 goto err_out_free_res;
3098 /* We didn't find the controller in our list. We know the
3099 * signature is valid. If it's an HP device let's try to
3100 * bind to the device and fire it up. Otherwise we bail.
3102 if (i == ARRAY_SIZE(products)) {
3103 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
/* Unknown HP board: reuse the last (newest) product entry. */
3104 c->product_name = products[i-1].product_name;
3105 c->access = *(products[i-1].access);
3106 c->nr_cmds = products[i-1].nr_cmds;
3107 printk(KERN_WARNING "cciss: This is an unknown "
3108 "Smart Array controller.\n"
3109 "cciss: Please update to the latest driver "
3110 "available from www.hp.com.\n");
3112 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3113 " to access the Smart Array controller %08lx\n"
3114 , (unsigned long)board_id);
3116 goto err_out_free_res;
3121 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3123 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3125 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3129 /* Disabling DMA prefetch and refetch for the P600.
3130 * An ASIC bug may result in accesses to invalid memory addresses.
3131 * We've disabled prefetch for some time now. Testing with XEN
3132 * kernels revealed a bug in the refetch if dom0 resides on a P600.
/* 0x3225103C is the P600's board_id. */
3134 if(board_id == 0x3225103C) {
3137 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3138 dma_prefetch |= 0x8000;
3139 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3140 pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
3142 pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
3146 printk("Trying to put board into Simple mode\n");
3147 #endif /* CCISS_DEBUG */
3148 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3149 /* Update the field, and then ring the doorbell */
3150 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3151 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3153 /* under certain very rare conditions, this can take awhile.
3154 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3155 * as we enter this code.) */
3156 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
/* Doorbell bit clears when the controller has consumed the request. */
3157 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3159 /* delay and try again */
3160 set_current_state(TASK_INTERRUPTIBLE);
3161 schedule_timeout(10);
3165 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3166 readl(c->vaddr + SA5_DOORBELL));
3167 #endif /* CCISS_DEBUG */
3169 print_cfg_table(c->cfgtable);
3170 #endif /* CCISS_DEBUG */
3172 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3173 printk(KERN_WARNING "cciss: unable to get board into"
3176 goto err_out_free_res;
3182 * Deliberately omit pci_disable_device(): it does something nasty to
3183 * Smart Array controllers that pci_enable_device does not undo
3185 pci_release_regions(pdev);
3190 * Gets information about the local volumes attached to the controller.
/*
 * cciss_getgeometry() - discover the controller's logical volumes.
 *
 * Issues CISS_INQUIRY for the firmware version, CISS_REPORT_LOG for the
 * LUN list, then for each LUN reads capacity (switching that controller
 * to 16-byte CDBs when a volume reports the >2TB sentinel) and fills in
 * geometry via cciss_geometry_inquiry().  Both sendcmd() failures are
 * logged but non-fatal: discovery proceeds with what it has.
 *
 * NOTE(review): several lines (frees, loop else-branches, returns) are
 * elided in this extract; comments describe visible logic only.
 */
3192 static void cciss_getgeometry(int cntl_num)
3194 ReportLunData_struct *ld_buff;
3195 InquiryData_struct *inq_buff;
3200 unsigned block_size;
3201 sector_t total_size;
3203 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3204 if (ld_buff == NULL) {
3205 printk(KERN_ERR "cciss: out of memory\n");
3208 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3209 if (inq_buff == NULL) {
3210 printk(KERN_ERR "cciss: out of memory\n");
3214 /* Get the firmware version */
3215 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3216 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3218 if (return_code == IO_OK) {
/* Firmware revision lives at bytes 32-35 of the inquiry data. */
3219 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3220 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3221 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3222 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3223 } else { /* send command failed */
3225 printk(KERN_WARNING "cciss: unable to determine firmware"
3226 " version of controller\n");
3228 /* Get the number of logical volumes */
3229 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3230 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3233 if (return_code == IO_OK) {
3235 printk("LUN Data\n--------------------------\n");
3236 #endif /* CCISS_DEBUG */
/* LUNListLength is big-endian; assemble it byte by byte. */
3239 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3241 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3243 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3244 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3245 } else { /* reading number of logical volumes failed */
3247 printk(KERN_WARNING "cciss: report logical volume"
3248 " command failed\n");
3251 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes pre entry
3252 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3254 "ciss: only %d number of logical volumes supported\n",
3256 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3259 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3260 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3261 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3262 hba[cntl_num]->num_luns);
3263 #endif /* CCISS_DEBUG */
3265 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3266 for (i = 0; i < CISS_MAX_LUN; i++) {
3267 if (i < hba[cntl_num]->num_luns) {
/* LUN IDs are stored little-endian in the 4 bytes of LUN[i]. */
3268 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3270 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3272 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3274 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3276 hba[cntl_num]->drv[i].LunID = lunid;
3279 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3280 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3281 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3282 hba[cntl_num]->drv[i].LunID);
3283 #endif /* CCISS_DEBUG */
3285 /* testing to see if 16-byte CDBs are already being used */
3286 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3287 cciss_read_capacity_16(cntl_num, i, 0,
3288 &total_size, &block_size);
3291 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3293 /* If read_capacity returns all F's the logical is >2TB */
3294 /* so we switch to 16-byte CDBs for all read/write ops */
3295 if(total_size == 0xFFFFFFFFULL) {
3296 cciss_read_capacity_16(cntl_num, i, 0,
3297 &total_size, &block_size);
3298 hba[cntl_num]->cciss_read = CCISS_READ_16;
3299 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3301 hba[cntl_num]->cciss_read = CCISS_READ_10;
3302 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3305 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3306 block_size, inq_buff,
3307 &hba[cntl_num]->drv[i]);
3309 /* initialize raid_level to indicate a free space */
3310 hba[cntl_num]->drv[i].raid_level = -1;
3317 /* Function to find the first free pointer into our hba[] array */
3318 /* Returns -1 if no free entries are left. */
/*
 * alloc_cciss_hba() - claim a free slot in the global hba[] array.
 *
 * Allocates a zeroed ctlr_info_t plus its first gendisk for the slot.
 * NOTE(review): the lines storing p into hba[i], returning the index,
 * and the kfree on the alloc_disk failure path are elided in this
 * extract — verify cleanup against the full source.
 */
3319 static int alloc_cciss_hba(void)
3323 for (i = 0; i < MAX_CTLR; i++) {
3327 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3330 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3331 if (!p->gendisk[0]) {
3339 printk(KERN_WARNING "cciss: This driver supports a maximum"
3340 " of %d controllers.\n", MAX_CTLR);
3343 printk(KERN_ERR "cciss: out of memory.\n");
/*
 * free_hba() - release controller slot i.
 *
 * Drops every gendisk reference for the slot; presumably the ctlr_info
 * itself is freed and hba[i] cleared on the elided lines — confirm
 * against the full source.
 */
3347 static void free_hba(int i)
3349 ctlr_info_t *p = hba[i];
3353 for (n = 0; n < CISS_MAX_LUN; n++)
3354 put_disk(p->gendisk[n]);
3359 * This is it. Find all the controllers and register them. I really hate
3360 * stealing all these major device numbers.
3361 * returns the number of block devices registered.
/*
 * cciss_init_one() - PCI probe entry point for one controller.
 *
 * Order of operations: claim an hba[] slot, cciss_pci_init(), set the
 * DMA mask (64-bit preferred, 32-bit fallback), register a block major
 * (fixed COMPAQ_CISS_MAJOR+i for the first MAX_CTLR_ORIG controllers,
 * dynamic after that), hook the interrupt handler, allocate the command
 * and error-info DMA pools (plus the scsi_rejects list for tape
 * support), scan geometry, then create a request queue and gendisk per
 * logical drive and add_disk() them.  Errors unwind through the
 * goto-cleanup labels at the bottom.
 *
 * NOTE(review): label lines, several returns and braces are elided in
 * this extract; comments describe visible logic only.
 */
3363 static int __devinit cciss_init_one(struct pci_dev *pdev,
3364 const struct pci_device_id *ent)
3371 i = alloc_cciss_hba();
/* Guards concurrent access while the controller is half-constructed. */
3375 hba[i]->busy_initializing = 1;
3377 if (cciss_pci_init(hba[i], pdev) != 0)
3380 sprintf(hba[i]->devname, "cciss%d", i);
3382 hba[i]->pdev = pdev;
3384 /* configure PCI DMA stuff */
3385 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3387 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3390 printk(KERN_ERR "cciss: no suitable DMA available\n");
3395 * register with the major number, or get a dynamic major number
3396 * by passing 0 as argument. This is done for greater than
3397 * 8 controller support.
3399 if (i < MAX_CTLR_ORIG)
3400 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3401 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3402 if (rc == -EBUSY || rc == -EINVAL) {
3404 "cciss: Unable to get major number %d for %s "
3405 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
/* Dynamic majors: register_blkdev(0, ...) returned the major in rc. */
3408 if (i >= MAX_CTLR_ORIG)
3412 /* make sure the board interrupts are off */
3413 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF)
3414 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3415 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3416 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3417 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3421 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3422 hba[i]->devname, pdev->device, pci_name(pdev),
3423 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
/* One bit per command block, rounded up to whole longs. */
3425 hba[i]->cmd_pool_bits =
3426 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3427 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3428 hba[i]->cmd_pool = (CommandList_struct *)
3429 pci_alloc_consistent(hba[i]->pdev,
3430 hba[i]->nr_cmds * sizeof(CommandList_struct),
3431 &(hba[i]->cmd_pool_dhandle));
3432 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3433 pci_alloc_consistent(hba[i]->pdev,
3434 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3435 &(hba[i]->errinfo_pool_dhandle));
3436 if ((hba[i]->cmd_pool_bits == NULL)
3437 || (hba[i]->cmd_pool == NULL)
3438 || (hba[i]->errinfo_pool == NULL)) {
3439 printk(KERN_ERR "cciss: out of memory");
3442 #ifdef CONFIG_CISS_SCSI_TAPE
3443 hba[i]->scsi_rejects.complete =
3444 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3445 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3446 if (hba[i]->scsi_rejects.complete == NULL) {
3447 printk(KERN_ERR "cciss: out of memory");
3451 spin_lock_init(&hba[i]->lock);
3453 /* Initialize the pdev driver private data.
3454 have it point to hba[i]. */
3455 pci_set_drvdata(pdev, hba[i]);
3456 /* command and error info recs zeroed out before
3458 memset(hba[i]->cmd_pool_bits, 0,
3459 ((hba[i]->nr_cmds + BITS_PER_LONG -
3460 1) / BITS_PER_LONG) * sizeof(unsigned long));
3463 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3464 #endif /* CCISS_DEBUG */
3466 cciss_getgeometry(i);
3468 cciss_scsi_setup(i);
3470 /* Turn the interrupts on so we can service requests */
3471 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3475 hba[i]->cciss_max_sectors = 2048;
3477 hba[i]->busy_initializing = 0;
/* Per-logical-drive setup: queue + gendisk for each LUN. */
3480 drive_info_struct *drv = &(hba[i]->drv[j]);
3481 struct gendisk *disk = hba[i]->gendisk[j];
3482 struct request_queue *q;
3484 /* Check if the disk was allocated already */
3486 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3487 disk = hba[i]->gendisk[j];
3490 /* Check that the disk was able to be allocated */
3492 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3496 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3499 "cciss: unable to allocate queue for disk %d\n",
3505 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3507 /* This is a hardware imposed limit. */
3508 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3510 /* This is a limit in the driver and could be eliminated. */
3511 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3513 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3515 blk_queue_softirq_done(q, cciss_softirq_done);
3517 q->queuedata = hba[i];
3518 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3519 disk->major = hba[i]->major;
3520 disk->first_minor = j << NWD_SHIFT;
3521 disk->fops = &cciss_fops;
3523 disk->private_data = drv;
3524 disk->driverfs_dev = &pdev->dev;
3525 /* we must register the controller even if no disks exist */
3526 /* this is for the online array utilities */
3527 if (!drv->heads && j)
3529 blk_queue_hardsect_size(q, drv->block_size);
3530 set_capacity(disk, drv->nr_blocks);
3532 } while (j <= hba[i]->highest_lun);
3534 /* Make sure all queue data is written out before */
3535 /* interrupt handler, triggered by add_disk, */
3536 /* is allowed to start them. */
3539 for (j = 0; j <= hba[i]->highest_lun; j++)
3540 add_disk(hba[i]->gendisk[j]);
/* --- error unwind path (labels elided in this extract) --- */
3545 #ifdef CONFIG_CISS_SCSI_TAPE
3546 kfree(hba[i]->scsi_rejects.complete);
3548 kfree(hba[i]->cmd_pool_bits);
3549 if (hba[i]->cmd_pool)
3550 pci_free_consistent(hba[i]->pdev,
3551 hba[i]->nr_cmds * sizeof(CommandList_struct),
3552 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3553 if (hba[i]->errinfo_pool)
3554 pci_free_consistent(hba[i]->pdev,
3555 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3556 hba[i]->errinfo_pool,
3557 hba[i]->errinfo_pool_dhandle);
3558 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3560 unregister_blkdev(hba[i]->major, hba[i]->devname);
3562 hba[i]->busy_initializing = 0;
3563 /* cleanup any queues that may have been initialized */
3564 for (j=0; j <= hba[i]->highest_lun; j++){
3565 drive_info_struct *drv = &(hba[i]->drv[j]);
3567 blk_cleanup_queue(drv->queue);
3570 * Deliberately omit pci_disable_device(): it does something nasty to
3571 * Smart Array controllers that pci_enable_device does not undo
3573 pci_release_regions(pdev);
3574 pci_set_drvdata(pdev, NULL);
/*
 * cciss_shutdown() - flush the controller's write cache before poweroff.
 *
 * Sends CCISS_CACHE_FLUSH so battery-backed cache contents reach the
 * disks (sendcmd also disables board interrupts first — see comment
 * below), then releases the IRQ.  Safe no-op if no driver data is bound.
 * NOTE(review): the flush_buf declaration and a few braces are elided
 * in this extract.
 */
3581 static void cciss_shutdown(struct pci_dev *pdev)
3581 ctlr_info_t *tmp_ptr;
3586 tmp_ptr = pci_get_drvdata(pdev);
3587 if (tmp_ptr == NULL)
3593 /* Turn board interrupts off and send the flush cache command */
3594 /* sendcmd will turn off interrupt, and send the flush...
3595 * To write all data in the battery backed cache to disks */
3596 memset(flush_buf, 0, 4);
3597 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3599 if (return_code == IO_OK) {
3600 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3602 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3604 free_irq(hba[i]->intr[2], hba[i]);
/*
 * cciss_remove_one() - PCI remove entry point.
 *
 * Teardown mirrors cciss_init_one(): remove the /proc entry, unregister
 * the block major, delete and clean up every gendisk/queue, unhook the
 * SCSI tape subsystem, flush the cache via cciss_shutdown(), disable
 * MSI/MSI-X, unmap BAR0 and free the DMA pools, then release PCI
 * regions (pci_disable_device() deliberately omitted — see below).
 * NOTE(review): some braces and the free_hba/return lines are elided in
 * this extract.
 */
3607 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3609 ctlr_info_t *tmp_ptr;
3612 if (pci_get_drvdata(pdev) == NULL) {
3613 printk(KERN_ERR "cciss: Unable to remove device \n");
3616 tmp_ptr = pci_get_drvdata(pdev);
3618 if (hba[i] == NULL) {
3619 printk(KERN_ERR "cciss: device appears to "
3620 "already be removed \n");
3624 remove_proc_entry(hba[i]->devname, proc_cciss);
3625 unregister_blkdev(hba[i]->major, hba[i]->devname);
3627 /* remove it from the disk list */
3628 for (j = 0; j < CISS_MAX_LUN; j++) {
3629 struct gendisk *disk = hba[i]->gendisk[j];
3631 struct request_queue *q = disk->queue;
/* Only disks that made it through add_disk() need del_gendisk(). */
3633 if (disk->flags & GENHD_FL_UP)
3636 blk_cleanup_queue(q);
3640 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3642 cciss_shutdown(pdev);
3644 #ifdef CONFIG_PCI_MSI
3645 if (hba[i]->msix_vector)
3646 pci_disable_msix(hba[i]->pdev);
3647 else if (hba[i]->msi_vector)
3648 pci_disable_msi(hba[i]->pdev);
3649 #endif /* CONFIG_PCI_MSI */
3651 iounmap(hba[i]->vaddr);
3653 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3654 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3655 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3656 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3657 kfree(hba[i]->cmd_pool_bits);
3658 #ifdef CONFIG_CISS_SCSI_TAPE
3659 kfree(hba[i]->scsi_rejects.complete);
3662 * Deliberately omit pci_disable_device(): it does something nasty to
3663 * Smart Array controllers that pci_enable_device does not undo
3665 pci_release_regions(pdev);
3666 pci_set_drvdata(pdev, NULL);
/*
 * PCI driver registration table: binds cciss_init_one/cciss_remove_one
 * to the device IDs in cciss_pci_device_id, with cciss_shutdown run at
 * system poweroff/reboot to flush the controller cache.
 * NOTE(review): the .name initializer line is elided in this extract.
 */
3670 static struct pci_driver cciss_pci_driver = {
3672 .probe = cciss_init_one,
3673 .remove = __devexit_p(cciss_remove_one),
3674 .id_table = cciss_pci_device_id, /* id_table */
3675 .shutdown = cciss_shutdown,
3679 * This is it. Register the PCI driver information for the cards we control
3680 * the OS will call our registered routines when it finds one of our cards.
/*
 * cciss_init() - module entry point: announce the driver and register
 * with the PCI core; per-controller setup happens in cciss_init_one()
 * as devices are discovered.
 */
3682 static int __init cciss_init(void)
3684 printk(KERN_INFO DRIVER_NAME "\n");
3686 /* Register for our PCI devices */
3687 return pci_register_driver(&cciss_pci_driver);
/*
 * cciss_cleanup() - module exit: unregister the PCI driver, force-remove
 * any controller entries the PCI core somehow left behind, and drop the
 * /proc/driver/cciss directory.
 */
3690 static void __exit cciss_cleanup(void)
3694 pci_unregister_driver(&cciss_pci_driver);
3695 /* double check that all controller entrys have been removed */
3696 for (i = 0; i < MAX_CTLR; i++) {
3697 if (hba[i] != NULL) {
3698 printk(KERN_WARNING "cciss: had to remove"
3699 " controller %d\n", i);
3700 cciss_remove_one(hba[i]->pdev);
3703 remove_proc_entry("cciss", proc_root_driver);
/*
 * fail_all_cmds() - the controller is dead; fail every queued command.
 *
 * Marks the controller not-alive, disables the PCI device, moves every
 * request from reqQ to cmpQ, then completes everything on cmpQ with
 * CMD_HARDWARE_ERR, dispatching by cmd_type exactly as do_cciss_intr()
 * does.  Called from interrupt context (see do_cciss_intr) under no
 * assumption the hardware will ever respond again.
 * NOTE(review): a few brace/return lines are elided in this extract.
 */
3706 static void fail_all_cmds(unsigned long ctlr)
3708 /* If we get here, the board is apparently dead. */
3709 ctlr_info_t *h = hba[ctlr];
3710 CommandList_struct *c;
3711 unsigned long flags;
3713 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3714 h->alive = 0; /* the controller apparently died... */
3716 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3718 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3720 /* move everything off the request queue onto the completed queue */
3721 while ((c = h->reqQ) != NULL) {
3722 removeQ(&(h->reqQ), c);
3724 addQ(&(h->cmpQ), c);
3727 /* Now, fail everything on the completed queue with a HW error */
3728 while ((c = h->cmpQ) != NULL) {
3729 removeQ(&h->cmpQ, c);
/* Every command is reported as a hardware error to its originator. */
3730 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3731 if (c->cmd_type == CMD_RWREQ) {
3732 complete_command(h, c, 0);
3733 } else if (c->cmd_type == CMD_IOCTL_PEND)
3734 complete(c->waiting);
3735 #ifdef CONFIG_CISS_SCSI_TAPE
3736 else if (c->cmd_type == CMD_SCSI)
3737 complete_scsi_command(c, 0, 0);
3740 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
/* Module entry/exit hooks: load registers the PCI driver, unload tears
 * down any remaining controllers (see cciss_cleanup above). */
3744 module_init(cciss_init);
3745 module_exit(cciss_cleanup);