2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/compat.h>
40 #include <linux/blktrace_api.h>
41 #include <asm/uaccess.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 3.6.10)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10)
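/* For example, CCISS_DRIVER_VERSION(3,6,10) evaluates to (3<<16)|(6<<8)|10
 * == 0x03060A; this packed value is what the CCISS_GETDRIVVER ioctl below
 * copies out to userspace as DriverVer_type. */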
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i E500");
58 MODULE_LICENSE("GPL");
60 #include "cciss_cmd.h"
62 #include <linux/cciss_ioctl.h>
64 /* define the PCI info for the cards we can control */
65 static const struct pci_device_id cciss_pci_device_id[] = {
66 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
67 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
68 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
69 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
70 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
75 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
76 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
77 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
78 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
79 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3233},
88 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
90 /* board_id = Subsystem Device ID & Vendor ID
91 * product = Marketing Name for the board
92 * access = Address of the struct of function pointers
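 *
 * For example, the first pci_device_id entry above has subsystem vendor
 * 0x0E11 and subsystem device 0x4070, which combine into board_id
 * 0x40700E11, listed below as "Smart Array 5300".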
94 static struct board_type products[] = {
95 {0x40700E11, "Smart Array 5300", &SA5_access},
96 {0x40800E11, "Smart Array 5i", &SA5B_access},
97 {0x40820E11, "Smart Array 532", &SA5B_access},
98 {0x40830E11, "Smart Array 5312", &SA5B_access},
99 {0x409A0E11, "Smart Array 641", &SA5_access},
100 {0x409B0E11, "Smart Array 642", &SA5_access},
101 {0x409C0E11, "Smart Array 6400", &SA5_access},
102 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
103 {0x40910E11, "Smart Array 6i", &SA5_access},
104 {0x3225103C, "Smart Array P600", &SA5_access},
105 {0x3223103C, "Smart Array P800", &SA5_access},
106 {0x3234103C, "Smart Array P400", &SA5_access},
107 {0x3235103C, "Smart Array P400i", &SA5_access},
108 {0x3211103C, "Smart Array E200i", &SA5_access},
109 {0x3212103C, "Smart Array E200", &SA5_access},
110 {0x3213103C, "Smart Array E200i", &SA5_access},
111 {0x3214103C, "Smart Array E200i", &SA5_access},
112 {0x3215103C, "Smart Array E200i", &SA5_access},
113 {0x3233103C, "Smart Array E500", &SA5_access},
116 /* How long to wait (in milliseconds) for board to go into simple mode */
117 #define MAX_CONFIG_WAIT 30000
118 #define MAX_IOCTL_CONFIG_WAIT 1000
120 /* define how many times we will try a command because of bus resets */
121 #define MAX_CMD_RETRIES 3
123 #define READ_AHEAD 1024
124 #define NR_CMDS 384 /* #commands that can be outstanding */
127 /* Originally the cciss driver only supported 8 major numbers */
128 #define MAX_CTLR_ORIG 8
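/* Minor numbers are split per logical drive: the low four bits select the
 * partition (hence the iminor(inode) & 0x0f check in cciss_open() below)
 * and the remaining bits select the drive on that controller. */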
130 static ctlr_info_t *hba[MAX_CTLR];
132 static void do_cciss_request(request_queue_t *q);
133 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
134 static int cciss_open(struct inode *inode, struct file *filep);
135 static int cciss_release(struct inode *inode, struct file *filep);
136 static int cciss_ioctl(struct inode *inode, struct file *filep,
137 unsigned int cmd, unsigned long arg);
138 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
140 static int revalidate_allvol(ctlr_info_t *host);
141 static int cciss_revalidate(struct gendisk *disk);
142 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
143 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
146 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
147 sector_t *total_size, unsigned int *block_size);
148 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
149 sector_t *total_size, unsigned int *block_size);
150 static void cciss_geometry_inquiry(int ctlr, int logvol,
151 int withirq, sector_t total_size,
152 unsigned int block_size, InquiryData_struct *inq_buff,
153 drive_info_struct *drv);
154 static void cciss_getgeometry(int cntl_num);
155 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
157 static void start_io(ctlr_info_t *h);
158 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
159 unsigned int use_unit_num, unsigned int log_unit,
160 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
161 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
162 unsigned int use_unit_num, unsigned int log_unit,
163 __u8 page_code, int cmd_type);
165 static void fail_all_cmds(unsigned long ctlr);
167 #ifdef CONFIG_PROC_FS
168 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
169 int length, int *eof, void *data);
170 static void cciss_procinit(int i);
172 static void cciss_procinit(int i)
175 #endif /* CONFIG_PROC_FS */
178 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
181 static struct block_device_operations cciss_fops = {
182 .owner = THIS_MODULE,
184 .release = cciss_release,
185 .ioctl = cciss_ioctl,
186 .getgeo = cciss_getgeo,
188 .compat_ioctl = cciss_compat_ioctl,
190 .revalidate_disk = cciss_revalidate,
194 * Enqueuing and dequeuing functions for cmdlists.
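 * The queues are circular doubly-linked lists of CommandList_structs:
 * *Qptr points at the head, elements are linked through c->next/c->prev,
 * and a single-element queue points back at itself (c->next == c->prev == c).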
196 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
200 c->next = c->prev = c;
202 c->prev = (*Qptr)->prev;
204 (*Qptr)->prev->next = c;
209 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
210 CommandList_struct *c)
212 if (c && c->next != c) {
215 c->prev->next = c->next;
216 c->next->prev = c->prev;
223 #include "cciss_scsi.c" /* For SCSI tape support */
225 #ifdef CONFIG_PROC_FS
228 * Report information about this controller.
230 #define ENG_GIG 1000000000
231 #define ENG_GIG_FACTOR (ENG_GIG/512)
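/* ENG_GIG_FACTOR is the number of 512-byte sectors per decimal gigabyte
 * (1000000000 / 512 = 1953125); dividing drv->nr_blocks by it below gives
 * the volume size in GB for the /proc report. */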
232 #define RAID_UNKNOWN 6
233 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
237 static struct proc_dir_entry *proc_cciss;
239 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
240 int length, int *eof, void *data)
245 ctlr_info_t *h = (ctlr_info_t *) data;
246 drive_info_struct *drv;
248 sector_t vol_sz, vol_sz_frac;
252 /* prevent displaying bogus info during configuration
253 * or deconfiguration of a logical volume
255 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
256 if (h->busy_configuring) {
257 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
260 h->busy_configuring = 1;
261 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
263 size = sprintf(buffer, "%s: HP %s Controller\n"
264 "Board ID: 0x%08lx\n"
265 "Firmware Version: %c%c%c%c\n"
267 "Logical drives: %d\n"
268 "Current Q depth: %d\n"
269 "Current # commands on controller: %d\n"
270 "Max Q depth since init: %d\n"
271 "Max # commands on controller since init: %d\n"
272 "Max SG entries since init: %d\n\n",
275 (unsigned long)h->board_id,
276 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
277 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
278 h->num_luns, h->Qdepth, h->commands_outstanding,
279 h->maxQsinceinit, h->max_outstanding, h->maxSG);
283 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
284 for (i = 0; i <= h->highest_lun; i++) {
290 vol_sz = drv->nr_blocks;
291 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
293 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
295 if (drv->raid_level > 5)
296 drv->raid_level = RAID_UNKNOWN;
297 size = sprintf(buffer + len, "cciss/c%dd%d:"
298 "\t%4u.%02uGB\tRAID %s\n",
299 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
300 raid_label[drv->raid_level]);
306 *start = buffer + offset;
310 h->busy_configuring = 0;
315 cciss_proc_write(struct file *file, const char __user *buffer,
316 unsigned long count, void *data)
318 unsigned char cmd[80];
320 #ifdef CONFIG_CISS_SCSI_TAPE
321 ctlr_info_t *h = (ctlr_info_t *) data;
325 if (count > sizeof(cmd) - 1)
327 if (copy_from_user(cmd, buffer, count))
330 len = strlen(cmd); // above 3 lines ensure safety
331 if (len && cmd[len - 1] == '\n')
333 # ifdef CONFIG_CISS_SCSI_TAPE
334 if (strcmp("engage scsi", cmd) == 0) {
335 rc = cciss_engage_scsi(h->ctlr);
340 /* might be nice to have "disengage" too, but it's not
341 safely possible. (only 1 module use count, lock issues.) */
347 * Get us a file in /proc/cciss that says something about each controller.
348 * Create /proc/cciss if it doesn't exist yet.
350 static void __devinit cciss_procinit(int i)
352 struct proc_dir_entry *pde;
354 if (proc_cciss == NULL) {
355 proc_cciss = proc_mkdir("cciss", proc_root_driver);
360 pde = create_proc_read_entry(hba[i]->devname,
361 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
362 proc_cciss, cciss_proc_get_info, hba[i]);
363 pde->write_proc = cciss_proc_write;
365 #endif /* CONFIG_PROC_FS */
368 * For operations that cannot sleep, a command block is allocated at init,
369 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
370 * which ones are free or in use. For operations that can wait for kmalloc
371 * to possibly sleep, this routine can be called with get_from_pool set to 0.
372 * cmd_free() MUST be passed got_from_pool set to 0 if cmd_alloc() was called
 * with get_from_pool set to 0.
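 * Callers pair the flags: the ioctl paths use cmd_alloc(h, 0)/cmd_free(h, c, 0)
 * with per-call pci_alloc_consistent() buffers, while the polled init-time
 * sendcmd() path uses cmd_alloc(h, 1)/cmd_free(h, c, 1) to draw from the pool.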
374 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
376 CommandList_struct *c;
379 dma_addr_t cmd_dma_handle, err_dma_handle;
381 if (!get_from_pool) {
382 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
383 sizeof(CommandList_struct), &cmd_dma_handle);
386 memset(c, 0, sizeof(CommandList_struct));
390 c->err_info = (ErrorInfo_struct *)
391 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
394 if (c->err_info == NULL) {
395 pci_free_consistent(h->pdev,
396 sizeof(CommandList_struct), c, cmd_dma_handle);
399 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
400 } else { /* get it out of the controller's pool */
403 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
406 } while (test_and_set_bit
407 (i & (BITS_PER_LONG - 1),
408 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
410 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
413 memset(c, 0, sizeof(CommandList_struct));
414 cmd_dma_handle = h->cmd_pool_dhandle
415 + i * sizeof(CommandList_struct);
416 c->err_info = h->errinfo_pool + i;
417 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
418 err_dma_handle = h->errinfo_pool_dhandle
419 + i * sizeof(ErrorInfo_struct);
425 c->busaddr = (__u32) cmd_dma_handle;
426 temp64.val = (__u64) err_dma_handle;
427 c->ErrDesc.Addr.lower = temp64.val32.lower;
428 c->ErrDesc.Addr.upper = temp64.val32.upper;
429 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
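/* The controller takes the 64-bit error-info bus address as two 32-bit
 * halves, so the u64bit union (temp64) is used above to split err_dma_handle
 * into ErrDesc.Addr.lower and ErrDesc.Addr.upper. */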
436 * Frees a command block that was previously allocated with cmd_alloc().
438 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
443 if (!got_from_pool) {
444 temp64.val32.lower = c->ErrDesc.Addr.lower;
445 temp64.val32.upper = c->ErrDesc.Addr.upper;
446 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
447 c->err_info, (dma_addr_t) temp64.val);
448 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
449 c, (dma_addr_t) c->busaddr);
452 clear_bit(i & (BITS_PER_LONG - 1),
453 h->cmd_pool_bits + (i / BITS_PER_LONG));
458 static inline ctlr_info_t *get_host(struct gendisk *disk)
460 return disk->queue->queuedata;
463 static inline drive_info_struct *get_drv(struct gendisk *disk)
465 return disk->private_data;
469 * Open. Make sure the device is really there.
471 static int cciss_open(struct inode *inode, struct file *filep)
473 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
474 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
477 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
478 #endif /* CCISS_DEBUG */
480 if (host->busy_initializing || drv->busy_configuring)
483 * Root is allowed to open raw volume zero even if it's not configured
484 * so array config can still work. Root is also allowed to open any
485 * volume that has a LUN ID, so it can issue IOCTL to reread the
486 * disk information. I don't think I really like this
487 * but I'm already using way too many device nodes to claim another one
488 * for "raw controller".
490 if (drv->nr_blocks == 0) {
491 if (iminor(inode) != 0) { /* not node 0? */
492 /* if it's not node 0, make sure it is partition 0 */
493 if (iminor(inode) & 0x0f) {
495 /* if it is, make sure we have a LUN ID */
496 } else if (drv->LunID == 0) {
500 if (!capable(CAP_SYS_ADMIN))
511 static int cciss_release(struct inode *inode, struct file *filep)
513 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
514 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
517 printk(KERN_DEBUG "cciss_release %s\n",
518 inode->i_bdev->bd_disk->disk_name);
519 #endif /* CCISS_DEBUG */
528 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
532 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
537 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
539 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
542 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
545 case CCISS_GETPCIINFO:
546 case CCISS_GETINTINFO:
547 case CCISS_SETINTINFO:
548 case CCISS_GETNODENAME:
549 case CCISS_SETNODENAME:
550 case CCISS_GETHEARTBEAT:
551 case CCISS_GETBUSTYPES:
552 case CCISS_GETFIRMVER:
553 case CCISS_GETDRIVVER:
554 case CCISS_REVALIDVOLS:
555 case CCISS_DEREGDISK:
556 case CCISS_REGNEWDISK:
558 case CCISS_RESCANDISK:
559 case CCISS_GETLUNINFO:
560 return do_ioctl(f, cmd, arg);
562 case CCISS_PASSTHRU32:
563 return cciss_ioctl32_passthru(f, cmd, arg);
564 case CCISS_BIG_PASSTHRU32:
565 return cciss_ioctl32_big_passthru(f, cmd, arg);
572 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
575 IOCTL32_Command_struct __user *arg32 =
576 (IOCTL32_Command_struct __user *) arg;
577 IOCTL_Command_struct arg64;
578 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
584 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
585 sizeof(arg64.LUN_info));
587 copy_from_user(&arg64.Request, &arg32->Request,
588 sizeof(arg64.Request));
590 copy_from_user(&arg64.error_info, &arg32->error_info,
591 sizeof(arg64.error_info));
592 err |= get_user(arg64.buf_size, &arg32->buf_size);
593 err |= get_user(cp, &arg32->buf);
594 arg64.buf = compat_ptr(cp);
595 err |= copy_to_user(p, &arg64, sizeof(arg64));
600 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
604 copy_in_user(&arg32->error_info, &p->error_info,
605 sizeof(arg32->error_info));
611 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
614 BIG_IOCTL32_Command_struct __user *arg32 =
615 (BIG_IOCTL32_Command_struct __user *) arg;
616 BIG_IOCTL_Command_struct arg64;
617 BIG_IOCTL_Command_struct __user *p =
618 compat_alloc_user_space(sizeof(arg64));
624 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
625 sizeof(arg64.LUN_info));
627 copy_from_user(&arg64.Request, &arg32->Request,
628 sizeof(arg64.Request));
630 copy_from_user(&arg64.error_info, &arg32->error_info,
631 sizeof(arg64.error_info));
632 err |= get_user(arg64.buf_size, &arg32->buf_size);
633 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
634 err |= get_user(cp, &arg32->buf);
635 arg64.buf = compat_ptr(cp);
636 err |= copy_to_user(p, &arg64, sizeof(arg64));
641 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
645 copy_in_user(&arg32->error_info, &p->error_info,
646 sizeof(arg32->error_info));
653 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
655 drive_info_struct *drv = get_drv(bdev->bd_disk);
660 geo->heads = drv->heads;
661 geo->sectors = drv->sectors;
662 geo->cylinders = drv->cylinders;
669 static int cciss_ioctl(struct inode *inode, struct file *filep,
670 unsigned int cmd, unsigned long arg)
672 struct block_device *bdev = inode->i_bdev;
673 struct gendisk *disk = bdev->bd_disk;
674 ctlr_info_t *host = get_host(disk);
675 drive_info_struct *drv = get_drv(disk);
676 int ctlr = host->ctlr;
677 void __user *argp = (void __user *)arg;
680 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
681 #endif /* CCISS_DEBUG */
684 case CCISS_GETPCIINFO:
686 cciss_pci_info_struct pciinfo;
690 pciinfo.domain = pci_domain_nr(host->pdev->bus);
691 pciinfo.bus = host->pdev->bus->number;
692 pciinfo.dev_fn = host->pdev->devfn;
693 pciinfo.board_id = host->board_id;
695 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
699 case CCISS_GETINTINFO:
701 cciss_coalint_struct intinfo;
705 readl(&host->cfgtable->HostWrite.CoalIntDelay);
707 readl(&host->cfgtable->HostWrite.CoalIntCount);
709 (argp, &intinfo, sizeof(cciss_coalint_struct)))
713 case CCISS_SETINTINFO:
715 cciss_coalint_struct intinfo;
721 if (!capable(CAP_SYS_ADMIN))
724 (&intinfo, argp, sizeof(cciss_coalint_struct)))
726 if ((intinfo.delay == 0) && (intinfo.count == 0))
728 // printk("cciss_ioctl: delay and count cannot be 0\n");
731 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
732 /* Update the field, and then ring the doorbell */
733 writel(intinfo.delay,
734 &(host->cfgtable->HostWrite.CoalIntDelay));
735 writel(intinfo.count,
736 &(host->cfgtable->HostWrite.CoalIntCount));
737 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
739 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
740 if (!(readl(host->vaddr + SA5_DOORBELL)
743 /* delay and try again */
746 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
747 if (i >= MAX_IOCTL_CONFIG_WAIT)
751 case CCISS_GETNODENAME:
753 NodeName_type NodeName;
758 for (i = 0; i < 16; i++)
760 readb(&host->cfgtable->ServerName[i]);
761 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
765 case CCISS_SETNODENAME:
767 NodeName_type NodeName;
773 if (!capable(CAP_SYS_ADMIN))
777 (NodeName, argp, sizeof(NodeName_type)))
780 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
782 /* Update the field, and then ring the doorbell */
783 for (i = 0; i < 16; i++)
785 &host->cfgtable->ServerName[i]);
787 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
789 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
790 if (!(readl(host->vaddr + SA5_DOORBELL)
793 /* delay and try again */
796 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
797 if (i >= MAX_IOCTL_CONFIG_WAIT)
802 case CCISS_GETHEARTBEAT:
804 Heartbeat_type heartbeat;
808 heartbeat = readl(&host->cfgtable->HeartBeat);
810 (argp, &heartbeat, sizeof(Heartbeat_type)))
814 case CCISS_GETBUSTYPES:
816 BusTypes_type BusTypes;
820 BusTypes = readl(&host->cfgtable->BusTypes);
822 (argp, &BusTypes, sizeof(BusTypes_type)))
826 case CCISS_GETFIRMVER:
828 FirmwareVer_type firmware;
832 memcpy(firmware, host->firm_ver, 4);
835 (argp, firmware, sizeof(FirmwareVer_type)))
839 case CCISS_GETDRIVVER:
841 DriverVer_type DriverVer = DRIVER_VERSION;
847 (argp, &DriverVer, sizeof(DriverVer_type)))
852 case CCISS_REVALIDVOLS:
853 if (bdev != bdev->bd_contains || drv != host->drv)
855 return revalidate_allvol(host);
857 case CCISS_GETLUNINFO:{
858 LogvolInfo_struct luninfo;
860 luninfo.LunID = drv->LunID;
861 luninfo.num_opens = drv->usage_count;
862 luninfo.num_parts = 0;
863 if (copy_to_user(argp, &luninfo,
864 sizeof(LogvolInfo_struct)))
868 case CCISS_DEREGDISK:
869 return rebuild_lun_table(host, disk);
872 return rebuild_lun_table(host, NULL);
876 IOCTL_Command_struct iocommand;
877 CommandList_struct *c;
881 DECLARE_COMPLETION_ONSTACK(wait);
886 if (!capable(CAP_SYS_RAWIO))
890 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
892 if ((iocommand.buf_size < 1) &&
893 (iocommand.Request.Type.Direction != XFER_NONE)) {
896 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
897 /* Check kmalloc limits */
898 if (iocommand.buf_size > 128000)
901 if (iocommand.buf_size > 0) {
902 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
906 if (iocommand.Request.Type.Direction == XFER_WRITE) {
907 /* Copy the data into the buffer we created */
909 (buff, iocommand.buf, iocommand.buf_size)) {
914 memset(buff, 0, iocommand.buf_size);
916 if ((c = cmd_alloc(host, 0)) == NULL) {
920 // Fill in the command type
921 c->cmd_type = CMD_IOCTL_PEND;
922 // Fill in Command Header
923 c->Header.ReplyQueue = 0; // unused in simple mode
924 if (iocommand.buf_size > 0) // buffer to fill
926 c->Header.SGList = 1;
927 c->Header.SGTotal = 1;
928 } else // no buffers to fill
930 c->Header.SGList = 0;
931 c->Header.SGTotal = 0;
933 c->Header.LUN = iocommand.LUN_info;
934 c->Header.Tag.lower = c->busaddr; // use the bus address of the cmd block as the tag
936 // Fill in Request block
937 c->Request = iocommand.Request;
939 // Fill in the scatter gather information
940 if (iocommand.buf_size > 0) {
941 temp64.val = pci_map_single(host->pdev, buff,
943 PCI_DMA_BIDIRECTIONAL);
944 c->SG[0].Addr.lower = temp64.val32.lower;
945 c->SG[0].Addr.upper = temp64.val32.upper;
946 c->SG[0].Len = iocommand.buf_size;
947 c->SG[0].Ext = 0; // we are not chaining
951 /* Put the request on the tail of the request queue */
952 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
953 addQ(&host->reqQ, c);
956 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
958 wait_for_completion(&wait);
960 /* unlock the buffers from DMA */
961 temp64.val32.lower = c->SG[0].Addr.lower;
962 temp64.val32.upper = c->SG[0].Addr.upper;
963 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
965 PCI_DMA_BIDIRECTIONAL);
967 /* Copy the error information out */
968 iocommand.error_info = *(c->err_info);
970 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
972 cmd_free(host, c, 0);
976 if (iocommand.Request.Type.Direction == XFER_READ) {
977 /* Copy the data out of the buffer we created */
979 (iocommand.buf, buff, iocommand.buf_size)) {
981 cmd_free(host, c, 0);
986 cmd_free(host, c, 0);
989 case CCISS_BIG_PASSTHRU:{
990 BIG_IOCTL_Command_struct *ioc;
991 CommandList_struct *c;
992 unsigned char **buff = NULL;
993 int *buff_size = NULL;
999 DECLARE_COMPLETION_ONSTACK(wait);
1002 BYTE __user *data_ptr;
1006 if (!capable(CAP_SYS_RAWIO))
1008 ioc = (BIG_IOCTL_Command_struct *)
1009 kmalloc(sizeof(*ioc), GFP_KERNEL);
1014 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1018 if ((ioc->buf_size < 1) &&
1019 (ioc->Request.Type.Direction != XFER_NONE)) {
1023 /* Check kmalloc limits using all SGs */
1024 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1028 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1033 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1038 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int),
1044 left = ioc->buf_size;
1045 data_ptr = ioc->buf;
1048 ioc->malloc_size) ? ioc->
1050 buff_size[sg_used] = sz;
1051 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1052 if (buff[sg_used] == NULL) {
1056 if (ioc->Request.Type.Direction == XFER_WRITE) {
1058 (buff[sg_used], data_ptr, sz)) {
1063 memset(buff[sg_used], 0, sz);
1069 if ((c = cmd_alloc(host, 0)) == NULL) {
1073 c->cmd_type = CMD_IOCTL_PEND;
1074 c->Header.ReplyQueue = 0;
1076 if (ioc->buf_size > 0) {
1077 c->Header.SGList = sg_used;
1078 c->Header.SGTotal = sg_used;
1080 c->Header.SGList = 0;
1081 c->Header.SGTotal = 0;
1083 c->Header.LUN = ioc->LUN_info;
1084 c->Header.Tag.lower = c->busaddr;
1086 c->Request = ioc->Request;
1087 if (ioc->buf_size > 0) {
1089 for (i = 0; i < sg_used; i++) {
1091 pci_map_single(host->pdev, buff[i],
1093 PCI_DMA_BIDIRECTIONAL);
1094 c->SG[i].Addr.lower =
1096 c->SG[i].Addr.upper =
1098 c->SG[i].Len = buff_size[i];
1099 c->SG[i].Ext = 0; /* we are not chaining */
1103 /* Put the request on the tail of the request queue */
1104 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1105 addQ(&host->reqQ, c);
1108 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1109 wait_for_completion(&wait);
1110 /* unlock the buffers from DMA */
1111 for (i = 0; i < sg_used; i++) {
1112 temp64.val32.lower = c->SG[i].Addr.lower;
1113 temp64.val32.upper = c->SG[i].Addr.upper;
1114 pci_unmap_single(host->pdev,
1115 (dma_addr_t) temp64.val, buff_size[i],
1116 PCI_DMA_BIDIRECTIONAL);
1118 /* Copy the error information out */
1119 ioc->error_info = *(c->err_info);
1120 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1121 cmd_free(host, c, 0);
1125 if (ioc->Request.Type.Direction == XFER_READ) {
1126 /* Copy the data out of the buffer we created */
1127 BYTE __user *ptr = ioc->buf;
1128 for (i = 0; i < sg_used; i++) {
1130 (ptr, buff[i], buff_size[i])) {
1131 cmd_free(host, c, 0);
1135 ptr += buff_size[i];
1138 cmd_free(host, c, 0);
1142 for (i = 0; i < sg_used; i++)
1156 * revalidate_allvol is for online array config utilities. After a
1157 * utility reconfigures the drives in the array, it can use this function
1158 * (through an ioctl) to make the driver zap any previous disk structs for
1159 * that controller and get new ones.
1161 * Right now I'm using the getgeometry() function to do this, but this
1162 * function should probably be finer grained and allow you to revalidate one
1163 * particular logical volume (instead of all of them on a particular controller).
1166 static int revalidate_allvol(ctlr_info_t *host)
1168 int ctlr = host->ctlr, i;
1169 unsigned long flags;
1171 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1172 if (host->usage_count > 1) {
1173 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1174 printk(KERN_WARNING "cciss: Device busy for volume"
1175 " revalidation (usage=%d)\n", host->usage_count);
1178 host->usage_count++;
1179 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1181 for (i = 0; i < NWD; i++) {
1182 struct gendisk *disk = host->gendisk[i];
1184 request_queue_t *q = disk->queue;
1186 if (disk->flags & GENHD_FL_UP)
1189 blk_cleanup_queue(q);
1194 * Set the partition and block size structures for all volumes
1195 * on this controller to zero. We will reread all of this data
1197 memset(host->drv, 0, sizeof(drive_info_struct)
1200 * Tell the array controller not to give us any interrupts while
1201 * we check the new geometry. Then turn interrupts back on when
1204 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1205 cciss_getgeometry(ctlr);
1206 host->access.set_intr_mask(host, CCISS_INTR_ON);
1208 /* Loop through each real device */
1209 for (i = 0; i < NWD; i++) {
1210 struct gendisk *disk = host->gendisk[i];
1211 drive_info_struct *drv = &(host->drv[i]);
1212 /* we must register the controller even if no disks exist */
1213 /* this is for the online array utilities */
1214 if (!drv->heads && i)
1216 blk_queue_hardsect_size(drv->queue, drv->block_size);
1217 set_capacity(disk, drv->nr_blocks);
1220 host->usage_count--;
1224 static inline void complete_buffers(struct bio *bio, int status)
1227 struct bio *xbh = bio->bi_next;
1228 int nr_sectors = bio_sectors(bio);
1230 bio->bi_next = NULL;
1231 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1236 static void cciss_check_queues(ctlr_info_t *h)
1238 int start_queue = h->next_to_run;
1241 /* check to see if we have maxed out the number of commands that can
1242 * be placed on the queue. If so then exit. We do this check here
1243 * in case the interrupt we serviced was from an ioctl and did not
1244 * free any new commands.
1246 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
1249 /* We have room on the queue for more commands. Now we need to queue
1250 * them up. We will also keep track of the next queue to run so
1251 * that every queue gets a chance to be started first.
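 * For example, with highest_lun == 3 and next_to_run == 2, the loop below
 * visits the per-drive queues in the order 2, 3, 0, 1, so no drive is
 * starved just because a lower-numbered drive stays busy.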
1253 for (i = 0; i < h->highest_lun + 1; i++) {
1254 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1255 /* make sure the disk has been added and the drive is real
1256 * because this can be called from the middle of init_one.
1258 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1260 blk_start_queue(h->gendisk[curr_queue]->queue);
1262 /* check to see if we have maxed out the number of commands
1263 * that can be placed on the queue.
1265 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
1266 if (curr_queue == start_queue) {
1268 (start_queue + 1) % (h->highest_lun + 1);
1271 h->next_to_run = curr_queue;
1275 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1280 static void cciss_softirq_done(struct request *rq)
1282 CommandList_struct *cmd = rq->completion_data;
1283 ctlr_info_t *h = hba[cmd->ctlr];
1284 unsigned long flags;
1288 if (cmd->Request.Type.Direction == XFER_READ)
1289 ddir = PCI_DMA_FROMDEVICE;
1291 ddir = PCI_DMA_TODEVICE;
1293 /* command did not need to be retried */
1294 /* unmap the DMA mapping for all the scatter gather elements */
1295 for (i = 0; i < cmd->Header.SGList; i++) {
1296 temp64.val32.lower = cmd->SG[i].Addr.lower;
1297 temp64.val32.upper = cmd->SG[i].Addr.upper;
1298 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1301 complete_buffers(rq->bio, rq->errors);
1304 printk("Done with %p\n", rq);
1305 #endif /* CCISS_DEBUG */
1307 add_disk_randomness(rq->rq_disk);
1308 spin_lock_irqsave(&h->lock, flags);
1309 end_that_request_last(rq, rq->errors);
1310 cmd_free(h, cmd, 1);
1311 cciss_check_queues(h);
1312 spin_unlock_irqrestore(&h->lock, flags);
1315 /* This function will check the usage_count of the drive to be updated/added.
1316 * If the usage_count is zero then the drive information will be updated and
1317 * the disk will be re-registered with the kernel. If not then it will be
1318 * left alone for the next reboot. The exception to this is disk 0 which
1319 * will always be left registered with the kernel since it is also the
1320 * controller node. Any changes to disk 0 will show up on the next reboot.
1323 static void cciss_update_drive_info(int ctlr, int drv_index)
1325 ctlr_info_t *h = hba[ctlr];
1326 struct gendisk *disk;
1327 InquiryData_struct *inq_buff = NULL;
1328 unsigned int block_size;
1329 sector_t total_size;
1330 unsigned long flags = 0;
1333 /* if the disk already exists then deregister it before proceeding */
1334 if (h->drv[drv_index].raid_level != -1) {
1335 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1336 h->drv[drv_index].busy_configuring = 1;
1337 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1338 ret = deregister_disk(h->gendisk[drv_index],
1339 &h->drv[drv_index], 0);
1340 h->drv[drv_index].busy_configuring = 0;
1343 /* If the disk is in use return */
1347 /* Get information about the disk and modify the driver structure */
1348 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1349 if (inq_buff == NULL)
1352 cciss_read_capacity(ctlr, drv_index, 1,
1353 &total_size, &block_size);
1355 /* total size = last LBA + 1 */
1356 /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */
1357 /* so we assume this volume must be >2TB in size */
1358 if (total_size == (__u32) 0) {
1359 cciss_read_capacity_16(ctlr, drv_index, 1,
1360 &total_size, &block_size);
1361 h->cciss_read = CCISS_READ_16;
1362 h->cciss_write = CCISS_WRITE_16;
1364 h->cciss_read = CCISS_READ_10;
1365 h->cciss_write = CCISS_WRITE_10;
1367 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1368 inq_buff, &h->drv[drv_index]);
1371 disk = h->gendisk[drv_index];
1372 set_capacity(disk, h->drv[drv_index].nr_blocks);
1374 /* if it's the controller it's already added */
1376 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1378 /* Set up queue information */
1379 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1380 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1382 /* This is a hardware imposed limit. */
1383 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1385 /* This is a limit in the driver and could be eliminated. */
1386 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1388 blk_queue_max_sectors(disk->queue, 512);
1390 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1392 disk->queue->queuedata = hba[ctlr];
1394 blk_queue_hardsect_size(disk->queue,
1395 hba[ctlr]->drv[drv_index].block_size);
1397 h->drv[drv_index].queue = disk->queue;
1405 printk(KERN_ERR "cciss: out of memory\n");
1409 /* This function will find the first index of the controller's drive array
1410 * that has a -1 for the raid_level and will return that index. This is
1411 * where new drives will be added. If the index to be returned is greater
1412 * than the highest_lun index for the controller then highest_lun is set
1413 * to this new index. If there are no available indexes then -1 is returned.
1415 static int cciss_find_free_drive_index(int ctlr)
1419 for (i = 0; i < CISS_MAX_LUN; i++) {
1420 if (hba[ctlr]->drv[i].raid_level == -1) {
1421 if (i > hba[ctlr]->highest_lun)
1422 hba[ctlr]->highest_lun = i;
1429 /* This function will add and remove logical drives from the Logical
1430 * drive array of the controller and maintain persistency of ordering
1431 * so that mount points are preserved until the next reboot. This allows
1432 * for the removal of logical drives in the middle of the drive array
1433 * without a re-ordering of those drives.
1435 * h = The controller to perform the operations on
1436 * del_disk = The disk to remove if specified. If the value given
1437 * is NULL then no disk is removed.
1439 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1443 ReportLunData_struct *ld_buff = NULL;
1444 drive_info_struct *drv = NULL;
1451 unsigned long flags;
1453 /* Set busy_configuring flag for this operation */
1454 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1455 if (h->num_luns >= CISS_MAX_LUN) {
1456 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1460 if (h->busy_configuring) {
1461 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1464 h->busy_configuring = 1;
1466 /* if del_disk is NULL then we are being called to add a new disk
1467 * and update the logical drive table. If it is not NULL then
1468 * we will check if the disk is in use or not.
1470 if (del_disk != NULL) {
1471 drv = get_drv(del_disk);
1472 drv->busy_configuring = 1;
1473 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1474 return_code = deregister_disk(del_disk, drv, 1);
1475 drv->busy_configuring = 0;
1476 h->busy_configuring = 0;
1479 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1480 if (!capable(CAP_SYS_RAWIO))
1483 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1484 if (ld_buff == NULL)
1487 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1488 sizeof(ReportLunData_struct), 0,
1491 if (return_code == IO_OK) {
1493 (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
1496 (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
1499 (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
1502 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1503 } else { /* reading number of logical volumes failed */
1504 printk(KERN_WARNING "cciss: report logical volume"
1505 " command failed\n");
1510 num_luns = listlength / 8; /* 8 bytes per entry */
1511 if (num_luns > CISS_MAX_LUN) {
1512 num_luns = CISS_MAX_LUN;
1513 printk(KERN_WARNING "cciss: more luns configured"
1514 " on controller than can be handled by"
1518 /* Compare the controller's drive array to the driver's drive array.
1519 * Check for updates in the drive information and any new drives
1520 * on the controller.
1522 for (i = 0; i < num_luns; i++) {
1528 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1530 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1532 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1533 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1535 /* Find if the LUN is already in the drive array
1536 * of the controller. If so then update its info
1537 * if it is not in use. If it does not exist then find
1538 * the first free index and add it.
1540 for (j = 0; j <= h->highest_lun; j++) {
1541 if (h->drv[j].LunID == lunid) {
1547 /* check if the drive was found already in the array */
1549 drv_index = cciss_find_free_drive_index(ctlr);
1550 if (drv_index == -1)
1554 h->drv[drv_index].LunID = lunid;
1555 cciss_update_drive_info(ctlr, drv_index);
1561 h->busy_configuring = 0;
1562 /* We return -1 here to tell the ACU that we have registered/updated
1563 * all of the drives that we can and to keep it from calling us
1568 printk(KERN_ERR "cciss: out of memory\n");
1572 /* This function will deregister the disk and its queue from the
1573 * kernel. It must be called with the controller lock held and the
1574 * drv structure's busy_configuring flag set. Its parameters are:
1576 * disk = This is the disk to be deregistered
1577 * drv = This is the drive_info_struct associated with the disk to be
1578 * deregistered. It contains information about the disk used
1580 * clear_all = This flag determines whether or not the disk information
1581 * is going to be completely cleared out and the highest_lun
1582 * reset. Sometimes we want to clear out information about
1583 * the disk in preparation for re-adding it. In this case
1584 * the highest_lun should be left unchanged and the LunID
1585 * should not be cleared.
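 *
 * In this file, cciss_update_drive_info() calls this with clear_all == 0
 * before re-adding a changed volume, while rebuild_lun_table() passes
 * clear_all == 1 when a disk is being removed outright.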
1587 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1590 ctlr_info_t *h = get_host(disk);
1592 if (!capable(CAP_SYS_RAWIO))
1595 /* make sure logical volume is NOT in use */
1596 if (clear_all || (h->gendisk[0] == disk)) {
1597 if (drv->usage_count > 1)
1599 } else if (drv->usage_count > 0)
1602 /* invalidate the devices and deregister the disk. If it is disk
1603 * zero do not deregister it but just zero out its values. This
1604 * allows us to delete disk zero but keep the controller registered.
1606 if (h->gendisk[0] != disk) {
1608 request_queue_t *q = disk->queue;
1609 if (disk->flags & GENHD_FL_UP)
1612 blk_cleanup_queue(q);
1619 /* zero out the disk size info */
1621 drv->block_size = 0;
1625 drv->raid_level = -1; /* This can be used as a flag variable to
1626 * indicate that this element of the drive array is free. */
1631 /* check to see if it was the last disk */
1632 if (drv == h->drv + h->highest_lun) {
1633 /* if so, find the new highest lun */
1634 int i, newhighest = -1;
1635 for (i = 0; i < h->highest_lun; i++) {
1636 /* if the disk has size > 0, it is available */
1637 if (h->drv[i].heads)
1640 h->highest_lun = newhighest;
1648 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1649 1: address logical volume log_unit,
1650 2: periph device address is scsi3addr */
1651 unsigned int log_unit, __u8 page_code,
1652 unsigned char *scsi3addr, int cmd_type)
1654 ctlr_info_t *h = hba[ctlr];
1655 u64bit buff_dma_handle;
1658 c->cmd_type = CMD_IOCTL_PEND;
1659 c->Header.ReplyQueue = 0;
1661 c->Header.SGList = 1;
1662 c->Header.SGTotal = 1;
1664 c->Header.SGList = 0;
1665 c->Header.SGTotal = 0;
1667 c->Header.Tag.lower = c->busaddr;
1669 c->Request.Type.Type = cmd_type;
1670 if (cmd_type == TYPE_CMD) {
1673 /* If use_unit_num is 0 then this is going
1674 to the controller, so it's a physical command:
1675 mode = 0, target = 0. So we have nothing to write.
1676 Otherwise, if use_unit_num == 1:
1677 mode = 1 (volume set addressing), target = LUNID;
1678 otherwise, if use_unit_num == 2:
1679 mode = 0 (periph dev addr), target = scsi3addr */
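/* For example, a CISS_INQUIRY addressed to logical volume log_unit is sent
   with use_unit_num == 1, so the code below sets
   LUN.LogDev.VolId = h->drv[log_unit].LunID and LUN.LogDev.Mode = 1. */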
1680 if (use_unit_num == 1) {
1681 c->Header.LUN.LogDev.VolId =
1682 h->drv[log_unit].LunID;
1683 c->Header.LUN.LogDev.Mode = 1;
1684 } else if (use_unit_num == 2) {
1685 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1687 c->Header.LUN.LogDev.Mode = 0;
1689 /* are we trying to read a vital product page */
1690 if (page_code != 0) {
1691 c->Request.CDB[1] = 0x01;
1692 c->Request.CDB[2] = page_code;
1694 c->Request.CDBLen = 6;
1695 c->Request.Type.Attribute = ATTR_SIMPLE;
1696 c->Request.Type.Direction = XFER_READ;
1697 c->Request.Timeout = 0;
1698 c->Request.CDB[0] = CISS_INQUIRY;
1699 c->Request.CDB[4] = size & 0xFF;
1701 case CISS_REPORT_LOG:
1702 case CISS_REPORT_PHYS:
1703 /* Talking to the controller, so it's a physical command:
1704 mode = 00, target = 0. Nothing to write.
1706 c->Request.CDBLen = 12;
1707 c->Request.Type.Attribute = ATTR_SIMPLE;
1708 c->Request.Type.Direction = XFER_READ;
1709 c->Request.Timeout = 0;
1710 c->Request.CDB[0] = cmd;
1711 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1712 c->Request.CDB[7] = (size >> 16) & 0xFF;
1713 c->Request.CDB[8] = (size >> 8) & 0xFF;
1714 c->Request.CDB[9] = size & 0xFF;
1717 case CCISS_READ_CAPACITY:
1718 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1719 c->Header.LUN.LogDev.Mode = 1;
1720 c->Request.CDBLen = 10;
1721 c->Request.Type.Attribute = ATTR_SIMPLE;
1722 c->Request.Type.Direction = XFER_READ;
1723 c->Request.Timeout = 0;
1724 c->Request.CDB[0] = cmd;
1726 case CCISS_READ_CAPACITY_16:
1727 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1728 c->Header.LUN.LogDev.Mode = 1;
1729 c->Request.CDBLen = 16;
1730 c->Request.Type.Attribute = ATTR_SIMPLE;
1731 c->Request.Type.Direction = XFER_READ;
1732 c->Request.Timeout = 0;
1733 c->Request.CDB[0] = cmd;
1734 c->Request.CDB[1] = 0x10;
1735 c->Request.CDB[10] = (size >> 24) & 0xFF;
1736 c->Request.CDB[11] = (size >> 16) & 0xFF;
1737 c->Request.CDB[12] = (size >> 8) & 0xFF;
1738 c->Request.CDB[13] = size & 0xFF;
1739 c->Request.Timeout = 0;
1740 c->Request.CDB[0] = cmd;
1742 case CCISS_CACHE_FLUSH:
1743 c->Request.CDBLen = 12;
1744 c->Request.Type.Attribute = ATTR_SIMPLE;
1745 c->Request.Type.Direction = XFER_WRITE;
1746 c->Request.Timeout = 0;
1747 c->Request.CDB[0] = BMIC_WRITE;
1748 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1752 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1755 } else if (cmd_type == TYPE_MSG) {
1757 case 0: /* ABORT message */
1758 c->Request.CDBLen = 12;
1759 c->Request.Type.Attribute = ATTR_SIMPLE;
1760 c->Request.Type.Direction = XFER_WRITE;
1761 c->Request.Timeout = 0;
1762 c->Request.CDB[0] = cmd; /* abort */
1763 c->Request.CDB[1] = 0; /* abort a command */
1764 /* buff contains the tag of the command to abort */
1765 memcpy(&c->Request.CDB[4], buff, 8);
1767 case 1: /* RESET message */
1768 c->Request.CDBLen = 12;
1769 c->Request.Type.Attribute = ATTR_SIMPLE;
1770 c->Request.Type.Direction = XFER_WRITE;
1771 c->Request.Timeout = 0;
1772 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1773 c->Request.CDB[0] = cmd; /* reset */
1774 c->Request.CDB[1] = 0x04; /* reset a LUN */
1776 case 3: /* No-Op message */
1777 c->Request.CDBLen = 1;
1778 c->Request.Type.Attribute = ATTR_SIMPLE;
1779 c->Request.Type.Direction = XFER_WRITE;
1780 c->Request.Timeout = 0;
1781 c->Request.CDB[0] = cmd;
1785 "cciss%d: unknown message type %d\n", ctlr, cmd);
1790 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1793 /* Fill in the scatter gather information */
1795 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1797 PCI_DMA_BIDIRECTIONAL);
1798 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1799 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1800 c->SG[0].Len = size;
1801 c->SG[0].Ext = 0; /* we are not chaining */
1806 static int sendcmd_withirq(__u8 cmd,
1810 unsigned int use_unit_num,
1811 unsigned int log_unit, __u8 page_code, int cmd_type)
1813 ctlr_info_t *h = hba[ctlr];
1814 CommandList_struct *c;
1815 u64bit buff_dma_handle;
1816 unsigned long flags;
1818 DECLARE_COMPLETION_ONSTACK(wait);
1820 if ((c = cmd_alloc(h, 0)) == NULL)
1822 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1823 log_unit, page_code, NULL, cmd_type);
1824 if (return_status != IO_OK) {
1826 return return_status;
1831 /* Put the request on the tail of the queue and send it */
1832 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1836 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1838 wait_for_completion(&wait);
1840 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1841 switch (c->err_info->CommandStatus) {
1842 case CMD_TARGET_STATUS:
1843 printk(KERN_WARNING "cciss: cmd %p has "
1844 " completed with errors\n", c);
1845 if (c->err_info->ScsiStatus) {
1846 printk(KERN_WARNING "cciss: cmd %p "
1847 "has SCSI Status = %x\n",
1848 c, c->err_info->ScsiStatus);
1852 case CMD_DATA_UNDERRUN:
1853 case CMD_DATA_OVERRUN:
1854 /* expected for inquiry and report lun commands */
1857 printk(KERN_WARNING "cciss: Cmd %p is "
1858 "reported invalid\n", c);
1859 return_status = IO_ERROR;
1861 case CMD_PROTOCOL_ERR:
1862 printk(KERN_WARNING "cciss: cmd %p has "
1863 "protocol error \n", c);
1864 return_status = IO_ERROR;
1866 case CMD_HARDWARE_ERR:
1867 printk(KERN_WARNING "cciss: cmd %p had "
1868 " hardware error\n", c);
1869 return_status = IO_ERROR;
1871 case CMD_CONNECTION_LOST:
1872 printk(KERN_WARNING "cciss: cmd %p had "
1873 "connection lost\n", c);
1874 return_status = IO_ERROR;
1877 printk(KERN_WARNING "cciss: cmd %p was "
1879 return_status = IO_ERROR;
1881 case CMD_ABORT_FAILED:
1882 printk(KERN_WARNING "cciss: cmd %p reports "
1883 "abort failed\n", c);
1884 return_status = IO_ERROR;
1886 case CMD_UNSOLICITED_ABORT:
1888 "cciss%d: unsolicited abort %p\n", ctlr, c);
1889 if (c->retry_count < MAX_CMD_RETRIES) {
1891 "cciss%d: retrying %p\n", ctlr, c);
1893 /* erase the old error information */
1894 memset(c->err_info, 0,
1895 sizeof(ErrorInfo_struct));
1896 return_status = IO_OK;
1897 INIT_COMPLETION(wait);
1900 return_status = IO_ERROR;
1903 printk(KERN_WARNING "cciss: cmd %p returned "
1904 "unknown status %x\n", c,
1905 c->err_info->CommandStatus);
1906 return_status = IO_ERROR;
1909 /* unlock the buffers from DMA */
1910 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1911 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1912 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1913 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1915 return return_status;
1918 static void cciss_geometry_inquiry(int ctlr, int logvol,
1919 int withirq, sector_t total_size,
1920 unsigned int block_size,
1921 InquiryData_struct *inq_buff,
1922 drive_info_struct *drv)
1928 memset(inq_buff, 0, sizeof(InquiryData_struct));
1930 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1931 inq_buff, sizeof(*inq_buff), 1,
1932 logvol, 0xC1, TYPE_CMD);
1934 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1935 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1937 if (return_code == IO_OK) {
1938 if (inq_buff->data_byte[8] == 0xFF) {
1940 "cciss: reading geometry failed, volume "
1941 "does not support reading geometry\n");
1942 drv->block_size = block_size;
1943 drv->nr_blocks = total_size;
1945 drv->sectors = 32; // Sectors per track
1946 t = drv->heads * drv->sectors;
1947 drv->cylinders = total_size;
1948 rem = do_div(drv->cylinders, t);
1950 drv->block_size = block_size;
1951 drv->nr_blocks = total_size;
1952 drv->heads = inq_buff->data_byte[6];
1953 drv->sectors = inq_buff->data_byte[7];
1954 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1955 drv->cylinders += inq_buff->data_byte[5];
1956 drv->raid_level = inq_buff->data_byte[8];
1957 t = drv->heads * drv->sectors;
1959 drv->cylinders = total_size;
1960 rem = do_div(drv->cylinders, t);
1963 } else { /* Get geometry failed */
1964 printk(KERN_WARNING "cciss: reading geometry failed\n");
1966 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1967 drv->heads, drv->sectors, drv->cylinders);
1971 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1972 unsigned int *block_size)
1974 ReadCapdata_struct *buf;
1976 buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1978 printk(KERN_WARNING "cciss: out of memory\n");
1981 memset(buf, 0, sizeof(ReadCapdata_struct));
1983 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1984 ctlr, buf, sizeof(ReadCapdata_struct),
1985 1, logvol, 0, TYPE_CMD);
1987 return_code = sendcmd(CCISS_READ_CAPACITY,
1988 ctlr, buf, sizeof(ReadCapdata_struct),
1989 1, logvol, 0, NULL, TYPE_CMD);
1990 if (return_code == IO_OK) {
1991 *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1;
1992 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
1993 } else { /* read capacity command failed */
1994 printk(KERN_WARNING "cciss: read capacity failed\n");
1996 *block_size = BLOCK_SIZE;
1998 if (*total_size != (__u32) 0)
1999 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2000 (unsigned long long)*total_size, *block_size);
2006 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2008 ReadCapdata_struct_16 *buf;
2010 buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2012 printk(KERN_WARNING "cciss: out of memory\n");
2015 memset(buf, 0, sizeof(ReadCapdata_struct_16));
2017 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2018 ctlr, buf, sizeof(ReadCapdata_struct_16),
2019 1, logvol, 0, TYPE_CMD);
2022 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2023 ctlr, buf, sizeof(ReadCapdata_struct_16),
2024 1, logvol, 0, NULL, TYPE_CMD);
2026 if (return_code == IO_OK) {
2027 *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1;
2028 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
2029 } else { /* read capacity command failed */
2030 printk(KERN_WARNING "cciss: read capacity failed\n");
2032 *block_size = BLOCK_SIZE;
2034 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2035 (unsigned long long)*total_size, *block_size);
2040 static int cciss_revalidate(struct gendisk *disk)
2042 ctlr_info_t *h = get_host(disk);
2043 drive_info_struct *drv = get_drv(disk);
2046 unsigned int block_size;
2047 sector_t total_size;
2048 InquiryData_struct *inq_buff = NULL;
2050 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2051 if (h->drv[logvol].LunID == drv->LunID) {
2060 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2061 if (inq_buff == NULL) {
2062 printk(KERN_WARNING "cciss: out of memory\n");
2065 if (h->cciss_read == CCISS_READ_10) {
2066 cciss_read_capacity(h->ctlr, logvol, 1,
2067 &total_size, &block_size);
2069 cciss_read_capacity_16(h->ctlr, logvol, 1,
2070 &total_size, &block_size);
2072 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2075 blk_queue_hardsect_size(drv->queue, drv->block_size);
2076 set_capacity(disk, drv->nr_blocks);
2083 * Wait polling for a command to complete.
2084 * The memory mapped FIFO is polled for the completion.
2085 * Used only at init time, interrupts from the HBA are disabled.
2087 static unsigned long pollcomplete(int ctlr)
2092 /* Wait (up to 20 seconds) for a command to complete */
2094 for (i = 20 * HZ; i > 0; i--) {
2095 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2096 if (done == FIFO_EMPTY)
2097 schedule_timeout_uninterruptible(1);
2101 /* Invalid address to tell caller we ran out of time */
2105 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2107 /* We get in here if sendcmd() is polling for completions
2108 and gets some command back that it wasn't expecting --
2109 something other than that which it just sent down.
2110 Ordinarily, that shouldn't happen, but it can happen when
2111 the scsi tape stuff gets into error handling mode, and
2112 starts using sendcmd() to try to abort commands and
2113 reset tape drives. In that case, sendcmd may pick up
2114 completions of commands that were sent to logical drives
2115 through the block i/o system, or cciss ioctls completing, etc.
2116 In that case, we need to save those completions for later
2117 processing by the interrupt handler.
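Those saved completions are not lost: sendcmd() drains the reject list at
the end of the command by calling do_cciss_intr() directly (see the
CONFIG_CISS_SCSI_TAPE block near the bottom of sendcmd()).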
2120 #ifdef CONFIG_CISS_SCSI_TAPE
2121 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2123 /* If it's not the scsi tape stuff doing error handling, (abort */
2124 /* or reset) then we don't expect anything weird. */
2125 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2127 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2128 "Invalid command list address returned! (%lx)\n",
2130 /* not much we can do. */
2131 #ifdef CONFIG_CISS_SCSI_TAPE
2135 /* We've sent down an abort or reset, but something else
2137 if (srl->ncompletions >= (NR_CMDS + 2)) {
2138 /* Uh oh. No room to save it for later... */
2139 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2140 "reject list overflow, command lost!\n", ctlr);
2143 /* Save it for later */
2144 srl->complete[srl->ncompletions] = complete;
2145 srl->ncompletions++;
2151 * Send a command to the controller, and wait for it to complete.
2152 * Only used at init time.
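 * It masks the controller's interrupts and busy-waits on the completion FIFO
 * via pollcomplete(), retrying unsolicited aborts up to MAX_CMD_RETRIES times,
 * which is why it must not be used once interrupt-driven I/O is running.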
2154 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2155 1: address logical volume log_unit,
2156 2: periph device address is scsi3addr */
2157 unsigned int log_unit,
2158 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2160 CommandList_struct *c;
2162 unsigned long complete;
2163 ctlr_info_t *info_p = hba[ctlr];
2164 u64bit buff_dma_handle;
2165 int status, done = 0;
2167 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2168 printk(KERN_WARNING "cciss: unable to get memory");
2171 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2172 log_unit, page_code, scsi3addr, cmd_type);
2173 if (status != IO_OK) {
2174 cmd_free(info_p, c, 1);
2182 printk(KERN_DEBUG "cciss: turning intr off\n");
2183 #endif /* CCISS_DEBUG */
2184 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2186 /* Make sure there is room in the command FIFO */
2187 /* Actually it should be completely empty at this time */
2188 /* unless we are in here doing error handling for the scsi */
2189 /* tape side of the driver. */
2190 for (i = 200000; i > 0; i--) {
2191 /* if fifo isn't full go */
2192 if (!(info_p->access.fifo_full(info_p))) {
2197 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2198 " waiting!\n", ctlr);
2203 info_p->access.submit_command(info_p, c);
2206 complete = pollcomplete(ctlr);
2209 printk(KERN_DEBUG "cciss: command completed\n");
2210 #endif /* CCISS_DEBUG */
2212 if (complete == 1) {
2214 "cciss cciss%d: SendCmd Timeout out, "
2215 "No command list address returned!\n", ctlr);
2221 /* This will need to change for direct lookup completions */
2222 if ((complete & CISS_ERROR_BIT)
2223 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2224 /* if data overrun or underrun on Report command
2227 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2228 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2229 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2230 ((c->err_info->CommandStatus ==
2231 CMD_DATA_OVERRUN) ||
2232 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2234 complete = c->busaddr;
2236 if (c->err_info->CommandStatus ==
2237 CMD_UNSOLICITED_ABORT) {
2238 printk(KERN_WARNING "cciss%d: "
2239 "unsolicited abort %p\n",
2241 if (c->retry_count < MAX_CMD_RETRIES) {
2243 "cciss%d: retrying %p\n",
2246 /* erase the old error */
2248 memset(c->err_info, 0,
2250 (ErrorInfo_struct));
2254 "cciss%d: retried %p too "
2255 "many times\n", ctlr, c);
2259 } else if (c->err_info->CommandStatus ==
2262 "cciss%d: command could not be aborted.\n",
2267 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2268 " Error %x \n", ctlr,
2269 c->err_info->CommandStatus);
2270 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2272 " size %x\n num %x value %x\n",
2274 c->err_info->MoreErrInfo.Invalid_Cmd.
2276 c->err_info->MoreErrInfo.Invalid_Cmd.
2278 c->err_info->MoreErrInfo.Invalid_Cmd.
2284 /* This will need changing for direct lookup completions */
2285 if (complete != c->busaddr) {
2286 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2287 BUG(); /* we are pretty much hosed if we get here. */
2295 /* unlock the data buffer from DMA */
2296 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2297 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2298 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2299 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2300 #ifdef CONFIG_CISS_SCSI_TAPE
2301 /* if we saved some commands for later, process them now. */
2302 if (info_p->scsi_rejects.ncompletions > 0)
2303 do_cciss_intr(0, info_p);
2305 cmd_free(info_p, c, 1);
2310 * Map (physical) PCI mem into (virtual) kernel space
2312 static void __iomem *remap_pci_mem(ulong base, ulong size)
2314 ulong page_base = ((ulong) base) & PAGE_MASK;
2315 ulong page_offs = ((ulong) base) - page_base;
2316 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2318 return page_remapped ? (page_remapped + page_offs) : NULL;
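/*
 * Illustrative sketch only (the helper name below is hypothetical, not part
 * of this driver): a typical caller hands remap_pci_mem() a BAR start
 * address plus an offset into that BAR, which is exactly what
 * cciss_pci_init() does later for the controller's config table.  Error
 * handling is elided.
 */
static void __iomem *example_map_region(struct pci_dev *pdev, int bar,
        unsigned long offset, unsigned long len)
{
        /* physical address of the region = BAR base + offset into that BAR */
        return remap_pci_mem(pci_resource_start(pdev, bar) + offset, len);
}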
2322 * Takes jobs off the Q and sends them to the hardware, then puts them on
2323 * the Q to wait for completion.
2325 static void start_io(ctlr_info_t *h)
2327 CommandList_struct *c;
2329 while ((c = h->reqQ) != NULL) {
2330 /* can't do anything if fifo is full */
2331 if ((h->access.fifo_full(h))) {
2332 printk(KERN_WARNING "cciss: fifo full\n");
2336 /* Get the first entry from the Request Q */
2337 removeQ(&(h->reqQ), c);
2340 /* Tell the controller to execute the command */
2341 h->access.submit_command(h, c);
2343 /* Put job onto the completed Q */
2344 addQ(&(h->cmpQ), c);
2348 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2349 /* Zeros out the error record and then resends the command back */
2350 /* to the controller */
2351 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2353 /* erase the old error information */
2354 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2356 /* add it to software queue and then send it to the controller */
2357 addQ(&(h->reqQ), c);
2359 if (h->Qdepth > h->maxQsinceinit)
2360 h->maxQsinceinit = h->Qdepth;
2365 /* checks the status of the job and calls complete buffers to mark all
2366 * buffers for the completed job. Note that this function does not need
2367 * to hold the hba/queue lock.
2369 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2378 if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
2379 switch (cmd->err_info->CommandStatus) {
2380 unsigned char sense_key;
2381 case CMD_TARGET_STATUS:
2384 if (cmd->err_info->ScsiStatus == 0x02) {
2385 printk(KERN_WARNING "cciss: cmd %p "
2386 "has CHECK CONDITION "
2387 " byte 2 = 0x%x\n", cmd,
2388 cmd->err_info->SenseInfo[2]
2390 /* check the sense key */
2391 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2392 /* no status or recovered error */
2393 if ((sense_key == 0x0) || (sense_key == 0x1)) {
2397 printk(KERN_WARNING "cciss: cmd %p "
2398 "has SCSI Status 0x%x\n",
2399 cmd, cmd->err_info->ScsiStatus);
2402 case CMD_DATA_UNDERRUN:
2403 printk(KERN_WARNING "cciss: cmd %p has"
2404 " completed with data underrun "
2407 case CMD_DATA_OVERRUN:
2408 printk(KERN_WARNING "cciss: cmd %p has"
2409 " completed with data overrun "
2413 printk(KERN_WARNING "cciss: cmd %p is "
2414 "reported invalid\n", cmd);
2417 case CMD_PROTOCOL_ERR:
2418 printk(KERN_WARNING "cciss: cmd %p has "
2419 "protocol error \n", cmd);
2422 case CMD_HARDWARE_ERR:
2423 printk(KERN_WARNING "cciss: cmd %p had "
2424 " hardware error\n", cmd);
2427 case CMD_CONNECTION_LOST:
2428 printk(KERN_WARNING "cciss: cmd %p had "
2429 "connection lost\n", cmd);
2433 printk(KERN_WARNING "cciss: cmd %p was "
2437 case CMD_ABORT_FAILED:
2438 printk(KERN_WARNING "cciss: cmd %p reports "
2439 "abort failed\n", cmd);
2442 case CMD_UNSOLICITED_ABORT:
2443 printk(KERN_WARNING "cciss%d: unsolicited "
2444 "abort %p\n", h->ctlr, cmd);
2445 if (cmd->retry_count < MAX_CMD_RETRIES) {
2448 "cciss%d: retrying %p\n", h->ctlr, cmd);
2452 "cciss%d: %p retried too "
2453 "many times\n", h->ctlr, cmd);
2457 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2461 printk(KERN_WARNING "cciss: cmd %p returned "
2462 "unknown status %x\n", cmd,
2463 cmd->err_info->CommandStatus);
2467 /* We need to return this command */
2469 resend_cciss_cmd(h, cmd);
2473 cmd->rq->completion_data = cmd;
2474 cmd->rq->errors = status;
2475 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2476 blk_complete_request(cmd->rq);
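/*
 * Note: blk_complete_request() above only schedules the block-layer softirq;
 * the heavy lifting happens in cciss_softirq_done(), which is registered
 * per-queue with blk_queue_softirq_done() in cciss_init_one().  A minimal
 * sketch of that split, based only on what is visible in this file (the
 * name example_softirq_done is hypothetical, not the driver's real handler):
 */
static void example_softirq_done(struct request *rq)
{
        /* the interrupt half stashed the command in rq->completion_data */
        CommandList_struct *cmd = rq->completion_data;
        ctlr_info_t *h = rq->q->queuedata;

        /* the real handler also unmaps cmd->SG[] DMA, completes the bios,
         * and takes the controller lock before releasing the command */
        cmd_free(h, cmd, 1);
}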
2480 * Get a request and submit it to the controller.
2482 static void do_cciss_request(request_queue_t *q)
2484 ctlr_info_t *h = q->queuedata;
2485 CommandList_struct *c;
2488 struct request *creq;
2490 struct scatterlist tmp_sg[MAXSGENTRIES];
2491 drive_info_struct *drv;
2494 /* We call start_io here in case there is a command waiting on the
2495 * queue that has not been sent.
2497 if (blk_queue_plugged(q))
2501 creq = elv_next_request(q);
2505 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2507 if ((c = cmd_alloc(h, 1)) == NULL)
2510 blkdev_dequeue_request(creq);
2512 spin_unlock_irq(q->queue_lock);
2514 c->cmd_type = CMD_RWREQ;
2517 /* fill in the request */
2518 drv = creq->rq_disk->private_data;
2519 c->Header.ReplyQueue = 0; // unused in simple mode
2520 /* got command from pool, so use the command block index instead */
2521 /* for direct lookups. */
2522 /* The first 2 bits are reserved for controller error reporting. */
2523 c->Header.Tag.lower = (c->cmdindex << 3);
2524 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
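/*
 * Worked example of the tag layout (illustration only): bits 0-1 are the
 * controller's error-reporting bits, bit 2 flags a direct-lookup tag, and
 * the command index lives in bits 3 and up.  For cmdindex 5 the tag becomes
 * (5 << 3) | 0x04 = 0x2c, and stripping the low three bits of a completed
 * tag (0x2c >> 3 = 5) gives back the cmd_pool index.
 */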
2525 c->Header.LUN.LogDev.VolId = drv->LunID;
2526 c->Header.LUN.LogDev.Mode = 1;
2527 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2528 c->Request.Type.Type = TYPE_CMD; // It is a command.
2529 c->Request.Type.Attribute = ATTR_SIMPLE;
2530 c->Request.Type.Direction =
2531 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2532 c->Request.Timeout = 0; // Don't time out
2534 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2535 start_blk = creq->sector;
2537 printk(KERN_DEBUG "cciss: sector = %d, nr_sectors = %d\n", (int)creq->sector,
2538 (int)creq->nr_sectors);
2539 #endif /* CCISS_DEBUG */
2541 seg = blk_rq_map_sg(q, creq, tmp_sg);
2543 /* get the DMA records for the setup */
2544 if (c->Request.Type.Direction == XFER_READ)
2545 dir = PCI_DMA_FROMDEVICE;
2547 dir = PCI_DMA_TODEVICE;
2549 for (i = 0; i < seg; i++) {
2550 c->SG[i].Len = tmp_sg[i].length;
2551 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2553 tmp_sg[i].length, dir);
2554 c->SG[i].Addr.lower = temp64.val32.lower;
2555 c->SG[i].Addr.upper = temp64.val32.upper;
2556 c->SG[i].Ext = 0; // we are not chaining
2558 /* track how many SG entries we are using */
2563 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2564 creq->nr_sectors, seg);
2565 #endif /* CCISS_DEBUG */
2567 c->Header.SGList = c->Header.SGTotal = seg;
2568 if(h->cciss_read == CCISS_READ_10) {
2569 c->Request.CDB[1] = 0;
2570 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2571 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2572 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2573 c->Request.CDB[5] = start_blk & 0xff;
2574 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2575 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2576 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2577 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2579 c->Request.CDBLen = 16;
2580 c->Request.CDB[1] = 0;
2581 c->Request.CDB[2] = (start_blk >> 56) & 0xff; //MSB
2582 c->Request.CDB[3] = (start_blk >> 48) & 0xff;
2583 c->Request.CDB[4] = (start_blk >> 40) & 0xff;
2584 c->Request.CDB[5] = (start_blk >> 32) & 0xff;
2585 c->Request.CDB[6] = (start_blk >> 24) & 0xff;
2586 c->Request.CDB[7] = (start_blk >> 16) & 0xff;
2587 c->Request.CDB[8] = (start_blk >> 8) & 0xff;
2588 c->Request.CDB[9] = start_blk & 0xff;
2589 c->Request.CDB[10] = (creq->nr_sectors >> 24) & 0xff;
2590 c->Request.CDB[11] = (creq->nr_sectors >> 16) & 0xff;
2591 c->Request.CDB[12] = (creq->nr_sectors >> 8) & 0xff;
2592 c->Request.CDB[13] = creq->nr_sectors & 0xff;
2593 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2596 spin_lock_irq(q->queue_lock);
2598 addQ(&(h->reqQ), c);
2600 if (h->Qdepth > h->maxQsinceinit)
2601 h->maxQsinceinit = h->Qdepth;
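/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * the CDB bytes assembled by hand in do_cciss_request() above are plain
 * big-endian encodings of the start block and sector count.  The same idea
 * expressed as a helper:
 */
static inline void example_put_be32(unsigned char *p, __u32 v)
{
        p[0] = (v >> 24) & 0xff;        /* most significant byte first */
        p[1] = (v >> 16) & 0xff;
        p[2] = (v >> 8) & 0xff;
        p[3] = v & 0xff;
}
/* e.g. example_put_be32(&c->Request.CDB[2], start_blk) reproduces the
 * READ(10)/WRITE(10) LBA bytes written out explicitly above. */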
2607 /* We will already have the driver lock here, so no need to lock it. */
2613 static inline unsigned long get_next_completion(ctlr_info_t *h)
2615 #ifdef CONFIG_CISS_SCSI_TAPE
2616 /* Any rejects from sendcmd() lying around? Process them first */
2617 if (h->scsi_rejects.ncompletions == 0)
2618 return h->access.command_completed(h);
2620 struct sendcmd_reject_list *srl;
2622 srl = &h->scsi_rejects;
2623 n = --srl->ncompletions;
2624 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2626 return srl->complete[n];
2629 return h->access.command_completed(h);
2633 static inline int interrupt_pending(ctlr_info_t *h)
2635 #ifdef CONFIG_CISS_SCSI_TAPE
2636 return (h->access.intr_pending(h)
2637 || (h->scsi_rejects.ncompletions > 0));
2639 return h->access.intr_pending(h);
2643 static inline long interrupt_not_for_us(ctlr_info_t *h)
2645 #ifdef CONFIG_CISS_SCSI_TAPE
2646 return (((h->access.intr_pending(h) == 0) ||
2647 (h->interrupts_enabled == 0))
2648 && (h->scsi_rejects.ncompletions == 0));
2650 return (((h->access.intr_pending(h) == 0) ||
2651 (h->interrupts_enabled == 0)));
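/*
 * Note on the three helpers above: when CONFIG_CISS_SCSI_TAPE is set, a
 * non-empty scsi_rejects list is treated exactly like a pending hardware
 * interrupt.  That lets sendcmd() call do_cciss_intr() directly (see the
 * CONFIG_CISS_SCSI_TAPE block at the end of sendcmd()), and
 * get_next_completion() then drains the saved completions, most recent
 * first, before reading anything from the controller's completion FIFO.
 */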
2655 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2657 ctlr_info_t *h = dev_id;
2658 CommandList_struct *c;
2659 unsigned long flags;
2662 if (interrupt_not_for_us(h))
2665 * If there are completed commands in the completion queue,
2666 * we had better do something about it.
2668 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2669 while (interrupt_pending(h)) {
2670 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2674 if (a2 >= NR_CMDS) {
2676 "cciss: controller cciss%d failed, stopping.\n",
2678 fail_all_cmds(h->ctlr);
2682 c = h->cmd_pool + a2;
2687 if ((c = h->cmpQ) == NULL) {
2689 "cciss: Completion of %08x ignored\n",
2693 while (c->busaddr != a) {
2700 * If we've found the command, take it off the
2701 * completion Q and free it
2703 if (c->busaddr == a) {
2704 removeQ(&h->cmpQ, c);
2705 if (c->cmd_type == CMD_RWREQ) {
2706 complete_command(h, c, 0);
2707 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2708 complete(c->waiting);
2710 # ifdef CONFIG_CISS_SCSI_TAPE
2711 else if (c->cmd_type == CMD_SCSI)
2712 complete_scsi_command(c, 0, a1);
2719 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2724 * We cannot read the structure directly; for portability we must go through the readb/readl io accessors. This is for debug only.
2729 static void print_cfg_table(CfgTable_struct *tb)
2734 printk("Controller Configuration information\n");
2735 printk("------------------------------------\n");
2736 for (i = 0; i < 4; i++)
2737 temp_name[i] = readb(&(tb->Signature[i]));
2738 temp_name[4] = '\0';
2739 printk(" Signature = %s\n", temp_name);
2740 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2741 printk(" Transport methods supported = 0x%x\n",
2742 readl(&(tb->TransportSupport)));
2743 printk(" Transport methods active = 0x%x\n",
2744 readl(&(tb->TransportActive)));
2745 printk(" Requested transport Method = 0x%x\n",
2746 readl(&(tb->HostWrite.TransportRequest)));
2747 printk(" Coalesce Interrupt Delay = 0x%x\n",
2748 readl(&(tb->HostWrite.CoalIntDelay)));
2749 printk(" Coalesce Interrupt Count = 0x%x\n",
2750 readl(&(tb->HostWrite.CoalIntCount)));
2751 printk(" Max outstanding commands = %d\n",
2752 readl(&(tb->CmdsOutMax)));
2753 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2754 for (i = 0; i < 16; i++)
2755 temp_name[i] = readb(&(tb->ServerName[i]));
2756 temp_name[16] = '\0';
2757 printk(" Server Name = %s\n", temp_name);
2758 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2760 #endif /* CCISS_DEBUG */
2762 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2764 int i, offset, mem_type, bar_type;
2765 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2768 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2769 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2770 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2773 mem_type = pci_resource_flags(pdev, i) &
2774 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2776 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2777 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2778 offset += 4; /* 32 bit */
2780 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2783 default: /* reserved in PCI 2.2 */
2785 "Base address is invalid\n");
2790 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
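/*
 * Worked example (illustration only): if BAR 0 is a plain 32-bit memory BAR
 * and the controller reports its config table at PCI_BASE_ADDRESS_1 (0x14),
 * the loop adds 4 for resource 0, the running offset matches 0x14 - 0x10,
 * and the function returns resource index 1.  I/O BARs and 64-bit memory
 * BARs advance the running offset by 4 and 8 respectively, since that is
 * how much config space they occupy.
 */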
2796 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2797 * controllers that are capable. If not, we use IO-APIC mode.
2800 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2801 struct pci_dev *pdev, __u32 board_id)
2803 #ifdef CONFIG_PCI_MSI
2805 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2809 /* Some boards advertise MSI but don't really support it */
2810 if ((board_id == 0x40700E11) ||
2811 (board_id == 0x40800E11) ||
2812 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2813 goto default_int_mode;
2815 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2816 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2818 c->intr[0] = cciss_msix_entries[0].vector;
2819 c->intr[1] = cciss_msix_entries[1].vector;
2820 c->intr[2] = cciss_msix_entries[2].vector;
2821 c->intr[3] = cciss_msix_entries[3].vector;
2826 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2827 "available\n", err);
2829 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2833 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2834 if (!pci_enable_msi(pdev)) {
2835 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2839 printk(KERN_WARNING "cciss: MSI init failed\n");
2840 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2845 #endif /* CONFIG_PCI_MSI */
2846 /* if we get here we're going to use the default interrupt mode */
2847 c->intr[SIMPLE_MODE_INT] = pdev->irq;
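/*
 * Whichever vector(s) land in c->intr[] here are what cciss_init_one()
 * later hands to request_irq() (c->intr[SIMPLE_MODE_INT] in the
 * single-vector cases).  The teardown path in cciss_remove_one() checks
 * hba[i]->msix_vector / hba[i]->msi_vector so it can undo whatever was
 * enabled here with pci_disable_msix() or pci_disable_msi().
 */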
2851 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2853 ushort subsystem_vendor_id, subsystem_device_id, command;
2854 __u32 board_id, scratchpad = 0;
2856 __u32 cfg_base_addr;
2857 __u64 cfg_base_addr_index;
2860 /* check to see if controller has been disabled */
2861 /* BEFORE trying to enable it */
2862 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2863 if (!(command & PCI_COMMAND_MEMORY)) {
2865 "cciss: controller appears to be disabled\n");
2869 err = pci_enable_device(pdev);
2871 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2875 err = pci_request_regions(pdev, "cciss");
2877 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2879 goto err_out_disable_pdev;
2882 subsystem_vendor_id = pdev->subsystem_vendor;
2883 subsystem_device_id = pdev->subsystem_device;
2884 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2885 subsystem_vendor_id);
2888 printk("command = %x\n", command);
2889 printk("irq = %x\n", pdev->irq);
2890 printk("board_id = %x\n", board_id);
2891 #endif /* CCISS_DEBUG */
2893 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2894 * else we use the IO-APIC interrupt assigned to us by system ROM.
2896 cciss_interrupt_mode(c, pdev, board_id);
2899 * Memory base addr is the first address; the second points to the config table.
2903 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2905 printk("address 0 = %x\n", c->paddr);
2906 #endif /* CCISS_DEBUG */
2907 c->vaddr = remap_pci_mem(c->paddr, 200);
2909 /* Wait for the board to become ready. (PCI hotplug needs this.)
2910 * We poll for up to 120 secs, once per 100ms. */
2911 for (i = 0; i < 1200; i++) {
2912 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2913 if (scratchpad == CCISS_FIRMWARE_READY)
2915 set_current_state(TASK_INTERRUPTIBLE);
2916 schedule_timeout(HZ / 10); /* wait 100ms */
2918 if (scratchpad != CCISS_FIRMWARE_READY) {
2919 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2921 goto err_out_free_res;
2924 /* get the address index number */
2925 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2926 cfg_base_addr &= (__u32) 0x0000ffff;
2928 printk("cfg base address = %x\n", cfg_base_addr);
2929 #endif /* CCISS_DEBUG */
2930 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2932 printk("cfg base address index = %llx\n", (unsigned long long)cfg_base_addr_index);
2933 #endif /* CCISS_DEBUG */
2934 if (cfg_base_addr_index == -1) {
2935 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2937 goto err_out_free_res;
2940 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2942 printk("cfg offset = %x\n", cfg_offset);
2943 #endif /* CCISS_DEBUG */
2944 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2945 cfg_base_addr_index) +
2946 cfg_offset, sizeof(CfgTable_struct));
2947 c->board_id = board_id;
2950 print_cfg_table(c->cfgtable);
2951 #endif /* CCISS_DEBUG */
2953 for (i = 0; i < ARRAY_SIZE(products); i++) {
2954 if (board_id == products[i].board_id) {
2955 c->product_name = products[i].product_name;
2956 c->access = *(products[i].access);
2960 if (i == ARRAY_SIZE(products)) {
2961 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2962 " to access the Smart Array controller %08lx\n",
2963 (unsigned long)board_id);
2965 goto err_out_free_res;
2967 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2968 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2969 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2970 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2971 printk(KERN_WARNING "cciss: does not appear to be a valid CISS config table\n");
2973 goto err_out_free_res;
2977 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2979 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2981 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2986 printk("Trying to put board into Simple mode\n");
2987 #endif /* CCISS_DEBUG */
2988 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2989 /* Update the field, and then ring the doorbell */
2990 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
2991 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2993 /* under certain very rare conditions, this can take a while.
2994 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2995 * as we enter this code.) */
2996 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
2997 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2999 /* delay and try again */
3000 set_current_state(TASK_INTERRUPTIBLE);
3001 schedule_timeout(10);
3005 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3006 readl(c->vaddr + SA5_DOORBELL));
3007 #endif /* CCISS_DEBUG */
3009 print_cfg_table(c->cfgtable);
3010 #endif /* CCISS_DEBUG */
3012 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3013 printk(KERN_WARNING "cciss: unable to get board into simple mode\n");
3016 goto err_out_free_res;
3021 pci_release_regions(pdev);
3023 err_out_disable_pdev:
3024 pci_disable_device(pdev);
3029 * Gets information about the local volumes attached to the controller.
3031 static void cciss_getgeometry(int cntl_num)
3033 ReportLunData_struct *ld_buff;
3034 InquiryData_struct *inq_buff;
3040 sector_t total_size;
3042 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3043 if (ld_buff == NULL) {
3044 printk(KERN_ERR "cciss: out of memory\n");
3047 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3048 if (inq_buff == NULL) {
3049 printk(KERN_ERR "cciss: out of memory\n");
3053 /* Get the firmware version */
3054 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3055 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3057 if (return_code == IO_OK) {
3058 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3059 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3060 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3061 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3062 } else { /* send command failed */
3064 printk(KERN_WARNING "cciss: unable to determine firmware"
3065 " version of controller\n");
3067 /* Get the number of logical volumes */
3068 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3069 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3072 if (return_code == IO_OK) {
3074 printk("LUN Data\n--------------------------\n");
3075 #endif /* CCISS_DEBUG */
3078 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3080 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3082 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3083 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
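/* Worked example (illustration only): the report comes back big-endian, so
 * LUNListLength bytes of 00 00 00 40 decode to 0x40 = 64, which at 8 bytes
 * per entry means 8 logical volumes. */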
3084 } else { /* reading number of logical volumes failed */
3086 printk(KERN_WARNING "cciss: report logical volume"
3087 " command failed\n");
3090 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3091 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3093 "ciss: only %d number of logical volumes supported\n",
3095 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3098 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3099 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3100 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3101 hba[cntl_num]->num_luns);
3102 #endif /* CCISS_DEBUG */
3104 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3105 for (i = 0; i < CISS_MAX_LUN; i++) {
3106 if (i < hba[cntl_num]->num_luns) {
3107 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
3109 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
3111 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
3113 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3115 hba[cntl_num]->drv[i].LunID = lunid;
3118 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3119 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3120 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3121 hba[cntl_num]->drv[i].LunID);
3122 #endif /* CCISS_DEBUG */
3124 /* testing to see if 16-byte CDBs are already being used */
3125 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3126 cciss_read_capacity_16(cntl_num, i, 0,
3127 &total_size, &block_size);
3130 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3132 /* total_size = last LBA + 1 */
3133 if(total_size == (__u32) 0) {
3134 cciss_read_capacity_16(cntl_num, i, 0,
3135 &total_size, &block_size);
3136 hba[cntl_num]->cciss_read = CCISS_READ_16;
3137 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3139 hba[cntl_num]->cciss_read = CCISS_READ_10;
3140 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3143 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3144 block_size, inq_buff,
3145 &hba[cntl_num]->drv[i]);
3147 /* initialize raid_level to indicate a free space */
3148 hba[cntl_num]->drv[i].raid_level = -1;
3155 /* Function to find the first free pointer into our hba[] array */
3156 /* Returns -1 if no free entries are left. */
3157 static int alloc_cciss_hba(void)
3159 struct gendisk *disk[NWD];
3161 for (n = 0; n < NWD; n++) {
3162 disk[n] = alloc_disk(1 << NWD_SHIFT);
3167 for (i = 0; i < MAX_CTLR; i++) {
3170 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3173 for (n = 0; n < NWD; n++)
3174 p->gendisk[n] = disk[n];
3179 printk(KERN_WARNING "cciss: This driver supports a maximum"
3180 " of %d controllers.\n", MAX_CTLR);
3183 printk(KERN_ERR "cciss: out of memory.\n");
3190 static void free_hba(int i)
3192 ctlr_info_t *p = hba[i];
3196 for (n = 0; n < NWD; n++)
3197 put_disk(p->gendisk[n]);
3202 * This is it. Find all the controllers and register them. I really hate
3203 * stealing all these major device numbers.
3204 * Returns the number of block devices registered.
3206 static int __devinit cciss_init_one(struct pci_dev *pdev,
3207 const struct pci_device_id *ent)
3215 i = alloc_cciss_hba();
3219 hba[i]->busy_initializing = 1;
3221 if (cciss_pci_init(hba[i], pdev) != 0)
3224 sprintf(hba[i]->devname, "cciss%d", i);
3226 hba[i]->pdev = pdev;
3228 /* configure PCI DMA stuff */
3229 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3231 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3234 printk(KERN_ERR "cciss: no suitable DMA available\n");
3239 * register with the major number, or get a dynamic major number
3240 * by passing 0 as argument. This is done for greater than
3241 * 8 controller support.
3243 if (i < MAX_CTLR_ORIG)
3244 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3245 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3246 if (rc == -EBUSY || rc == -EINVAL) {
3248 "cciss: Unable to get major number %d for %s "
3249 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3252 if (i >= MAX_CTLR_ORIG)
3256 /* make sure the board interrupts are off */
3257 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3258 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3259 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3260 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3261 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3265 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3266 hba[i]->devname, pdev->device, pci_name(pdev),
3267 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3269 hba[i]->cmd_pool_bits =
3270 kmalloc(((NR_CMDS + BITS_PER_LONG -
3271 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
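/* The kmalloc size above is just DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG)
 * unsigned longs: one allocation bit per command slot, rounded up to whole
 * longs.  For example, if NR_CMDS were 384 on a 64-bit build, that would be
 * 6 longs (48 bytes). */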
3272 hba[i]->cmd_pool = (CommandList_struct *)
3273 pci_alloc_consistent(hba[i]->pdev,
3274 NR_CMDS * sizeof(CommandList_struct),
3275 &(hba[i]->cmd_pool_dhandle));
3276 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3277 pci_alloc_consistent(hba[i]->pdev,
3278 NR_CMDS * sizeof(ErrorInfo_struct),
3279 &(hba[i]->errinfo_pool_dhandle));
3280 if ((hba[i]->cmd_pool_bits == NULL)
3281 || (hba[i]->cmd_pool == NULL)
3282 || (hba[i]->errinfo_pool == NULL)) {
3283 printk(KERN_ERR "cciss: out of memory\n");
3286 #ifdef CONFIG_CISS_SCSI_TAPE
3287 hba[i]->scsi_rejects.complete =
3288 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3289 (NR_CMDS + 5), GFP_KERNEL);
3290 if (hba[i]->scsi_rejects.complete == NULL) {
3291 printk(KERN_ERR "cciss: out of memory\n");
3295 spin_lock_init(&hba[i]->lock);
3297 /* Initialize the pdev driver private data.
3298 Have it point to hba[i]. */
3299 pci_set_drvdata(pdev, hba[i]);
3300 /* command and error info recs zeroed out before they are used */
3302 memset(hba[i]->cmd_pool_bits, 0,
3303 ((NR_CMDS + BITS_PER_LONG -
3304 1) / BITS_PER_LONG) * sizeof(unsigned long));
3307 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3308 #endif /* CCISS_DEBUG */
3310 cciss_getgeometry(i);
3312 cciss_scsi_setup(i);
3314 /* Turn the interrupts on so we can service requests */
3315 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3318 hba[i]->busy_initializing = 0;
3320 for (j = 0; j < NWD; j++) { /* mfm */
3321 drive_info_struct *drv = &(hba[i]->drv[j]);
3322 struct gendisk *disk = hba[i]->gendisk[j];
3324 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3327 "cciss: unable to allocate queue for disk %d\n",
3333 q->backing_dev_info.ra_pages = READ_AHEAD;
3334 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3336 /* This is a hardware imposed limit. */
3337 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3339 /* This is a limit in the driver and could be eliminated. */
3340 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3342 blk_queue_max_sectors(q, 512);
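/* i.e. requests are capped at 512 sectors of 512 bytes = 256 KiB each */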
3344 blk_queue_softirq_done(q, cciss_softirq_done);
3346 q->queuedata = hba[i];
3347 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3348 disk->major = hba[i]->major;
3349 disk->first_minor = j << NWD_SHIFT;
3350 disk->fops = &cciss_fops;
3352 disk->private_data = drv;
3353 disk->driverfs_dev = &pdev->dev;
3354 /* we must register the controller even if no disks exist */
3355 /* this is for the online array utilities */
3356 if (!drv->heads && j)
3358 blk_queue_hardsect_size(q, drv->block_size);
3359 set_capacity(disk, drv->nr_blocks);
3366 #ifdef CONFIG_CISS_SCSI_TAPE
3367 kfree(hba[i]->scsi_rejects.complete);
3369 kfree(hba[i]->cmd_pool_bits);
3370 if (hba[i]->cmd_pool)
3371 pci_free_consistent(hba[i]->pdev,
3372 NR_CMDS * sizeof(CommandList_struct),
3373 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3374 if (hba[i]->errinfo_pool)
3375 pci_free_consistent(hba[i]->pdev,
3376 NR_CMDS * sizeof(ErrorInfo_struct),
3377 hba[i]->errinfo_pool,
3378 hba[i]->errinfo_pool_dhandle);
3379 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3381 unregister_blkdev(hba[i]->major, hba[i]->devname);
3383 hba[i]->busy_initializing = 0;
3388 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3390 ctlr_info_t *tmp_ptr;
3395 if (pci_get_drvdata(pdev) == NULL) {
3396 printk(KERN_ERR "cciss: Unable to remove device\n");
3399 tmp_ptr = pci_get_drvdata(pdev);
3401 if (hba[i] == NULL) {
3402 printk(KERN_ERR "cciss: device appears to "
3403 "already be removed\n");
3406 /* Turn board interrupts off and send the flush cache command */
3407 /* sendcmd will turn off interrupts, and send the flush...
3408 * to write all data in the battery-backed cache to the disks */
3409 memset(flush_buf, 0, 4);
3410 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3412 if (return_code != IO_OK) {
3413 printk(KERN_WARNING "Error flushing cache on controller %d\n",
3416 free_irq(hba[i]->intr[2], hba[i]);
3418 #ifdef CONFIG_PCI_MSI
3419 if (hba[i]->msix_vector)
3420 pci_disable_msix(hba[i]->pdev);
3421 else if (hba[i]->msi_vector)
3422 pci_disable_msi(hba[i]->pdev);
3423 #endif /* CONFIG_PCI_MSI */
3425 iounmap(hba[i]->vaddr);
3426 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3427 unregister_blkdev(hba[i]->major, hba[i]->devname);
3428 remove_proc_entry(hba[i]->devname, proc_cciss);
3430 /* remove it from the disk list */
3431 for (j = 0; j < NWD; j++) {
3432 struct gendisk *disk = hba[i]->gendisk[j];
3434 request_queue_t *q = disk->queue;
3436 if (disk->flags & GENHD_FL_UP)
3439 blk_cleanup_queue(q);
3443 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3444 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3445 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
3446 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3447 kfree(hba[i]->cmd_pool_bits);
3448 #ifdef CONFIG_CISS_SCSI_TAPE
3449 kfree(hba[i]->scsi_rejects.complete);
3451 pci_release_regions(pdev);
3452 pci_disable_device(pdev);
3453 pci_set_drvdata(pdev, NULL);
3457 static struct pci_driver cciss_pci_driver = {
3459 .probe = cciss_init_one,
3460 .remove = __devexit_p(cciss_remove_one),
3461 .id_table = cciss_pci_device_id, /* id_table */
3465 * This is it. Register the PCI driver information for the cards we control;
3466 * the OS will call our registered routines when it finds one of our cards.
3468 static int __init cciss_init(void)
3470 printk(KERN_INFO DRIVER_NAME "\n");
3472 /* Register for our PCI devices */
3473 return pci_register_driver(&cciss_pci_driver);
3476 static void __exit cciss_cleanup(void)
3480 pci_unregister_driver(&cciss_pci_driver);
3481 /* double check that all controller entries have been removed */
3482 for (i = 0; i < MAX_CTLR; i++) {
3483 if (hba[i] != NULL) {
3484 printk(KERN_WARNING "cciss: had to remove"
3485 " controller %d\n", i);
3486 cciss_remove_one(hba[i]->pdev);
3489 remove_proc_entry("cciss", proc_root_driver);
3492 static void fail_all_cmds(unsigned long ctlr)
3494 /* If we get here, the board is apparently dead. */
3495 ctlr_info_t *h = hba[ctlr];
3496 CommandList_struct *c;
3497 unsigned long flags;
3499 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3500 h->alive = 0; /* the controller apparently died... */
3502 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3504 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3506 /* move everything off the request queue onto the completed queue */
3507 while ((c = h->reqQ) != NULL) {
3508 removeQ(&(h->reqQ), c);
3510 addQ(&(h->cmpQ), c);
3513 /* Now, fail everything on the completed queue with a HW error */
3514 while ((c = h->cmpQ) != NULL) {
3515 removeQ(&h->cmpQ, c);
3516 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3517 if (c->cmd_type == CMD_RWREQ) {
3518 complete_command(h, c, 0);
3519 } else if (c->cmd_type == CMD_IOCTL_PEND)
3520 complete(c->waiting);
3521 #ifdef CONFIG_CISS_SCSI_TAPE
3522 else if (c->cmd_type == CMD_SCSI)
3523 complete_scsi_command(c, 0, 0);
3526 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3530 module_init(cciss_init);
3531 module_exit(cciss_cleanup);