2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/compat.h>
40 #include <linux/blktrace_api.h>
41 #include <asm/uaccess.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
48 #include <scsi/scsi.h>
50 #include <scsi/scsi_ioctl.h>
51 #include <linux/cdrom.h>
53 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
54 #define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
55 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
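/* For reference, CCISS_DRIVER_VERSION(3,6,14) packs to
 * (3 << 16) | (6 << 8) | 14 == 0x03060e, the value handed back to user
 * space by the CCISS_GETDRIVVER ioctl below. */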
57 /* Embedded module documentation macros - see modules.h */
58 MODULE_AUTHOR("Hewlett-Packard Company");
59 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
60 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
61 " SA6i P600 P800 P400 P400i E200 E200i E500");
62 MODULE_VERSION("3.6.14");
63 MODULE_LICENSE("GPL");
65 #include "cciss_cmd.h"
67 #include <linux/cciss_ioctl.h>
69 /* define the PCI info for the cards we can control */
70 static const struct pci_device_id cciss_pci_device_id[] = {
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
76 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
77 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
78 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
79 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
91 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
92 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
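/* The final entry is a catch-all: any HP board whose PCI class is
 * storage/RAID is claimed, so controllers newer than the explicit IDs
 * above can still bind to this driver. */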
96 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
98 /* board_id = Subsystem Device ID & Vendor ID
99 * product = Marketing Name for the board
100 * access = Address of the struct of function pointers
101 * nr_cmds = Number of commands supported by controller
103 static struct board_type products[] = {
104 {0x40700E11, "Smart Array 5300", &SA5_access, 512},
105 {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
106 {0x40820E11, "Smart Array 532", &SA5B_access, 512},
107 {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
108 {0x409A0E11, "Smart Array 641", &SA5_access, 512},
109 {0x409B0E11, "Smart Array 642", &SA5_access, 512},
110 {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
111 {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
112 {0x40910E11, "Smart Array 6i", &SA5_access, 512},
113 {0x3225103C, "Smart Array P600", &SA5_access, 512},
114 {0x3223103C, "Smart Array P800", &SA5_access, 512},
115 {0x3234103C, "Smart Array P400", &SA5_access, 512},
116 {0x3235103C, "Smart Array P400i", &SA5_access, 512},
117 {0x3211103C, "Smart Array E200i", &SA5_access, 120},
118 {0x3212103C, "Smart Array E200", &SA5_access, 120},
119 {0x3213103C, "Smart Array E200i", &SA5_access, 120},
120 {0x3214103C, "Smart Array E200i", &SA5_access, 120},
121 {0x3215103C, "Smart Array E200i", &SA5_access, 120},
122 {0x3237103C, "Smart Array E500", &SA5_access, 512},
123 {0x323D103C, "Smart Array P700m", &SA5_access, 512},
124 {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
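/* board_id is (subsystem device ID << 16) | subsystem vendor ID, so e.g.
 * 0x3225103C above is subdevice 0x3225 from subvendor 0x103C (HP),
 * matching the PCI table entries. */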
127 /* How long to wait (in milliseconds) for board to go into simple mode */
128 #define MAX_CONFIG_WAIT 30000
129 #define MAX_IOCTL_CONFIG_WAIT 1000
131 /* define how many times we will try a command because of bus resets */
132 #define MAX_CMD_RETRIES 3
134 #define READ_AHEAD 1024
137 /* Originally the cciss driver supported only 8 major numbers */
138 #define MAX_CTLR_ORIG 8
140 static ctlr_info_t *hba[MAX_CTLR];
142 static void do_cciss_request(request_queue_t *q);
143 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
144 static int cciss_open(struct inode *inode, struct file *filep);
145 static int cciss_release(struct inode *inode, struct file *filep);
146 static int cciss_ioctl(struct inode *inode, struct file *filep,
147 unsigned int cmd, unsigned long arg);
148 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
150 static int cciss_revalidate(struct gendisk *disk);
151 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
152 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
155 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
156 sector_t *total_size, unsigned int *block_size);
157 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
158 sector_t *total_size, unsigned int *block_size);
159 static void cciss_geometry_inquiry(int ctlr, int logvol,
160 int withirq, sector_t total_size,
161 unsigned int block_size, InquiryData_struct *inq_buff,
162 drive_info_struct *drv);
163 static void cciss_getgeometry(int cntl_num);
164 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
166 static void start_io(ctlr_info_t *h);
167 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
168 unsigned int use_unit_num, unsigned int log_unit,
169 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
170 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
171 unsigned int use_unit_num, unsigned int log_unit,
172 __u8 page_code, int cmd_type);
174 static void fail_all_cmds(unsigned long ctlr);
176 #ifdef CONFIG_PROC_FS
177 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
178 int length, int *eof, void *data);
179 static void cciss_procinit(int i);
181 static void cciss_procinit(int i)
184 #endif /* CONFIG_PROC_FS */
187 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
190 static struct block_device_operations cciss_fops = {
191 .owner = THIS_MODULE,
193 .release = cciss_release,
194 .ioctl = cciss_ioctl,
195 .getgeo = cciss_getgeo,
197 .compat_ioctl = cciss_compat_ioctl,
199 .revalidate_disk = cciss_revalidate,
203 * Enqueuing and dequeuing functions for cmdlists.
205 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
209 c->next = c->prev = c;
211 c->prev = (*Qptr)->prev;
213 (*Qptr)->prev->next = c;
218 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
219 CommandList_struct *c)
221 if (c && c->next != c) {
224 c->prev->next = c->next;
225 c->next->prev = c->prev;
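/* reqQ and cmpQ are circular doubly-linked lists threaded through
 * CommandList_struct, so removal is just rewiring the neighbours'
 * next/prev pointers (addQ above splices new commands in at the tail
 * the same way). */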
232 #include "cciss_scsi.c" /* For SCSI tape support */
234 #define RAID_UNKNOWN 6
236 #ifdef CONFIG_PROC_FS
239 * Report information about this controller.
241 #define ENG_GIG 1000000000
242 #define ENG_GIG_FACTOR (ENG_GIG/512)
243 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
247 static struct proc_dir_entry *proc_cciss;
249 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
250 int length, int *eof, void *data)
255 ctlr_info_t *h = (ctlr_info_t *) data;
256 drive_info_struct *drv;
258 sector_t vol_sz, vol_sz_frac;
262 /* prevent displaying bogus info during configuration
263 * or deconfiguration of a logical volume
265 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
266 if (h->busy_configuring) {
267 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
270 h->busy_configuring = 1;
271 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
273 size = sprintf(buffer, "%s: HP %s Controller\n"
274 "Board ID: 0x%08lx\n"
275 "Firmware Version: %c%c%c%c\n"
277 "Logical drives: %d\n"
279 "Current Q depth: %d\n"
280 "Current # commands on controller: %d\n"
281 "Max Q depth since init: %d\n"
282 "Max # commands on controller since init: %d\n"
283 "Max SG entries since init: %d\n\n",
286 (unsigned long)h->board_id,
287 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
288 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
290 h->cciss_max_sectors,
291 h->Qdepth, h->commands_outstanding,
292 h->maxQsinceinit, h->max_outstanding, h->maxSG);
296 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
297 for (i = 0; i <= h->highest_lun; i++) {
303 vol_sz = drv->nr_blocks;
304 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
306 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
308 if (drv->raid_level > 5)
309 drv->raid_level = RAID_UNKNOWN;
310 size = sprintf(buffer + len, "cciss/c%dd%d:"
311 "\t%4u.%02uGB\tRAID %s\n",
312 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
313 raid_label[drv->raid_level]);
319 *start = buffer + offset;
323 h->busy_configuring = 0;
328 cciss_proc_write(struct file *file, const char __user *buffer,
329 unsigned long count, void *data)
331 unsigned char cmd[80];
333 #ifdef CONFIG_CISS_SCSI_TAPE
334 ctlr_info_t *h = (ctlr_info_t *) data;
338 if (count > sizeof(cmd) - 1)
340 if (copy_from_user(cmd, buffer, count))
343 len = strlen(cmd); // above 3 lines ensure safety
344 if (len && cmd[len - 1] == '\n')
346 # ifdef CONFIG_CISS_SCSI_TAPE
347 if (strcmp("engage scsi", cmd) == 0) {
348 rc = cciss_engage_scsi(h->ctlr);
353 /* might be nice to have "disengage" too, but it's not
354 safely possible. (only 1 module use count, lock issues.) */
360 * Get us a file in /proc/cciss that says something about each controller.
361 * Create /proc/cciss if it doesn't exist yet.
363 static void __devinit cciss_procinit(int i)
365 struct proc_dir_entry *pde;
367 if (proc_cciss == NULL) {
368 proc_cciss = proc_mkdir("cciss", proc_root_driver);
373 pde = create_proc_read_entry(hba[i]->devname,
374 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
375 proc_cciss, cciss_proc_get_info, hba[i]);
376 pde->write_proc = cciss_proc_write;
378 #endif /* CONFIG_PROC_FS */
381 * For operations that cannot sleep, a command block is allocated at init,
382 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
383 * which ones are free or in use. For operations that can wait for kmalloc
384 * to possibly sleep, this routine can be called with get_from_pool set to 0.
385 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc was.
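 * A minimal usage sketch: c = cmd_alloc(h, 1); ... issue the command ...;
 * cmd_free(h, c, 1); the got_from_pool argument to cmd_free() must mirror
 * the get_from_pool argument that was given to cmd_alloc().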
387 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
389 CommandList_struct *c;
392 dma_addr_t cmd_dma_handle, err_dma_handle;
394 if (!get_from_pool) {
395 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
396 sizeof(CommandList_struct), &cmd_dma_handle);
399 memset(c, 0, sizeof(CommandList_struct));
403 c->err_info = (ErrorInfo_struct *)
404 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
407 if (c->err_info == NULL) {
408 pci_free_consistent(h->pdev,
409 sizeof(CommandList_struct), c, cmd_dma_handle);
412 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
413 } else { /* get it out of the controller's pool */
416 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
419 } while (test_and_set_bit
420 (i & (BITS_PER_LONG - 1),
421 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
423 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
426 memset(c, 0, sizeof(CommandList_struct));
427 cmd_dma_handle = h->cmd_pool_dhandle
428 + i * sizeof(CommandList_struct);
429 c->err_info = h->errinfo_pool + i;
430 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
431 err_dma_handle = h->errinfo_pool_dhandle
432 + i * sizeof(ErrorInfo_struct);
438 c->busaddr = (__u32) cmd_dma_handle;
439 temp64.val = (__u64) err_dma_handle;
440 c->ErrDesc.Addr.lower = temp64.val32.lower;
441 c->ErrDesc.Addr.upper = temp64.val32.upper;
442 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
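/* The command block and its error buffer are both handed to the board by
 * bus address; the 64-bit DMA handle is split into the lower/upper 32-bit
 * halves of ErrDesc.Addr. */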
449 * Frees a command block that was previously allocated with cmd_alloc().
451 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
456 if (!got_from_pool) {
457 temp64.val32.lower = c->ErrDesc.Addr.lower;
458 temp64.val32.upper = c->ErrDesc.Addr.upper;
459 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
460 c->err_info, (dma_addr_t) temp64.val);
461 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
462 c, (dma_addr_t) c->busaddr);
465 clear_bit(i & (BITS_PER_LONG - 1),
466 h->cmd_pool_bits + (i / BITS_PER_LONG));
471 static inline ctlr_info_t *get_host(struct gendisk *disk)
473 return disk->queue->queuedata;
476 static inline drive_info_struct *get_drv(struct gendisk *disk)
478 return disk->private_data;
482 * Open. Make sure the device is really there.
484 static int cciss_open(struct inode *inode, struct file *filep)
486 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
487 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
490 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
491 #endif /* CCISS_DEBUG */
493 if (host->busy_initializing || drv->busy_configuring)
496 * Root is allowed to open raw volume zero even if it's not configured
497 * so array config can still work. Root is also allowed to open any
498 * volume that has a LUN ID, so it can issue IOCTL to reread the
499 * disk information. I don't think I really like this
500 * but I'm already using way too many device nodes to claim another one
501 * for "raw controller".
503 if (drv->heads == 0) {
504 if (iminor(inode) != 0) { /* not node 0? */
505 /* if it is not node 0, make sure it is partition 0 */
506 if (iminor(inode) & 0x0f) {
508 /* if it is, make sure we have a LUN ID */
509 } else if (drv->LunID == 0) {
513 if (!capable(CAP_SYS_ADMIN))
524 static int cciss_release(struct inode *inode, struct file *filep)
526 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
527 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
530 printk(KERN_DEBUG "cciss_release %s\n",
531 inode->i_bdev->bd_disk->disk_name);
532 #endif /* CCISS_DEBUG */
541 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
545 ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
550 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
552 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
555 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
558 case CCISS_GETPCIINFO:
559 case CCISS_GETINTINFO:
560 case CCISS_SETINTINFO:
561 case CCISS_GETNODENAME:
562 case CCISS_SETNODENAME:
563 case CCISS_GETHEARTBEAT:
564 case CCISS_GETBUSTYPES:
565 case CCISS_GETFIRMVER:
566 case CCISS_GETDRIVVER:
567 case CCISS_REVALIDVOLS:
568 case CCISS_DEREGDISK:
569 case CCISS_REGNEWDISK:
571 case CCISS_RESCANDISK:
572 case CCISS_GETLUNINFO:
573 return do_ioctl(f, cmd, arg);
575 case CCISS_PASSTHRU32:
576 return cciss_ioctl32_passthru(f, cmd, arg);
577 case CCISS_BIG_PASSTHRU32:
578 return cciss_ioctl32_big_passthru(f, cmd, arg);
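/* Only the two passthru ioctls need translation here: their structures
 * embed a user buffer pointer whose size differs between 32- and 64-bit
 * ABIs. Everything handled above is layout-compatible and is simply
 * forwarded to cciss_ioctl() through do_ioctl(). */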
585 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
588 IOCTL32_Command_struct __user *arg32 =
589 (IOCTL32_Command_struct __user *) arg;
590 IOCTL_Command_struct arg64;
591 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
597 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
598 sizeof(arg64.LUN_info));
600 copy_from_user(&arg64.Request, &arg32->Request,
601 sizeof(arg64.Request));
603 copy_from_user(&arg64.error_info, &arg32->error_info,
604 sizeof(arg64.error_info));
605 err |= get_user(arg64.buf_size, &arg32->buf_size);
606 err |= get_user(cp, &arg32->buf);
607 arg64.buf = compat_ptr(cp);
608 err |= copy_to_user(p, &arg64, sizeof(arg64));
613 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
617 copy_in_user(&arg32->error_info, &p->error_info,
618 sizeof(arg32->error_info));
624 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
627 BIG_IOCTL32_Command_struct __user *arg32 =
628 (BIG_IOCTL32_Command_struct __user *) arg;
629 BIG_IOCTL_Command_struct arg64;
630 BIG_IOCTL_Command_struct __user *p =
631 compat_alloc_user_space(sizeof(arg64));
637 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
638 sizeof(arg64.LUN_info));
640 copy_from_user(&arg64.Request, &arg32->Request,
641 sizeof(arg64.Request));
643 copy_from_user(&arg64.error_info, &arg32->error_info,
644 sizeof(arg64.error_info));
645 err |= get_user(arg64.buf_size, &arg32->buf_size);
646 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
647 err |= get_user(cp, &arg32->buf);
648 arg64.buf = compat_ptr(cp);
649 err |= copy_to_user(p, &arg64, sizeof(arg64));
654 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
658 copy_in_user(&arg32->error_info, &p->error_info,
659 sizeof(arg32->error_info));
666 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
668 drive_info_struct *drv = get_drv(bdev->bd_disk);
673 geo->heads = drv->heads;
674 geo->sectors = drv->sectors;
675 geo->cylinders = drv->cylinders;
682 static int cciss_ioctl(struct inode *inode, struct file *filep,
683 unsigned int cmd, unsigned long arg)
685 struct block_device *bdev = inode->i_bdev;
686 struct gendisk *disk = bdev->bd_disk;
687 ctlr_info_t *host = get_host(disk);
688 drive_info_struct *drv = get_drv(disk);
689 int ctlr = host->ctlr;
690 void __user *argp = (void __user *)arg;
693 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
694 #endif /* CCISS_DEBUG */
697 case CCISS_GETPCIINFO:
699 cciss_pci_info_struct pciinfo;
703 pciinfo.domain = pci_domain_nr(host->pdev->bus);
704 pciinfo.bus = host->pdev->bus->number;
705 pciinfo.dev_fn = host->pdev->devfn;
706 pciinfo.board_id = host->board_id;
708 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
712 case CCISS_GETINTINFO:
714 cciss_coalint_struct intinfo;
718 readl(&host->cfgtable->HostWrite.CoalIntDelay);
720 readl(&host->cfgtable->HostWrite.CoalIntCount);
722 (argp, &intinfo, sizeof(cciss_coalint_struct)))
726 case CCISS_SETINTINFO:
728 cciss_coalint_struct intinfo;
734 if (!capable(CAP_SYS_ADMIN))
737 (&intinfo, argp, sizeof(cciss_coalint_struct)))
739 if ((intinfo.delay == 0) && (intinfo.count == 0))
741 // printk("cciss_ioctl: delay and count cannot be 0\n");
744 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
745 /* Update the field, and then ring the doorbell */
746 writel(intinfo.delay,
747 &(host->cfgtable->HostWrite.CoalIntDelay));
748 writel(intinfo.count,
749 &(host->cfgtable->HostWrite.CoalIntCount));
750 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
752 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
753 if (!(readl(host->vaddr + SA5_DOORBELL)
756 /* delay and try again */
759 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
760 if (i >= MAX_IOCTL_CONFIG_WAIT)
764 case CCISS_GETNODENAME:
766 NodeName_type NodeName;
771 for (i = 0; i < 16; i++)
773 readb(&host->cfgtable->ServerName[i]);
774 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
778 case CCISS_SETNODENAME:
780 NodeName_type NodeName;
786 if (!capable(CAP_SYS_ADMIN))
790 (NodeName, argp, sizeof(NodeName_type)))
793 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
795 /* Update the field, and then ring the doorbell */
796 for (i = 0; i < 16; i++)
798 &host->cfgtable->ServerName[i]);
800 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
802 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
803 if (!(readl(host->vaddr + SA5_DOORBELL)
806 /* delay and try again */
809 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
810 if (i >= MAX_IOCTL_CONFIG_WAIT)
815 case CCISS_GETHEARTBEAT:
817 Heartbeat_type heartbeat;
821 heartbeat = readl(&host->cfgtable->HeartBeat);
823 (argp, &heartbeat, sizeof(Heartbeat_type)))
827 case CCISS_GETBUSTYPES:
829 BusTypes_type BusTypes;
833 BusTypes = readl(&host->cfgtable->BusTypes);
835 (argp, &BusTypes, sizeof(BusTypes_type)))
839 case CCISS_GETFIRMVER:
841 FirmwareVer_type firmware;
845 memcpy(firmware, host->firm_ver, 4);
848 (argp, firmware, sizeof(FirmwareVer_type)))
852 case CCISS_GETDRIVVER:
854 DriverVer_type DriverVer = DRIVER_VERSION;
860 (argp, &DriverVer, sizeof(DriverVer_type)))
865 case CCISS_REVALIDVOLS:
866 return rebuild_lun_table(host, NULL);
868 case CCISS_GETLUNINFO:{
869 LogvolInfo_struct luninfo;
871 luninfo.LunID = drv->LunID;
872 luninfo.num_opens = drv->usage_count;
873 luninfo.num_parts = 0;
874 if (copy_to_user(argp, &luninfo,
875 sizeof(LogvolInfo_struct)))
879 case CCISS_DEREGDISK:
880 return rebuild_lun_table(host, disk);
883 return rebuild_lun_table(host, NULL);
887 IOCTL_Command_struct iocommand;
888 CommandList_struct *c;
892 DECLARE_COMPLETION_ONSTACK(wait);
897 if (!capable(CAP_SYS_RAWIO))
901 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
903 if ((iocommand.buf_size < 1) &&
904 (iocommand.Request.Type.Direction != XFER_NONE)) {
907 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
908 /* Check kmalloc limits */
909 if (iocommand.buf_size > 128000)
912 if (iocommand.buf_size > 0) {
913 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
917 if (iocommand.Request.Type.Direction == XFER_WRITE) {
918 /* Copy the data into the buffer we created */
920 (buff, iocommand.buf, iocommand.buf_size)) {
925 memset(buff, 0, iocommand.buf_size);
927 if ((c = cmd_alloc(host, 0)) == NULL) {
931 // Fill in the command type
932 c->cmd_type = CMD_IOCTL_PEND;
933 // Fill in Command Header
934 c->Header.ReplyQueue = 0; // unused in simple mode
935 if (iocommand.buf_size > 0) // buffer to fill
937 c->Header.SGList = 1;
938 c->Header.SGTotal = 1;
939 } else // no buffers to fill
941 c->Header.SGList = 0;
942 c->Header.SGTotal = 0;
944 c->Header.LUN = iocommand.LUN_info;
945 c->Header.Tag.lower = c->busaddr; // use the bus address of the cmd block as the tag
947 // Fill in Request block
948 c->Request = iocommand.Request;
950 // Fill in the scatter gather information
951 if (iocommand.buf_size > 0) {
952 temp64.val = pci_map_single(host->pdev, buff,
954 PCI_DMA_BIDIRECTIONAL);
955 c->SG[0].Addr.lower = temp64.val32.lower;
956 c->SG[0].Addr.upper = temp64.val32.upper;
957 c->SG[0].Len = iocommand.buf_size;
958 c->SG[0].Ext = 0; // we are not chaining
962 /* Put the request on the tail of the request queue */
963 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
964 addQ(&host->reqQ, c);
967 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
969 wait_for_completion(&wait);
971 /* unlock the buffers from DMA */
972 temp64.val32.lower = c->SG[0].Addr.lower;
973 temp64.val32.upper = c->SG[0].Addr.upper;
974 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
976 PCI_DMA_BIDIRECTIONAL);
978 /* Copy the error information out */
979 iocommand.error_info = *(c->err_info);
981 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
983 cmd_free(host, c, 0);
987 if (iocommand.Request.Type.Direction == XFER_READ) {
988 /* Copy the data out of the buffer we created */
990 (iocommand.buf, buff, iocommand.buf_size)) {
992 cmd_free(host, c, 0);
997 cmd_free(host, c, 0);
1000 case CCISS_BIG_PASSTHRU:{
1001 BIG_IOCTL_Command_struct *ioc;
1002 CommandList_struct *c;
1003 unsigned char **buff = NULL;
1004 int *buff_size = NULL;
1006 unsigned long flags;
1010 DECLARE_COMPLETION_ONSTACK(wait);
1013 BYTE __user *data_ptr;
1017 if (!capable(CAP_SYS_RAWIO))
1019 ioc = (BIG_IOCTL_Command_struct *)
1020 kmalloc(sizeof(*ioc), GFP_KERNEL);
1025 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1029 if ((ioc->buf_size < 1) &&
1030 (ioc->Request.Type.Direction != XFER_NONE)) {
1034 /* Check kmalloc limits using all SGs */
1035 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1039 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1044 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1049 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1055 left = ioc->buf_size;
1056 data_ptr = ioc->buf;
1059 ioc->malloc_size) ? ioc->
1061 buff_size[sg_used] = sz;
1062 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1063 if (buff[sg_used] == NULL) {
1067 if (ioc->Request.Type.Direction == XFER_WRITE) {
1069 (buff[sg_used], data_ptr, sz)) {
1074 memset(buff[sg_used], 0, sz);
1080 if ((c = cmd_alloc(host, 0)) == NULL) {
1084 c->cmd_type = CMD_IOCTL_PEND;
1085 c->Header.ReplyQueue = 0;
1087 if (ioc->buf_size > 0) {
1088 c->Header.SGList = sg_used;
1089 c->Header.SGTotal = sg_used;
1091 c->Header.SGList = 0;
1092 c->Header.SGTotal = 0;
1094 c->Header.LUN = ioc->LUN_info;
1095 c->Header.Tag.lower = c->busaddr;
1097 c->Request = ioc->Request;
1098 if (ioc->buf_size > 0) {
1100 for (i = 0; i < sg_used; i++) {
1102 pci_map_single(host->pdev, buff[i],
1104 PCI_DMA_BIDIRECTIONAL);
1105 c->SG[i].Addr.lower =
1107 c->SG[i].Addr.upper =
1109 c->SG[i].Len = buff_size[i];
1110 c->SG[i].Ext = 0; /* we are not chaining */
1114 /* Put the request on the tail of the request queue */
1115 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1116 addQ(&host->reqQ, c);
1119 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1120 wait_for_completion(&wait);
1121 /* unlock the buffers from DMA */
1122 for (i = 0; i < sg_used; i++) {
1123 temp64.val32.lower = c->SG[i].Addr.lower;
1124 temp64.val32.upper = c->SG[i].Addr.upper;
1125 pci_unmap_single(host->pdev,
1126 (dma_addr_t) temp64.val, buff_size[i],
1127 PCI_DMA_BIDIRECTIONAL);
1129 /* Copy the error information out */
1130 ioc->error_info = *(c->err_info);
1131 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1132 cmd_free(host, c, 0);
1136 if (ioc->Request.Type.Direction == XFER_READ) {
1137 /* Copy the data out of the buffer we created */
1138 BYTE __user *ptr = ioc->buf;
1139 for (i = 0; i < sg_used; i++) {
1141 (ptr, buff[i], buff_size[i])) {
1142 cmd_free(host, c, 0);
1146 ptr += buff_size[i];
1149 cmd_free(host, c, 0);
1153 for (i = 0; i < sg_used; i++)
1162 /* scsi_cmd_ioctl handles these, below, though some are not */
1163 /* very meaningful for cciss. SG_IO is the main one people want. */
1165 case SG_GET_VERSION_NUM:
1166 case SG_SET_TIMEOUT:
1167 case SG_GET_TIMEOUT:
1168 case SG_GET_RESERVED_SIZE:
1169 case SG_SET_RESERVED_SIZE:
1170 case SG_EMULATED_HOST:
1172 case SCSI_IOCTL_SEND_COMMAND:
1173 return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
1175 /* scsi_cmd_ioctl would normally handle these, below, but */
1176 /* they aren't a good fit for cciss, as CD-ROMs are */
1177 /* not supported, and we don't have any bus/target/lun */
1178 /* which we present to the kernel. */
1180 case CDROM_SEND_PACKET:
1181 case CDROMCLOSETRAY:
1183 case SCSI_IOCTL_GET_IDLUN:
1184 case SCSI_IOCTL_GET_BUS_NUMBER:
1190 static inline void complete_buffers(struct bio *bio, int status)
1193 struct bio *xbh = bio->bi_next;
1194 int nr_sectors = bio_sectors(bio);
1196 bio->bi_next = NULL;
1197 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1202 static void cciss_check_queues(ctlr_info_t *h)
1204 int start_queue = h->next_to_run;
1207 /* check to see if we have maxed out the number of commands that can
1208 * be placed on the queue. If so then exit. We do this check here
1209 * in case the interrupt we serviced was from an ioctl and did not
1210 * free any new commands.
1212 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1215 /* We have room on the queue for more commands. Now we need to queue
1216 * them up. We will also keep track of the next queue to run so
1217 * that every queue gets a chance to be started first.
1219 for (i = 0; i < h->highest_lun + 1; i++) {
1220 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1221 /* make sure the disk has been added and the drive is real
1222 * because this can be called from the middle of init_one.
1224 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1226 blk_start_queue(h->gendisk[curr_queue]->queue);
1228 /* check to see if we have maxed out the number of commands
1229 * that can be placed on the queue.
1231 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1232 if (curr_queue == start_queue) {
1234 (start_queue + 1) % (h->highest_lun + 1);
1237 h->next_to_run = curr_queue;
1241 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1246 static void cciss_softirq_done(struct request *rq)
1248 CommandList_struct *cmd = rq->completion_data;
1249 ctlr_info_t *h = hba[cmd->ctlr];
1250 unsigned long flags;
1254 if (cmd->Request.Type.Direction == XFER_READ)
1255 ddir = PCI_DMA_FROMDEVICE;
1257 ddir = PCI_DMA_TODEVICE;
1259 /* command did not need to be retried */
1260 /* unmap the DMA mapping for all the scatter gather elements */
1261 for (i = 0; i < cmd->Header.SGList; i++) {
1262 temp64.val32.lower = cmd->SG[i].Addr.lower;
1263 temp64.val32.upper = cmd->SG[i].Addr.upper;
1264 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1267 complete_buffers(rq->bio, (rq->errors == 0));
1269 if (blk_fs_request(rq)) {
1270 const int rw = rq_data_dir(rq);
1272 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1276 printk("Done with %p\n", rq);
1277 #endif /* CCISS_DEBUG */
1279 add_disk_randomness(rq->rq_disk);
1280 spin_lock_irqsave(&h->lock, flags);
1281 end_that_request_last(rq, (rq->errors == 0));
1282 cmd_free(h, cmd, 1);
1283 cciss_check_queues(h);
1284 spin_unlock_irqrestore(&h->lock, flags);
1287 /* This function will check the usage_count of the drive to be updated/added.
1288 * If the usage_count is zero then the drive information will be updated and
1289 * the disk will be re-registered with the kernel. If not then it will be
1290 * left alone for the next reboot. The exception to this is disk 0 which
1291 * will always be left registered with the kernel since it is also the
1292 * controller node. Any changes to disk 0 will show up on the next
1295 static void cciss_update_drive_info(int ctlr, int drv_index)
1297 ctlr_info_t *h = hba[ctlr];
1298 struct gendisk *disk;
1299 InquiryData_struct *inq_buff = NULL;
1300 unsigned int block_size;
1301 sector_t total_size;
1302 unsigned long flags = 0;
1305 /* if the disk already exists then deregister it before proceeding */
1306 if (h->drv[drv_index].raid_level != -1) {
1307 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1308 h->drv[drv_index].busy_configuring = 1;
1309 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1310 ret = deregister_disk(h->gendisk[drv_index],
1311 &h->drv[drv_index], 0);
1312 h->drv[drv_index].busy_configuring = 0;
1315 /* If the disk is in use return */
1319 /* Get information about the disk and modify the driver structure */
1320 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1321 if (inq_buff == NULL)
1324 /* testing to see if 16-byte CDBs are already being used */
1325 if (h->cciss_read == CCISS_READ_16) {
1326 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1327 &total_size, &block_size);
1331 cciss_read_capacity(ctlr, drv_index, 1,
1332 &total_size, &block_size);
1334 /* if read_capacity returns all F's this volume is >2TB in size */
1335 /* so we switch to 16-byte CDBs for all read/write ops */
1336 if (total_size == 0xFFFFFFFFULL) {
1337 cciss_read_capacity_16(ctlr, drv_index, 1,
1338 &total_size, &block_size);
1339 h->cciss_read = CCISS_READ_16;
1340 h->cciss_write = CCISS_WRITE_16;
1342 h->cciss_read = CCISS_READ_10;
1343 h->cciss_write = CCISS_WRITE_10;
1346 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1347 inq_buff, &h->drv[drv_index]);
1350 disk = h->gendisk[drv_index];
1351 set_capacity(disk, h->drv[drv_index].nr_blocks);
1353 /* if it's the controller it's already added */
1355 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1356 sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
1357 disk->major = h->major;
1358 disk->first_minor = drv_index << NWD_SHIFT;
1359 disk->fops = &cciss_fops;
1360 disk->private_data = &h->drv[drv_index];
1362 /* Set up queue information */
1363 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1364 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1366 /* This is a hardware imposed limit. */
1367 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1369 /* This is a limit in the driver and could be eliminated. */
1370 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1372 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1374 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1376 disk->queue->queuedata = hba[ctlr];
1378 blk_queue_hardsect_size(disk->queue,
1379 hba[ctlr]->drv[drv_index].block_size);
1381 h->drv[drv_index].queue = disk->queue;
1389 printk(KERN_ERR "cciss: out of memory\n");
1393 /* This function will find the first index of the controller's drive array
1394 * that has a -1 for the raid_level and will return that index. This is
1395 * where new drives will be added. If the index to be returned is greater
1396 * than the highest_lun index for the controller then highest_lun is set
1397 * to this new index. If there are no available indexes then -1 is returned.
1399 static int cciss_find_free_drive_index(int ctlr)
1403 for (i = 0; i < CISS_MAX_LUN; i++) {
1404 if (hba[ctlr]->drv[i].raid_level == -1) {
1405 if (i > hba[ctlr]->highest_lun)
1406 hba[ctlr]->highest_lun = i;
1413 /* This function will add and remove logical drives from the Logical
1414 * drive array of the controller and maintain persistence of ordering
1415 * so that mount points are preserved until the next reboot. This allows
1416 * for the removal of logical drives in the middle of the drive array
1417 * without a re-ordering of those drives.
1419 * h = The controller to perform the operations on
1420 * del_disk = The disk to remove if specified. If the value given
1421 * is NULL then no disk is removed.
1423 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1427 ReportLunData_struct *ld_buff = NULL;
1428 drive_info_struct *drv = NULL;
1435 unsigned long flags;
1437 /* Set busy_configuring flag for this operation */
1438 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1439 if (h->busy_configuring) {
1440 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1443 h->busy_configuring = 1;
1445 /* if del_disk is NULL then we are being called to add a new disk
1446 * and update the logical drive table. If it is not NULL then
1447 * we will check if the disk is in use or not.
1449 if (del_disk != NULL) {
1450 drv = get_drv(del_disk);
1451 drv->busy_configuring = 1;
1452 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1453 return_code = deregister_disk(del_disk, drv, 1);
1454 drv->busy_configuring = 0;
1455 h->busy_configuring = 0;
1458 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1459 if (!capable(CAP_SYS_RAWIO))
1462 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1463 if (ld_buff == NULL)
1466 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1467 sizeof(ReportLunData_struct), 0,
1470 if (return_code == IO_OK) {
1472 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1473 } else { /* reading number of logical volumes failed */
1474 printk(KERN_WARNING "cciss: report logical volume"
1475 " command failed\n");
1480 num_luns = listlength / 8; /* 8 bytes per entry */
1481 if (num_luns > CISS_MAX_LUN) {
1482 num_luns = CISS_MAX_LUN;
1483 printk(KERN_WARNING "cciss: more luns configured"
1484 " on controller than can be handled by"
1488 /* Compare controller drive array to driver's drive array.
1489 * Check for updates in the drive information and any new drives
1490 * on the controller.
1492 for (i = 0; i < num_luns; i++) {
1498 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1500 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1502 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1503 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
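/* Pack the four LUN bytes from the CISS_REPORT_LOG payload into a 32-bit
 * LunID, byte 0 in the least significant position. */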
1505 /* Find if the LUN is already in the drive array
1506 * of the controller. If so then update its info
1507 * if it is not in use. If it does not exist then find
1508 * the first free index and add it.
1510 for (j = 0; j <= h->highest_lun; j++) {
1511 if (h->drv[j].LunID == lunid) {
1517 /* check if the drive was found already in the array */
1519 drv_index = cciss_find_free_drive_index(ctlr);
1520 if (drv_index == -1)
1523 /* Check if the gendisk needs to be allocated */
1524 if (!h->gendisk[drv_index]){
1525 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1526 if (!h->gendisk[drv_index]){
1527 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1532 h->drv[drv_index].LunID = lunid;
1533 cciss_update_drive_info(ctlr, drv_index);
1539 h->busy_configuring = 0;
1540 /* We return -1 here to tell the ACU that we have registered/updated
1541 * all of the drives that we can and to keep it from calling us
1546 printk(KERN_ERR "cciss: out of memory\n");
1550 /* This function will deregister the disk and its queue from the
1551 * kernel. It must be called with the controller lock held and the
1552 * drv structure's busy_configuring flag set. Its parameters are:
1554 * disk = This is the disk to be deregistered
1555 * drv = This is the drive_info_struct associated with the disk to be
1556 * deregistered. It contains information about the disk used
1558 * clear_all = This flag determines whether or not the disk information
1559 * is going to be completely cleared out and the highest_lun
1560 * reset. Sometimes we want to clear out information about
1561 * the disk in preparation for re-adding it. In this case
1562 * the highest_lun should be left unchanged and the LunID
1563 * should not be cleared.
1565 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1569 ctlr_info_t *h = get_host(disk);
1571 if (!capable(CAP_SYS_RAWIO))
1574 /* make sure logical volume is NOT in use */
1575 if (clear_all || (h->gendisk[0] == disk)) {
1576 if (drv->usage_count > 1)
1578 } else if (drv->usage_count > 0)
1581 /* invalidate the devices and deregister the disk. If it is disk
1582 * zero do not deregister it but just zero out its values. This
1583 * allows us to delete disk zero but keep the controller registered.
1585 if (h->gendisk[0] != disk) {
1587 request_queue_t *q = disk->queue;
1588 if (disk->flags & GENHD_FL_UP)
1591 blk_cleanup_queue(q);
1592 /* Set drv->queue to NULL so that we do not try
1593 * to call blk_start_queue on this queue in the
1598 /* If clear_all is set then we are deleting the logical
1599 * drive, not just refreshing its info. For drives
1600 * other than disk 0 we will call put_disk. We do not
1601 * do this for disk 0 as we need it to be able to
1602 * configure the controller.
1605 /* This isn't pretty, but we need to find the
1606 * disk in our array and NULL out the pointer.
1607 * This is so that we will call alloc_disk if
1608 * this index is used again later.
1610 for (i=0; i < CISS_MAX_LUN; i++){
1611 if(h->gendisk[i] == disk){
1612 h->gendisk[i] = NULL;
1620 set_capacity(disk, 0);
1624 /* zero out the disk size info */
1626 drv->block_size = 0;
1630 drv->raid_level = -1; /* This can be used as a flag variable to
1631 * indicate that this element of the drive
1636 /* check to see if it was the last disk */
1637 if (drv == h->drv + h->highest_lun) {
1638 /* if so, find the new highest lun */
1639 int i, newhighest = -1;
1640 for (i = 0; i < h->highest_lun; i++) {
1641 /* if the disk has size > 0, it is available */
1642 if (h->drv[i].heads)
1645 h->highest_lun = newhighest;
1653 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1654 1: address logical volume log_unit,
1655 2: periph device address is scsi3addr */
1656 unsigned int log_unit, __u8 page_code,
1657 unsigned char *scsi3addr, int cmd_type)
1659 ctlr_info_t *h = hba[ctlr];
1660 u64bit buff_dma_handle;
1663 c->cmd_type = CMD_IOCTL_PEND;
1664 c->Header.ReplyQueue = 0;
1666 c->Header.SGList = 1;
1667 c->Header.SGTotal = 1;
1669 c->Header.SGList = 0;
1670 c->Header.SGTotal = 0;
1672 c->Header.Tag.lower = c->busaddr;
1674 c->Request.Type.Type = cmd_type;
1675 if (cmd_type == TYPE_CMD) {
1678 /* If the logical unit number is 0 then this is going
1679 to the controller, so it's a physical command:
1680 mode = 0, target = 0, and we have nothing to write.
1681 Otherwise, if use_unit_num == 1,
1682 mode = 1 (volume set addressing), target = LUNID;
1683 otherwise, if use_unit_num == 2,
1684 mode = 0 (periph dev addr), target = scsi3addr */
1685 if (use_unit_num == 1) {
1686 c->Header.LUN.LogDev.VolId =
1687 h->drv[log_unit].LunID;
1688 c->Header.LUN.LogDev.Mode = 1;
1689 } else if (use_unit_num == 2) {
1690 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1692 c->Header.LUN.LogDev.Mode = 0;
1694 /* are we trying to read a vital product page */
1695 if (page_code != 0) {
1696 c->Request.CDB[1] = 0x01;
1697 c->Request.CDB[2] = page_code;
1699 c->Request.CDBLen = 6;
1700 c->Request.Type.Attribute = ATTR_SIMPLE;
1701 c->Request.Type.Direction = XFER_READ;
1702 c->Request.Timeout = 0;
1703 c->Request.CDB[0] = CISS_INQUIRY;
1704 c->Request.CDB[4] = size & 0xFF;
1706 case CISS_REPORT_LOG:
1707 case CISS_REPORT_PHYS:
1708 /* Talking to the controller, so it's a physical command:
1709 mode = 00, target = 0. Nothing to write.
1711 c->Request.CDBLen = 12;
1712 c->Request.Type.Attribute = ATTR_SIMPLE;
1713 c->Request.Type.Direction = XFER_READ;
1714 c->Request.Timeout = 0;
1715 c->Request.CDB[0] = cmd;
1716 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1717 c->Request.CDB[7] = (size >> 16) & 0xFF;
1718 c->Request.CDB[8] = (size >> 8) & 0xFF;
1719 c->Request.CDB[9] = size & 0xFF;
1722 case CCISS_READ_CAPACITY:
1723 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1724 c->Header.LUN.LogDev.Mode = 1;
1725 c->Request.CDBLen = 10;
1726 c->Request.Type.Attribute = ATTR_SIMPLE;
1727 c->Request.Type.Direction = XFER_READ;
1728 c->Request.Timeout = 0;
1729 c->Request.CDB[0] = cmd;
1731 case CCISS_READ_CAPACITY_16:
1732 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1733 c->Header.LUN.LogDev.Mode = 1;
1734 c->Request.CDBLen = 16;
1735 c->Request.Type.Attribute = ATTR_SIMPLE;
1736 c->Request.Type.Direction = XFER_READ;
1737 c->Request.Timeout = 0;
1738 c->Request.CDB[0] = cmd;
1739 c->Request.CDB[1] = 0x10;
1740 c->Request.CDB[10] = (size >> 24) & 0xFF;
1741 c->Request.CDB[11] = (size >> 16) & 0xFF;
1742 c->Request.CDB[12] = (size >> 8) & 0xFF;
1743 c->Request.CDB[13] = size & 0xFF;
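/* READ CAPACITY(16) is a SERVICE ACTION IN(16) opcode: byte 1 carries the
 * 0x10 service action and bytes 10-13 the big-endian allocation length
 * set up above. */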
1744 c->Request.Timeout = 0;
1745 c->Request.CDB[0] = cmd;
1747 case CCISS_CACHE_FLUSH:
1748 c->Request.CDBLen = 12;
1749 c->Request.Type.Attribute = ATTR_SIMPLE;
1750 c->Request.Type.Direction = XFER_WRITE;
1751 c->Request.Timeout = 0;
1752 c->Request.CDB[0] = BMIC_WRITE;
1753 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1757 "cciss%d: Unknown Command 0x%02x\n", ctlr, cmd);
1760 } else if (cmd_type == TYPE_MSG) {
1762 case 0: /* ABORT message */
1763 c->Request.CDBLen = 12;
1764 c->Request.Type.Attribute = ATTR_SIMPLE;
1765 c->Request.Type.Direction = XFER_WRITE;
1766 c->Request.Timeout = 0;
1767 c->Request.CDB[0] = cmd; /* abort */
1768 c->Request.CDB[1] = 0; /* abort a command */
1769 /* buff contains the tag of the command to abort */
1770 memcpy(&c->Request.CDB[4], buff, 8);
1772 case 1: /* RESET message */
1773 c->Request.CDBLen = 12;
1774 c->Request.Type.Attribute = ATTR_SIMPLE;
1775 c->Request.Type.Direction = XFER_WRITE;
1776 c->Request.Timeout = 0;
1777 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1778 c->Request.CDB[0] = cmd; /* reset */
1779 c->Request.CDB[1] = 0x04; /* reset a LUN */
1781 case 3: /* No-Op message */
1782 c->Request.CDBLen = 1;
1783 c->Request.Type.Attribute = ATTR_SIMPLE;
1784 c->Request.Type.Direction = XFER_WRITE;
1785 c->Request.Timeout = 0;
1786 c->Request.CDB[0] = cmd;
1790 "cciss%d: unknown message type %d\n", ctlr, cmd);
1795 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1798 /* Fill in the scatter gather information */
1800 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1802 PCI_DMA_BIDIRECTIONAL);
1803 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1804 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1805 c->SG[0].Len = size;
1806 c->SG[0].Ext = 0; /* we are not chaining */
1811 static int sendcmd_withirq(__u8 cmd,
1815 unsigned int use_unit_num,
1816 unsigned int log_unit, __u8 page_code, int cmd_type)
1818 ctlr_info_t *h = hba[ctlr];
1819 CommandList_struct *c;
1820 u64bit buff_dma_handle;
1821 unsigned long flags;
1823 DECLARE_COMPLETION_ONSTACK(wait);
1825 if ((c = cmd_alloc(h, 0)) == NULL)
1827 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1828 log_unit, page_code, NULL, cmd_type);
1829 if (return_status != IO_OK) {
1831 return return_status;
1836 /* Put the request on the tail of the queue and send it */
1837 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1841 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1843 wait_for_completion(&wait);
1845 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1846 switch (c->err_info->CommandStatus) {
1847 case CMD_TARGET_STATUS:
1848 printk(KERN_WARNING "cciss: cmd %p has "
1849 " completed with errors\n", c);
1850 if (c->err_info->ScsiStatus) {
1851 printk(KERN_WARNING "cciss: cmd %p "
1852 "has SCSI Status = %x\n",
1853 c, c->err_info->ScsiStatus);
1857 case CMD_DATA_UNDERRUN:
1858 case CMD_DATA_OVERRUN:
1859 /* expected for inquiry and report LUN commands */
1862 printk(KERN_WARNING "cciss: Cmd %p is "
1863 "reported invalid\n", c);
1864 return_status = IO_ERROR;
1866 case CMD_PROTOCOL_ERR:
1867 printk(KERN_WARNING "cciss: cmd %p has "
1868 "protocol error \n", c);
1869 return_status = IO_ERROR;
1871 case CMD_HARDWARE_ERR:
1872 printk(KERN_WARNING "cciss: cmd %p had "
1873 " hardware error\n", c);
1874 return_status = IO_ERROR;
1876 case CMD_CONNECTION_LOST:
1877 printk(KERN_WARNING "cciss: cmd %p had "
1878 "connection lost\n", c);
1879 return_status = IO_ERROR;
1882 printk(KERN_WARNING "cciss: cmd %p was "
1884 return_status = IO_ERROR;
1886 case CMD_ABORT_FAILED:
1887 printk(KERN_WARNING "cciss: cmd %p reports "
1888 "abort failed\n", c);
1889 return_status = IO_ERROR;
1891 case CMD_UNSOLICITED_ABORT:
1893 "cciss%d: unsolicited abort %p\n", ctlr, c);
1894 if (c->retry_count < MAX_CMD_RETRIES) {
1896 "cciss%d: retrying %p\n", ctlr, c);
1898 /* erase the old error information */
1899 memset(c->err_info, 0,
1900 sizeof(ErrorInfo_struct));
1901 return_status = IO_OK;
1902 INIT_COMPLETION(wait);
1905 return_status = IO_ERROR;
1908 printk(KERN_WARNING "cciss: cmd %p returned "
1909 "unknown status %x\n", c,
1910 c->err_info->CommandStatus);
1911 return_status = IO_ERROR;
1914 /* unlock the buffers from DMA */
1915 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1916 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1917 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1918 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1920 return return_status;
1923 static void cciss_geometry_inquiry(int ctlr, int logvol,
1924 int withirq, sector_t total_size,
1925 unsigned int block_size,
1926 InquiryData_struct *inq_buff,
1927 drive_info_struct *drv)
1932 memset(inq_buff, 0, sizeof(InquiryData_struct));
1934 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1935 inq_buff, sizeof(*inq_buff), 1,
1936 logvol, 0xC1, TYPE_CMD);
1938 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1939 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1941 if (return_code == IO_OK) {
1942 if (inq_buff->data_byte[8] == 0xFF) {
1944 "cciss: reading geometry failed, volume "
1945 "does not support reading geometry\n");
1947 drv->sectors = 32; // Sectors per track
1948 drv->cylinders = total_size + 1;
1949 drv->raid_level = RAID_UNKNOWN;
1951 drv->heads = inq_buff->data_byte[6];
1952 drv->sectors = inq_buff->data_byte[7];
1953 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1954 drv->cylinders += inq_buff->data_byte[5];
1955 drv->raid_level = inq_buff->data_byte[8];
1957 drv->block_size = block_size;
1958 drv->nr_blocks = total_size + 1;
1959 t = drv->heads * drv->sectors;
1961 sector_t real_size = total_size + 1;
1962 unsigned long rem = sector_div(real_size, t);
1965 drv->cylinders = real_size;
1967 } else { /* Get geometry failed */
1968 printk(KERN_WARNING "cciss: reading geometry failed\n");
1970 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1971 drv->heads, drv->sectors, drv->cylinders);
1975 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1976 unsigned int *block_size)
1978 ReadCapdata_struct *buf;
1980 buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1982 printk(KERN_WARNING "cciss: out of memory\n");
1985 memset(buf, 0, sizeof(ReadCapdata_struct));
1987 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1988 ctlr, buf, sizeof(ReadCapdata_struct),
1989 1, logvol, 0, TYPE_CMD);
1991 return_code = sendcmd(CCISS_READ_CAPACITY,
1992 ctlr, buf, sizeof(ReadCapdata_struct),
1993 1, logvol, 0, NULL, TYPE_CMD);
1994 if (return_code == IO_OK) {
1995 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
1996 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
1997 } else { /* read capacity command failed */
1998 printk(KERN_WARNING "cciss: read capacity failed\n");
2000 *block_size = BLOCK_SIZE;
2002 if (*total_size != 0)
2003 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2004 (unsigned long long)*total_size+1, *block_size);
2010 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2012 ReadCapdata_struct_16 *buf;
2014 buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2016 printk(KERN_WARNING "cciss: out of memory\n");
2019 memset(buf, 0, sizeof(ReadCapdata_struct_16));
2021 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2022 ctlr, buf, sizeof(ReadCapdata_struct_16),
2023 1, logvol, 0, TYPE_CMD);
2026 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2027 ctlr, buf, sizeof(ReadCapdata_struct_16),
2028 1, logvol, 0, NULL, TYPE_CMD);
2030 if (return_code == IO_OK) {
2031 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2032 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2033 } else { /* read capacity command failed */
2034 printk(KERN_WARNING "cciss: read capacity failed\n");
2036 *block_size = BLOCK_SIZE;
2038 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2039 (unsigned long long)*total_size+1, *block_size);
2044 static int cciss_revalidate(struct gendisk *disk)
2046 ctlr_info_t *h = get_host(disk);
2047 drive_info_struct *drv = get_drv(disk);
2050 unsigned int block_size;
2051 sector_t total_size;
2052 InquiryData_struct *inq_buff = NULL;
2054 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2055 if (h->drv[logvol].LunID == drv->LunID) {
2064 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2065 if (inq_buff == NULL) {
2066 printk(KERN_WARNING "cciss: out of memory\n");
2069 if (h->cciss_read == CCISS_READ_10) {
2070 cciss_read_capacity(h->ctlr, logvol, 1,
2071 &total_size, &block_size);
2073 cciss_read_capacity_16(h->ctlr, logvol, 1,
2074 &total_size, &block_size);
2076 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2079 blk_queue_hardsect_size(drv->queue, drv->block_size);
2080 set_capacity(disk, drv->nr_blocks);
2087 * Poll (busy-wait) for a command to complete.
2088 * The memory mapped FIFO is polled for the completion.
2089 * Used only at init time, interrupts from the HBA are disabled.
2091 static unsigned long pollcomplete(int ctlr)
2096 /* Wait (up to 20 seconds) for a command to complete */
2098 for (i = 20 * HZ; i > 0; i--) {
2099 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2100 if (done == FIFO_EMPTY)
2101 schedule_timeout_uninterruptible(1);
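/* Sleep a single jiffy between polls; with i counting down from 20*HZ
 * this bounds the wait at roughly 20 seconds. */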
2105 /* Invalid address to tell caller we ran out of time */
2109 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2111 /* We get in here if sendcmd() is polling for completions
2112 and gets some command back that it wasn't expecting --
2113 something other than that which it just sent down.
2114 Ordinarily, that shouldn't happen, but it can happen when
2115 the scsi tape stuff gets into error handling mode, and
2116 starts using sendcmd() to try to abort commands and
2117 reset tape drives. In that case, sendcmd may pick up
2118 completions of commands that were sent to logical drives
2119 through the block i/o system, or cciss ioctls completing, etc.
2120 In that case, we need to save those completions for later
2121 processing by the interrupt handler.
2124 #ifdef CONFIG_CISS_SCSI_TAPE
2125 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2127 /* If it's not the scsi tape stuff doing error handling, (abort */
2128 /* or reset) then we don't expect anything weird. */
2129 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2131 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2132 "Invalid command list address returned! (%lx)\n",
2134 /* not much we can do. */
2135 #ifdef CONFIG_CISS_SCSI_TAPE
2139 /* We've sent down an abort or reset, but something else
2141 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2142 /* Uh oh. No room to save it for later... */
2143 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2144 "reject list overflow, command lost!\n", ctlr);
2147 /* Save it for later */
2148 srl->complete[srl->ncompletions] = complete;
2149 srl->ncompletions++;
2155 * Send a command to the controller, and wait for it to complete.
2156 * Only used at init time.
2158 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2159 1: address logical volume log_unit,
2160 2: periph device address is scsi3addr */
2161 unsigned int log_unit,
2162 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2164 CommandList_struct *c;
2166 unsigned long complete;
2167 ctlr_info_t *info_p = hba[ctlr];
2168 u64bit buff_dma_handle;
2169 int status, done = 0;
2171 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2172 printk(KERN_WARNING "cciss: unable to get memory");
2175 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2176 log_unit, page_code, scsi3addr, cmd_type);
2177 if (status != IO_OK) {
2178 cmd_free(info_p, c, 1);
2186 printk(KERN_DEBUG "cciss: turning intr off\n");
2187 #endif /* CCISS_DEBUG */
2188 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2190 /* Make sure there is room in the command FIFO */
2191 /* Actually it should be completely empty at this time */
2192 /* unless we are in here doing error handling for the scsi */
2193 /* tape side of the driver. */
2194 for (i = 200000; i > 0; i--) {
2195 /* if fifo isn't full go */
2196 if (!(info_p->access.fifo_full(info_p))) {
2201 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2202 " waiting!\n", ctlr);
2207 info_p->access.submit_command(info_p, c);
2210 complete = pollcomplete(ctlr);
2213 printk(KERN_DEBUG "cciss: command completed\n");
2214 #endif /* CCISS_DEBUG */
2216 if (complete == 1) {
2218 "cciss cciss%d: SendCmd timed out, "
2219 "No command list address returned!\n", ctlr);
2225 /* This will need to change for direct lookup completions */
2226 if ((complete & CISS_ERROR_BIT)
2227 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2228 /* if data overrun or underrun on Report command
2231 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2232 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2233 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2234 ((c->err_info->CommandStatus ==
2235 CMD_DATA_OVERRUN) ||
2236 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2238 complete = c->busaddr;
2240 if (c->err_info->CommandStatus ==
2241 CMD_UNSOLICITED_ABORT) {
2242 printk(KERN_WARNING "cciss%d: "
2243 "unsolicited abort %p\n",
2245 if (c->retry_count < MAX_CMD_RETRIES) {
2247 "cciss%d: retrying %p\n",
2250 /* erase the old error */
2252 memset(c->err_info, 0,
2254 (ErrorInfo_struct));
2258 "cciss%d: retried %p too "
2259 "many times\n", ctlr, c);
2263 } else if (c->err_info->CommandStatus ==
2266 "cciss%d: command could not be aborted.\n",
2271 printk(KERN_WARNING "cciss%d: sendcmd"
2272 " Error %x\n", ctlr,
2273 c->err_info->CommandStatus);
2274 printk(KERN_WARNING "cciss%d: sendcmd"
2276 " size %x\n num %x value %x\n",
2278 c->err_info->MoreErrInfo.Invalid_Cmd.
2280 c->err_info->MoreErrInfo.Invalid_Cmd.
2282 c->err_info->MoreErrInfo.Invalid_Cmd.
2288 /* This will need changing for direct lookup completions */
2289 if (complete != c->busaddr) {
2290 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2291 BUG(); /* we are pretty much hosed if we get here. */
2299 /* unlock the data buffer from DMA */
2300 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2301 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2302 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2303 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2304 #ifdef CONFIG_CISS_SCSI_TAPE
2305 /* if we saved some commands for later, process them now. */
2306 if (info_p->scsi_rejects.ncompletions > 0)
2307 do_cciss_intr(0, info_p);
2309 cmd_free(info_p, c, 1);
2314 * Map (physical) PCI mem into (virtual) kernel space
2316 static void __iomem *remap_pci_mem(ulong base, ulong size)
2318 ulong page_base = ((ulong) base) & PAGE_MASK;
2319 ulong page_offs = ((ulong) base) - page_base;
2320 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2322 return page_remapped ? (page_remapped + page_offs) : NULL;
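/*
 * Worked example (hypothetical BAR address, 4K pages): for base = 0xf4000250
 * and size = 0x250,
 *
 *	page_base = 0xf4000250 & PAGE_MASK  = 0xf4000000
 *	page_offs = 0xf4000250 - 0xf4000000 = 0x250
 *	ioremap(0xf4000000, 0x250 + 0x250)  covers the whole register block
 *
 * and the caller gets back page_remapped + 0x250, i.e. a pointer to the
 * (possibly unaligned) start of the registers.
 */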
2326 * Takes jobs off the request Q and sends them to the hardware, then puts
2327 * them on the completion Q to wait for completion.
2329 static void start_io(ctlr_info_t *h)
2331 CommandList_struct *c;
2333 while ((c = h->reqQ) != NULL) {
2334 /* can't do anything if fifo is full */
2335 if ((h->access.fifo_full(h))) {
2336 printk(KERN_WARNING "cciss: fifo full\n");
2340 /* Get the first entry from the Request Q */
2341 removeQ(&(h->reqQ), c);
2344 /* Tell the controller execute command */
2345 h->access.submit_command(h, c);
2347 /* Put job onto the completed Q */
2348 addQ(&(h->cmpQ), c);
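/*
 * Queue discipline, as implemented above and in do_cciss_intr():
 * do_cciss_request() and resend_cciss_cmd() put commands on reqQ,
 * start_io() hands them to the hardware and moves them to cmpQ, and the
 * interrupt handler takes them off cmpQ and completes them.
 */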
2352 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2353 /* Zeros out the error record and then resends the command back */
2354 /* to the controller */
2355 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2357 /* erase the old error information */
2358 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2360 /* add it to software queue and then send it to the controller */
2361 addQ(&(h->reqQ), c);
2363 if (h->Qdepth > h->maxQsinceinit)
2364 h->maxQsinceinit = h->Qdepth;
2369 static inline int evaluate_target_status(CommandList_struct *cmd)
2371 unsigned char sense_key;
2372 int error_count = 1;
2374 if (cmd->err_info->ScsiStatus != 0x02) { /* not check condition? */
2375 if (!blk_pc_request(cmd->rq))
2376 printk(KERN_WARNING "cciss: cmd %p "
2377 "has SCSI Status 0x%x\n",
2378 cmd, cmd->err_info->ScsiStatus);
2382 /* check the sense key */
2383 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2384 /* no status or recovered error */
2385 if ((sense_key == 0x0) || (sense_key == 0x1))
2388 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2389 if (error_count != 0)
2390 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2391 " sense key = 0x%x\n", cmd, sense_key);
2395 /* SG_IO or similar, copy sense data back */
2396 if (cmd->rq->sense) {
2397 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2398 cmd->rq->sense_len = cmd->err_info->SenseLen;
2399 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2400 cmd->rq->sense_len);
2402 cmd->rq->sense_len = 0;
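/*
 * For reference, the low nibble of byte 2 of fixed-format sense data is the
 * SCSI sense key: 0x0 NO SENSE, 0x1 RECOVERED ERROR, 0x2 NOT READY,
 * 0x3 MEDIUM ERROR, 0x4 HARDWARE ERROR, 0x5 ILLEGAL REQUEST,
 * 0x6 UNIT ATTENTION, 0xb ABORTED COMMAND.  Only 0x0 and 0x1 are treated
 * as success above.
 */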
2407 /* checks the status of the job and calls complete buffers to mark all
2408 * buffers for the completed job. Note that this function does not need
2409 * to hold the hba/queue lock.
2411 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2415 struct request *rq = cmd->rq;
2422 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2423 goto after_error_processing;
2425 switch (cmd->err_info->CommandStatus) {
2426 case CMD_TARGET_STATUS:
2427 rq->errors = evaluate_target_status(cmd);
2429 case CMD_DATA_UNDERRUN:
2430 if (blk_fs_request(cmd->rq)) {
2431 printk(KERN_WARNING "cciss: cmd %p has"
2432 " completed with data underrun "
2434 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2437 case CMD_DATA_OVERRUN:
2438 if (blk_fs_request(cmd->rq))
2439 printk(KERN_WARNING "cciss: cmd %p has"
2440 " completed with data overrun "
2444 printk(KERN_WARNING "cciss: cmd %p is "
2445 "reported invalid\n", cmd);
2448 case CMD_PROTOCOL_ERR:
2449 printk(KERN_WARNING "cciss: cmd %p has "
2450 "protocol error \n", cmd);
2453 case CMD_HARDWARE_ERR:
2454 printk(KERN_WARNING "cciss: cmd %p had "
2455 " hardware error\n", cmd);
2458 case CMD_CONNECTION_LOST:
2459 printk(KERN_WARNING "cciss: cmd %p had "
2460 "connection lost\n", cmd);
2464 printk(KERN_WARNING "cciss: cmd %p was "
2468 case CMD_ABORT_FAILED:
2469 printk(KERN_WARNING "cciss: cmd %p reports "
2470 "abort failed\n", cmd);
2473 case CMD_UNSOLICITED_ABORT:
2474 printk(KERN_WARNING "cciss%d: unsolicited "
2475 "abort %p\n", h->ctlr, cmd);
2476 if (cmd->retry_count < MAX_CMD_RETRIES) {
2479 "cciss%d: retrying %p\n", h->ctlr, cmd);
2483 "cciss%d: %p retried too "
2484 "many times\n", h->ctlr, cmd);
2488 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2492 printk(KERN_WARNING "cciss: cmd %p returned "
2493 "unknown status %x\n", cmd,
2494 cmd->err_info->CommandStatus);
2498 after_error_processing:
2500 /* We need to return this command */
2502 resend_cciss_cmd(h, cmd);
2505 cmd->rq->data_len = 0;
2506 cmd->rq->completion_data = cmd;
2507 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2508 blk_complete_request(cmd->rq);
2512 * Get a request and submit it to the controller.
2514 static void do_cciss_request(request_queue_t *q)
2516 ctlr_info_t *h = q->queuedata;
2517 CommandList_struct *c;
2520 struct request *creq;
2522 struct scatterlist tmp_sg[MAXSGENTRIES];
2523 drive_info_struct *drv;
2526 /* We call start_io here in case there is a command waiting on the
2527 * queue that has not been sent.
2529 if (blk_queue_plugged(q))
2533 creq = elv_next_request(q);
2537 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2539 if ((c = cmd_alloc(h, 1)) == NULL)
2542 blkdev_dequeue_request(creq);
2544 spin_unlock_irq(q->queue_lock);
2546 c->cmd_type = CMD_RWREQ;
2549 /* fill in the request */
2550 drv = creq->rq_disk->private_data;
2551 c->Header.ReplyQueue = 0; // unused in simple mode
2552 /* got command from pool, so use the command block index instead */
2553 /* for direct lookups. */
2554 /* The first 2 bits are reserved for controller error reporting. */
2555 c->Header.Tag.lower = (c->cmdindex << 3);
2556 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
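/*
 * Tag layout for direct lookup (a sketch; the decode side lives in the
 * interrupt handler):
 *
 *	bits 31..3	command index within h->cmd_pool
 *	bit      2	"direct lookup" flag (the 0x04 above)
 *	bits  1..0	reserved for controller error reporting
 *
 * so the pool index of a completed command is simply (tag >> 3).
 */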
2557 c->Header.LUN.LogDev.VolId = drv->LunID;
2558 c->Header.LUN.LogDev.Mode = 1;
2559 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2560 c->Request.Type.Type = TYPE_CMD; // It is a command.
2561 c->Request.Type.Attribute = ATTR_SIMPLE;
2562 c->Request.Type.Direction =
2563 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2564 c->Request.Timeout = 0; // Don't time out
2566 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2567 start_blk = creq->sector;
2569 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2570 (int)creq->nr_sectors);
2571 #endif /* CCISS_DEBUG */
2573 seg = blk_rq_map_sg(q, creq, tmp_sg);
2575 /* get the DMA records for the setup */
2576 if (c->Request.Type.Direction == XFER_READ)
2577 dir = PCI_DMA_FROMDEVICE;
2579 dir = PCI_DMA_TODEVICE;
2581 for (i = 0; i < seg; i++) {
2582 c->SG[i].Len = tmp_sg[i].length;
2583 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2585 tmp_sg[i].length, dir);
2586 c->SG[i].Addr.lower = temp64.val32.lower;
2587 c->SG[i].Addr.upper = temp64.val32.upper;
2588 c->SG[i].Ext = 0; // we are not chaining
2590 /* track how many SG entries we are using */
2595 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2596 creq->nr_sectors, seg);
2597 #endif /* CCISS_DEBUG */
2599 c->Header.SGList = c->Header.SGTotal = seg;
2600 if (likely(blk_fs_request(creq))) {
2601 if(h->cciss_read == CCISS_READ_10) {
2602 c->Request.CDB[1] = 0;
2603 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2604 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2605 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2606 c->Request.CDB[5] = start_blk & 0xff;
2607 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2608 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2609 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2610 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2612 c->Request.CDBLen = 16;
2613 c->Request.CDB[1]= 0;
2614 c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
2615 c->Request.CDB[3]= (start_blk >> 48) & 0xff;
2616 c->Request.CDB[4]= (start_blk >> 40) & 0xff;
2617 c->Request.CDB[5]= (start_blk >> 32) & 0xff;
2618 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2619 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2620 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2621 c->Request.CDB[9]= start_blk & 0xff;
2622 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2623 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2624 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2625 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2626 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2628 } else if (blk_pc_request(creq)) {
2629 c->Request.CDBLen = creq->cmd_len;
2630 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2632 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
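/*
 * CDB layouts built above (all multi-byte fields are big-endian, MSB first):
 *
 *	READ/WRITE(10):  bytes 2-5    32-bit starting LBA
 *	                 bytes 7-8    16-bit sector count
 *	READ/WRITE(16):  bytes 2-9    64-bit starting LBA
 *	                 bytes 10-13  32-bit sector count
 */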
2636 spin_lock_irq(q->queue_lock);
2638 addQ(&(h->reqQ), c);
2640 if (h->Qdepth > h->maxQsinceinit)
2641 h->maxQsinceinit = h->Qdepth;
2647 /* We will already have the driver lock here, so no need
2653 static inline unsigned long get_next_completion(ctlr_info_t *h)
2655 #ifdef CONFIG_CISS_SCSI_TAPE
2656 /* Any rejects from sendcmd() lying around? Process them first */
2657 if (h->scsi_rejects.ncompletions == 0)
2658 return h->access.command_completed(h);
2660 struct sendcmd_reject_list *srl;
2662 srl = &h->scsi_rejects;
2663 n = --srl->ncompletions;
2664 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2666 return srl->complete[n];
2669 return h->access.command_completed(h);
2673 static inline int interrupt_pending(ctlr_info_t *h)
2675 #ifdef CONFIG_CISS_SCSI_TAPE
2676 return (h->access.intr_pending(h)
2677 || (h->scsi_rejects.ncompletions > 0));
2679 return h->access.intr_pending(h);
2683 static inline long interrupt_not_for_us(ctlr_info_t *h)
2685 #ifdef CONFIG_CISS_SCSI_TAPE
2686 return (((h->access.intr_pending(h) == 0) ||
2687 (h->interrupts_enabled == 0))
2688 && (h->scsi_rejects.ncompletions == 0));
2690 return (((h->access.intr_pending(h) == 0) ||
2691 (h->interrupts_enabled == 0)));
2695 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2697 ctlr_info_t *h = dev_id;
2698 CommandList_struct *c;
2699 unsigned long flags;
2702 if (interrupt_not_for_us(h))
2705 * If there are completed commands in the completion queue,
2706 * we had better do something about it.
2708 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2709 while (interrupt_pending(h)) {
2710 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2714 if (a2 >= h->nr_cmds) {
2716 "cciss: controller cciss%d failed, stopping.\n",
2718 fail_all_cmds(h->ctlr);
2722 c = h->cmd_pool + a2;
2727 if ((c = h->cmpQ) == NULL) {
2729 "cciss: Completion of %08x ignored\n",
2733 while (c->busaddr != a) {
2740 * If we've found the command, take it off the
2741 * completion Q and free it
2743 if (c->busaddr == a) {
2744 removeQ(&h->cmpQ, c);
2745 if (c->cmd_type == CMD_RWREQ) {
2746 complete_command(h, c, 0);
2747 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2748 complete(c->waiting);
2750 # ifdef CONFIG_CISS_SCSI_TAPE
2751 else if (c->cmd_type == CMD_SCSI)
2752 complete_scsi_command(c, 0, a1);
2759 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2764 * We cannot read the structure directly; for portability we must use the readl/readb accessors.
2766 * This is for debug only.
2769 static void print_cfg_table(CfgTable_struct *tb)
2774 printk("Controller Configuration information\n");
2775 printk("------------------------------------\n");
2776 for (i = 0; i < 4; i++)
2777 temp_name[i] = readb(&(tb->Signature[i]));
2778 temp_name[4] = '\0';
2779 printk(" Signature = %s\n", temp_name);
2780 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2781 printk(" Transport methods supported = 0x%x\n",
2782 readl(&(tb->TransportSupport)));
2783 printk(" Transport methods active = 0x%x\n",
2784 readl(&(tb->TransportActive)));
2785 printk(" Requested transport Method = 0x%x\n",
2786 readl(&(tb->HostWrite.TransportRequest)));
2787 printk(" Coalesce Interrupt Delay = 0x%x\n",
2788 readl(&(tb->HostWrite.CoalIntDelay)));
2789 printk(" Coalesce Interrupt Count = 0x%x\n",
2790 readl(&(tb->HostWrite.CoalIntCount)));
2791 printk(" Max outstanding commands = 0x%d\n",
2792 readl(&(tb->CmdsOutMax)));
2793 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2794 for (i = 0; i < 16; i++)
2795 temp_name[i] = readb(&(tb->ServerName[i]));
2796 temp_name[16] = '\0';
2797 printk(" Server Name = %s\n", temp_name);
2798 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2800 #endif /* CCISS_DEBUG */
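/*
 * find_PCI_BAR_index() below translates a config-space BAR offset (e.g.
 * PCI_BASE_ADDRESS_2 == 0x18, as reported by the controller for its config
 * table) into a PCI resource index: it walks the device's BARs, adding
 * 4 bytes for each I/O or 32-bit memory BAR and 8 bytes for each 64-bit
 * memory BAR, until the accumulated offset matches the requested one.
 */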
2802 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2804 int i, offset, mem_type, bar_type;
2805 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2808 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2809 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2810 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2813 mem_type = pci_resource_flags(pdev, i) &
2814 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2816 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2817 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2818 offset += 4; /* 32 bit */
2820 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2823 default: /* reserved in PCI 2.2 */
2825 "Base address is invalid\n");
2830 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2836 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2837 * controllers that are capable. If not, we use IO-APIC mode.
2840 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2841 struct pci_dev *pdev, __u32 board_id)
2843 #ifdef CONFIG_PCI_MSI
2845 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2849 /* Some boards advertise MSI but don't really support it */
2850 if ((board_id == 0x40700E11) ||
2851 (board_id == 0x40800E11) ||
2852 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2853 goto default_int_mode;
2855 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2856 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2858 c->intr[0] = cciss_msix_entries[0].vector;
2859 c->intr[1] = cciss_msix_entries[1].vector;
2860 c->intr[2] = cciss_msix_entries[2].vector;
2861 c->intr[3] = cciss_msix_entries[3].vector;
2866 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2867 "available\n", err);
2868 goto default_int_mode;
2870 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2872 goto default_int_mode;
2875 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2876 if (!pci_enable_msi(pdev)) {
2879 printk(KERN_WARNING "cciss: MSI init failed\n");
2883 #endif /* CONFIG_PCI_MSI */
2884 /* if we get here we're going to use the default interrupt mode */
2885 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2889 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2891 ushort subsystem_vendor_id, subsystem_device_id, command;
2892 __u32 board_id, scratchpad = 0;
2894 __u32 cfg_base_addr;
2895 __u64 cfg_base_addr_index;
2898 /* check to see if controller has been disabled */
2899 /* BEFORE trying to enable it */
2900 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2901 if (!(command & 0x02)) {
2903 "cciss: controller appears to be disabled\n");
2907 err = pci_enable_device(pdev);
2909 printk(KERN_ERR "cciss: Unable to enable PCI device\n");
2913 err = pci_request_regions(pdev, "cciss");
2915 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2920 subsystem_vendor_id = pdev->subsystem_vendor;
2921 subsystem_device_id = pdev->subsystem_device;
2922 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2923 subsystem_vendor_id);
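/*
 * Example: a subsystem device id of 0x3225 combined with subsystem vendor
 * id 0x103C (HP) yields board_id 0x3225103C -- the value the P600
 * DMA-prefetch workaround below tests against.
 */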
2926 printk("command = %x\n", command);
2927 printk("irq = %x\n", pdev->irq);
2928 printk("board_id = %x\n", board_id);
2929 #endif /* CCISS_DEBUG */
2931 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2932 * else we use the IO-APIC interrupt assigned to us by system ROM.
2934 cciss_interrupt_mode(c, pdev, board_id);
2937 * Memory base addr is the first addr; the second points to the config
2941 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2943 printk("address 0 = %x\n", c->paddr);
2944 #endif /* CCISS_DEBUG */
2945 c->vaddr = remap_pci_mem(c->paddr, 0x250);
2947 /* Wait for the board to become ready. (PCI hotplug needs this.)
2948 * We poll for up to 120 secs, once per 100ms. */
2949 for (i = 0; i < 1200; i++) {
2950 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2951 if (scratchpad == CCISS_FIRMWARE_READY)
2953 set_current_state(TASK_INTERRUPTIBLE);
2954 schedule_timeout(HZ / 10); /* wait 100ms */
2956 if (scratchpad != CCISS_FIRMWARE_READY) {
2957 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2959 goto err_out_free_res;
2962 /* get the address index number */
2963 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2964 cfg_base_addr &= (__u32) 0x0000ffff;
2966 printk("cfg base address = %x\n", cfg_base_addr);
2967 #endif /* CCISS_DEBUG */
2968 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2970 printk("cfg base address index = %x\n", cfg_base_addr_index);
2971 #endif /* CCISS_DEBUG */
2972 if (cfg_base_addr_index == -1) {
2973 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2975 goto err_out_free_res;
2978 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2980 printk("cfg offset = %x\n", cfg_offset);
2981 #endif /* CCISS_DEBUG */
2982 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2983 cfg_base_addr_index) +
2984 cfg_offset, sizeof(CfgTable_struct));
2985 c->board_id = board_id;
2988 print_cfg_table(c->cfgtable);
2989 #endif /* CCISS_DEBUG */
2991 for (i = 0; i < ARRAY_SIZE(products); i++) {
2992 if (board_id == products[i].board_id) {
2993 c->product_name = products[i].product_name;
2994 c->access = *(products[i].access);
2995 c->nr_cmds = products[i].nr_cmds;
2999 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3000 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3001 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3002 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3003 printk("Does not appear to be a valid CISS config table\n");
3005 goto err_out_free_res;
3007 /* We didn't find the controller in our list. We know the
3008 * signature is valid. If it's an HP device let's try to
3009 * bind to the device and fire it up. Otherwise we bail.
3011 if (i == ARRAY_SIZE(products)) {
3012 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3013 c->product_name = products[i-1].product_name;
3014 c->access = *(products[i-1].access);
3015 c->nr_cmds = products[i-1].nr_cmds;
3016 printk(KERN_WARNING "cciss: This is an unknown "
3017 "Smart Array controller.\n"
3018 "cciss: Please update to the latest driver "
3019 "available from www.hp.com.\n");
3021 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3022 " to access the Smart Array controller %08lx\n"
3023 , (unsigned long)board_id);
3025 goto err_out_free_res;
3030 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3032 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3034 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3038 /* Disabling DMA prefetch for the P600
3039 * An ASIC bug may result in a prefetch beyond
3042 if(board_id == 0x3225103C) {
3044 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3045 dma_prefetch |= 0x8000;
3046 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3050 printk("Trying to put board into Simple mode\n");
3051 #endif /* CCISS_DEBUG */
3052 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3053 /* Update the field, and then ring the doorbell */
3054 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3055 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3057 /* under certain very rare conditions, this can take a while.
3058 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3059 * as we enter this code.) */
3060 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3061 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3063 /* delay and try again */
3064 set_current_state(TASK_INTERRUPTIBLE);
3065 schedule_timeout(10);
3069 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3070 readl(c->vaddr + SA5_DOORBELL));
3071 #endif /* CCISS_DEBUG */
3073 print_cfg_table(c->cfgtable);
3074 #endif /* CCISS_DEBUG */
3076 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3077 printk(KERN_WARNING "cciss: unable to get board into"
3080 goto err_out_free_res;
3086 * Deliberately omit pci_disable_device(): it does something nasty to
3087 * Smart Array controllers that pci_enable_device does not undo
3089 pci_release_regions(pdev);
3094 * Gets information about the local volumes attached to the controller.
3096 static void cciss_getgeometry(int cntl_num)
3098 ReportLunData_struct *ld_buff;
3099 InquiryData_struct *inq_buff;
3105 sector_t total_size;
3107 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3108 if (ld_buff == NULL) {
3109 printk(KERN_ERR "cciss: out of memory\n");
3112 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3113 if (inq_buff == NULL) {
3114 printk(KERN_ERR "cciss: out of memory\n");
3118 /* Get the firmware version */
3119 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3120 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3122 if (return_code == IO_OK) {
3123 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3124 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3125 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3126 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3127 } else { /* send command failed */
3129 printk(KERN_WARNING "cciss: unable to determine firmware"
3130 " version of controller\n");
3132 /* Get the number of logical volumes */
3133 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3134 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3137 if (return_code == IO_OK) {
3139 printk("LUN Data\n--------------------------\n");
3140 #endif /* CCISS_DEBUG */
3143 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3145 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3147 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3148 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
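/*
 * LUNListLength is a big-endian byte array; the shifts above are an
 * open-coded big-endian load.  A minimal equivalent sketch using the
 * kernel's be32_to_cpu() helper (illustration only; assumes the field is
 * suitably aligned, which it is at offset 0 of the kzalloc'd buffer):
 */
#if 0
		listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
#endif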
3149 } else { /* reading number of logical volumes failed */
3151 printk(KERN_WARNING "cciss: report logical volume"
3152 " command failed\n");
3155 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3156 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3158 "ciss: only %d number of logical volumes supported\n",
3160 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3163 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3164 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3165 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3166 hba[cntl_num]->num_luns);
3167 #endif /* CCISS_DEBUG */
3169 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3170 for (i = 0; i < CISS_MAX_LUN; i++) {
3171 if (i < hba[cntl_num]->num_luns) {
3172 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3174 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3176 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3178 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3180 hba[cntl_num]->drv[i].LunID = lunid;
3183 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3184 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3185 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3186 hba[cntl_num]->drv[i].LunID);
3187 #endif /* CCISS_DEBUG */
3189 /* testing to see if 16-byte CDBs are already being used */
3190 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3191 cciss_read_capacity_16(cntl_num, i, 0,
3192 &total_size, &block_size);
3195 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3197 /* If read_capacity returns all F's, the logical drive is >2TB */
3198 /* so we switch to 16-byte CDBs for all read/write ops */
3199 if(total_size == 0xFFFFFFFFULL) {
3200 cciss_read_capacity_16(cntl_num, i, 0,
3201 &total_size, &block_size);
3202 hba[cntl_num]->cciss_read = CCISS_READ_16;
3203 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3205 hba[cntl_num]->cciss_read = CCISS_READ_10;
3206 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3209 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3210 block_size, inq_buff,
3211 &hba[cntl_num]->drv[i]);
3213 /* initialize raid_level to indicate a free space */
3214 hba[cntl_num]->drv[i].raid_level = -1;
3221 /* Function to find the first free pointer into our hba[] array */
3222 /* Returns -1 if no free entries are left. */
3223 static int alloc_cciss_hba(void)
3227 for (i = 0; i < MAX_CTLR; i++) {
3230 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3233 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3240 printk(KERN_WARNING "cciss: This driver supports a maximum"
3241 " of %d controllers.\n", MAX_CTLR);
3244 printk(KERN_ERR "cciss: out of memory.\n");
3248 static void free_hba(int i)
3250 ctlr_info_t *p = hba[i];
3254 for (n = 0; n < CISS_MAX_LUN; n++)
3255 put_disk(p->gendisk[n]);
3260 * This is it. Find all the controllers and register them. I really hate
3261 * stealing all these major device numbers.
3262 * returns the number of block devices registered.
3264 static int __devinit cciss_init_one(struct pci_dev *pdev,
3265 const struct pci_device_id *ent)
3272 i = alloc_cciss_hba();
3276 hba[i]->busy_initializing = 1;
3278 if (cciss_pci_init(hba[i], pdev) != 0)
3281 sprintf(hba[i]->devname, "cciss%d", i);
3283 hba[i]->pdev = pdev;
3285 /* configure PCI DMA stuff */
3286 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3288 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3291 printk(KERN_ERR "cciss: no suitable DMA available\n");
3296 * register with the major number, or get a dynamic major number
3297 * by passing 0 as argument. This is done for greater than
3298 * 8 controller support.
3300 if (i < MAX_CTLR_ORIG)
3301 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3302 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3303 if (rc == -EBUSY || rc == -EINVAL) {
3305 "cciss: Unable to get major number %d for %s "
3306 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3309 if (i >= MAX_CTLR_ORIG)
3313 /* make sure the board interrupts are off */
3314 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3315 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3316 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3317 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3318 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3322 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3323 hba[i]->devname, pdev->device, pci_name(pdev),
3324 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3326 hba[i]->cmd_pool_bits =
3327 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3328 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
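/* cmd_pool_bits holds one bit per command slot in cmd_pool, rounded up
 * to whole longs: (nr_cmds + BITS_PER_LONG - 1) / BITS_PER_LONG longs. */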
3329 hba[i]->cmd_pool = (CommandList_struct *)
3330 pci_alloc_consistent(hba[i]->pdev,
3331 hba[i]->nr_cmds * sizeof(CommandList_struct),
3332 &(hba[i]->cmd_pool_dhandle));
3333 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3334 pci_alloc_consistent(hba[i]->pdev,
3335 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3336 &(hba[i]->errinfo_pool_dhandle));
3337 if ((hba[i]->cmd_pool_bits == NULL)
3338 || (hba[i]->cmd_pool == NULL)
3339 || (hba[i]->errinfo_pool == NULL)) {
3340 printk(KERN_ERR "cciss: out of memory");
3343 #ifdef CONFIG_CISS_SCSI_TAPE
3344 hba[i]->scsi_rejects.complete =
3345 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3346 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3347 if (hba[i]->scsi_rejects.complete == NULL) {
3348 printk(KERN_ERR "cciss: out of memory");
3352 spin_lock_init(&hba[i]->lock);
3354 /* Initialize the pdev driver private data
3355 to point to hba[i]. */
3356 pci_set_drvdata(pdev, hba[i]);
3357 /* command and error info recs zeroed out before
3359 memset(hba[i]->cmd_pool_bits, 0,
3360 ((hba[i]->nr_cmds + BITS_PER_LONG -
3361 1) / BITS_PER_LONG) * sizeof(unsigned long));
3364 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3365 #endif /* CCISS_DEBUG */
3367 cciss_getgeometry(i);
3369 cciss_scsi_setup(i);
3371 /* Turn the interrupts on so we can service requests */
3372 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3376 hba[i]->cciss_max_sectors = 2048;
3378 hba[i]->busy_initializing = 0;
3381 drive_info_struct *drv = &(hba[i]->drv[j]);
3382 struct gendisk *disk = hba[i]->gendisk[j];
3385 /* Check if the disk was allocated already */
3387 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3388 disk = hba[i]->gendisk[j];
3391 /* Check that the disk was able to be allocated */
3393 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3397 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3400 "cciss: unable to allocate queue for disk %d\n",
3406 q->backing_dev_info.ra_pages = READ_AHEAD;
3407 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3409 /* This is a hardware imposed limit. */
3410 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3412 /* This is a limit in the driver and could be eliminated. */
3413 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3415 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3417 blk_queue_softirq_done(q, cciss_softirq_done);
3419 q->queuedata = hba[i];
3420 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3421 disk->major = hba[i]->major;
3422 disk->first_minor = j << NWD_SHIFT;
3423 disk->fops = &cciss_fops;
3425 disk->private_data = drv;
3426 disk->driverfs_dev = &pdev->dev;
3427 /* we must register the controller even if no disks exist */
3428 /* this is for the online array utilities */
3429 if (!drv->heads && j)
3431 blk_queue_hardsect_size(q, drv->block_size);
3432 set_capacity(disk, drv->nr_blocks);
3435 } while (j <= hba[i]->highest_lun);
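/*
 * Each logical drive gets a block of (1 << NWD_SHIFT) minors on
 * hba[i]->major: disk j uses first_minor = j << NWD_SHIFT, leaving the
 * remaining minors in the block for the partitions of /dev/cciss/c<ctlr>d<j>.
 */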
3440 #ifdef CONFIG_CISS_SCSI_TAPE
3441 kfree(hba[i]->scsi_rejects.complete);
3443 kfree(hba[i]->cmd_pool_bits);
3444 if (hba[i]->cmd_pool)
3445 pci_free_consistent(hba[i]->pdev,
3446 hba[i]->nr_cmds * sizeof(CommandList_struct),
3447 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3448 if (hba[i]->errinfo_pool)
3449 pci_free_consistent(hba[i]->pdev,
3450 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3451 hba[i]->errinfo_pool,
3452 hba[i]->errinfo_pool_dhandle);
3453 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3455 unregister_blkdev(hba[i]->major, hba[i]->devname);
3457 hba[i]->busy_initializing = 0;
3458 /* cleanup any queues that may have been initialized */
3459 for (j=0; j <= hba[i]->highest_lun; j++){
3460 drive_info_struct *drv = &(hba[i]->drv[j]);
3462 blk_cleanup_queue(drv->queue);
3465 * Deliberately omit pci_disable_device(): it does something nasty to
3466 * Smart Array controllers that pci_enable_device does not undo
3468 pci_release_regions(pdev);
3469 pci_set_drvdata(pdev, NULL);
3474 static void cciss_shutdown(struct pci_dev *pdev)
3476 ctlr_info_t *tmp_ptr;
3481 tmp_ptr = pci_get_drvdata(pdev);
3482 if (tmp_ptr == NULL)
3488 /* Turn board interrupts off and send the flush cache command */
3489 /* sendcmd will turn off interrupts and send the flush cache command
3490 * to write all data in the battery-backed cache out to disk. */
3491 memset(flush_buf, 0, 4);
3492 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3494 if (return_code == IO_OK) {
3495 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3497 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3499 free_irq(hba[i]->intr[2], hba[i]);
3502 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3504 ctlr_info_t *tmp_ptr;
3507 if (pci_get_drvdata(pdev) == NULL) {
3508 printk(KERN_ERR "cciss: Unable to remove device\n");
3511 tmp_ptr = pci_get_drvdata(pdev);
3513 if (hba[i] == NULL) {
3514 printk(KERN_ERR "cciss: device appears to "
3515 "already be removed \n");
3519 remove_proc_entry(hba[i]->devname, proc_cciss);
3520 unregister_blkdev(hba[i]->major, hba[i]->devname);
3522 /* remove it from the disk list */
3523 for (j = 0; j < CISS_MAX_LUN; j++) {
3524 struct gendisk *disk = hba[i]->gendisk[j];
3526 request_queue_t *q = disk->queue;
3528 if (disk->flags & GENHD_FL_UP)
3531 blk_cleanup_queue(q);
3535 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3537 cciss_shutdown(pdev);
3539 #ifdef CONFIG_PCI_MSI
3540 if (hba[i]->msix_vector)
3541 pci_disable_msix(hba[i]->pdev);
3542 else if (hba[i]->msi_vector)
3543 pci_disable_msi(hba[i]->pdev);
3544 #endif /* CONFIG_PCI_MSI */
3546 iounmap(hba[i]->vaddr);
3548 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3549 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3550 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3551 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3552 kfree(hba[i]->cmd_pool_bits);
3553 #ifdef CONFIG_CISS_SCSI_TAPE
3554 kfree(hba[i]->scsi_rejects.complete);
3557 * Deliberately omit pci_disable_device(): it does something nasty to
3558 * Smart Array controllers that pci_enable_device does not undo
3560 pci_release_regions(pdev);
3561 pci_set_drvdata(pdev, NULL);
3565 static struct pci_driver cciss_pci_driver = {
3567 .probe = cciss_init_one,
3568 .remove = __devexit_p(cciss_remove_one),
3569 .id_table = cciss_pci_device_id, /* id_table */
3570 .shutdown = cciss_shutdown,
3574 * This is it. Register the PCI driver information for the cards we control;
3575 * the OS will call our registered routines when it finds one of our cards.
3577 static int __init cciss_init(void)
3579 printk(KERN_INFO DRIVER_NAME "\n");
3581 /* Register for our PCI devices */
3582 return pci_register_driver(&cciss_pci_driver);
3585 static void __exit cciss_cleanup(void)
3589 pci_unregister_driver(&cciss_pci_driver);
3590 /* double check that all controller entries have been removed */
3591 for (i = 0; i < MAX_CTLR; i++) {
3592 if (hba[i] != NULL) {
3593 printk(KERN_WARNING "cciss: had to remove"
3594 " controller %d\n", i);
3595 cciss_remove_one(hba[i]->pdev);
3598 remove_proc_entry("cciss", proc_root_driver);
3601 static void fail_all_cmds(unsigned long ctlr)
3603 /* If we get here, the board is apparently dead. */
3604 ctlr_info_t *h = hba[ctlr];
3605 CommandList_struct *c;
3606 unsigned long flags;
3608 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3609 h->alive = 0; /* the controller apparently died... */
3611 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3613 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3615 /* move everything off the request queue onto the completed queue */
3616 while ((c = h->reqQ) != NULL) {
3617 removeQ(&(h->reqQ), c);
3619 addQ(&(h->cmpQ), c);
3622 /* Now, fail everything on the completed queue with a HW error */
3623 while ((c = h->cmpQ) != NULL) {
3624 removeQ(&h->cmpQ, c);
3625 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3626 if (c->cmd_type == CMD_RWREQ) {
3627 complete_command(h, c, 0);
3628 } else if (c->cmd_type == CMD_IOCTL_PEND)
3629 complete(c->waiting);
3630 #ifdef CONFIG_CISS_SCSI_TAPE
3631 else if (c->cmd_type == CMD_SCSI)
3632 complete_scsi_command(c, 0, 0);
3635 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3639 module_init(cciss_init);
3640 module_exit(cciss_cleanup);