2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/compat.h>
40 #include <linux/blktrace_api.h>
41 #include <asm/uaccess.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
48 #include <scsi/scsi.h>
50 #include <scsi/scsi_ioctl.h>
51 #include <linux/cdrom.h>
53 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
54 #define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
55 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
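/* Worked example of the version macro above: CCISS_DRIVER_VERSION(3,6,14)
 * evaluates to (3 << 16) | (6 << 8) | 14 = 0x0003060E.  This packed value is
 * what the CCISS_GETDRIVVER ioctl below copies out to user space as a
 * DriverVer_type. */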
57 /* Embedded module documentation macros - see modules.h */
58 MODULE_AUTHOR("Hewlett-Packard Company");
59 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
60 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
61 " SA6i P600 P800 P400 P400i E200 E200i E500");
62 MODULE_VERSION("3.6.14");
63 MODULE_LICENSE("GPL");
65 #include "cciss_cmd.h"
67 #include <linux/cciss_ioctl.h>
69 /* define the PCI info for the cards we can control */
70 static const struct pci_device_id cciss_pci_device_id[] = {
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
76 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
77 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
78 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
79 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
91 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
92 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
96 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
98 /* board_id = Subsystem Device ID & Vendor ID
99 * product = Marketing Name for the board
100 * access = Address of the struct of function pointers
101 * nr_cmds = Number of commands supported by controller
103 static struct board_type products[] = {
104 {0x40700E11, "Smart Array 5300", &SA5_access, 512},
105 {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
106 {0x40820E11, "Smart Array 532", &SA5B_access, 512},
107 {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
108 {0x409A0E11, "Smart Array 641", &SA5_access, 512},
109 {0x409B0E11, "Smart Array 642", &SA5_access, 512},
110 {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
111 {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
112 {0x40910E11, "Smart Array 6i", &SA5_access, 512},
113 {0x3225103C, "Smart Array P600", &SA5_access, 512},
114 {0x3223103C, "Smart Array P800", &SA5_access, 512},
115 {0x3234103C, "Smart Array P400", &SA5_access, 512},
116 {0x3235103C, "Smart Array P400i", &SA5_access, 512},
117 {0x3211103C, "Smart Array E200i", &SA5_access, 120},
118 {0x3212103C, "Smart Array E200", &SA5_access, 120},
119 {0x3213103C, "Smart Array E200i", &SA5_access, 120},
120 {0x3214103C, "Smart Array E200i", &SA5_access, 120},
121 {0x3215103C, "Smart Array E200i", &SA5_access, 120},
122 {0x3237103C, "Smart Array E500", &SA5_access, 512},
123 {0x323D103C, "Smart Array P700m", &SA5_access, 512},
124 {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
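/* Worked example of the board_id layout described above: the subsystem
 * device ID forms the high 16 bits and the subsystem vendor ID the low 16
 * bits, so the PCI table entry {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
 * 0x103C, 0x3225} corresponds to the products[] entry 0x3225103C,
 * "Smart Array P600". */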
127 /* How long to wait (in milliseconds) for board to go into simple mode */
128 #define MAX_CONFIG_WAIT 30000
129 #define MAX_IOCTL_CONFIG_WAIT 1000
131 /* define how many times we will try a command because of bus resets */
132 #define MAX_CMD_RETRIES 3
134 #define READ_AHEAD 1024
137 /* Originally the cciss driver only supported 8 major numbers */
138 #define MAX_CTLR_ORIG 8
140 static ctlr_info_t *hba[MAX_CTLR];
142 static void do_cciss_request(struct request_queue *q);
143 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
144 static int cciss_open(struct inode *inode, struct file *filep);
145 static int cciss_release(struct inode *inode, struct file *filep);
146 static int cciss_ioctl(struct inode *inode, struct file *filep,
147 unsigned int cmd, unsigned long arg);
148 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
150 static int cciss_revalidate(struct gendisk *disk);
151 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
152 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
155 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
156 sector_t *total_size, unsigned int *block_size);
157 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
158 sector_t *total_size, unsigned int *block_size);
159 static void cciss_geometry_inquiry(int ctlr, int logvol,
160 int withirq, sector_t total_size,
161 unsigned int block_size, InquiryData_struct *inq_buff,
162 drive_info_struct *drv);
163 static void cciss_getgeometry(int cntl_num);
164 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
166 static void start_io(ctlr_info_t *h);
167 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
168 unsigned int use_unit_num, unsigned int log_unit,
169 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
170 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
171 unsigned int use_unit_num, unsigned int log_unit,
172 __u8 page_code, int cmd_type);
174 static void fail_all_cmds(unsigned long ctlr);
176 #ifdef CONFIG_PROC_FS
177 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
178 int length, int *eof, void *data);
179 static void cciss_procinit(int i);
181 static void cciss_procinit(int i)
184 #endif /* CONFIG_PROC_FS */
187 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
190 static struct block_device_operations cciss_fops = {
191 .owner = THIS_MODULE,
193 .release = cciss_release,
194 .ioctl = cciss_ioctl,
195 .getgeo = cciss_getgeo,
197 .compat_ioctl = cciss_compat_ioctl,
199 .revalidate_disk = cciss_revalidate,
203 * Enqueuing and dequeuing functions for cmdlists.
205 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
209 c->next = c->prev = c;
211 c->prev = (*Qptr)->prev;
213 (*Qptr)->prev->next = c;
218 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
219 CommandList_struct *c)
221 if (c && c->next != c) {
224 c->prev->next = c->next;
225 c->next->prev = c->prev;
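/* A minimal usage sketch of the queue helpers above (illustration only, not
 * compiled into the driver).  It assumes a valid ctlr_info_t *h; addQ()
 * inserts at the tail of the circular, doubly linked ring headed by *Qptr,
 * and removeQ() unlinks the given command. */
#if 0
CommandList_struct *q = NULL;
CommandList_struct *a = cmd_alloc(h, 1);
CommandList_struct *b = cmd_alloc(h, 1);

addQ(&q, a);            /* q -> a, and a->next == a->prev == a */
addQ(&q, b);            /* q -> a <-> b, still a circular ring */
removeQ(&q, b);         /* unlinks b; q still heads the ring holding a */
cmd_free(h, a, 1);
cmd_free(h, b, 1);
#endif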
232 #include "cciss_scsi.c" /* For SCSI tape support */
234 #define RAID_UNKNOWN 6
236 #ifdef CONFIG_PROC_FS
239 * Report information about this controller.
241 #define ENG_GIG 1000000000
242 #define ENG_GIG_FACTOR (ENG_GIG/512)
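/* Worked example: ENG_GIG_FACTOR is 1000000000/512 = 1953125, the number of
 * 512-byte sectors in a decimal gigabyte.  A logical drive of 143000000
 * sectors reports 143000000 / 1953125 = 73 whole GB, and the remaining
 * 421875 sectors, scaled to hundredths, supply the ".21" printed by the
 * "%4u.%02uGB" format used below. */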
243 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
247 static struct proc_dir_entry *proc_cciss;
249 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
250 int length, int *eof, void *data)
255 ctlr_info_t *h = (ctlr_info_t *) data;
256 drive_info_struct *drv;
258 sector_t vol_sz, vol_sz_frac;
262 /* prevent displaying bogus info during configuration
263 * or deconfiguration of a logical volume
265 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
266 if (h->busy_configuring) {
267 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
270 h->busy_configuring = 1;
271 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
273 size = sprintf(buffer, "%s: HP %s Controller\n"
274 "Board ID: 0x%08lx\n"
275 "Firmware Version: %c%c%c%c\n"
277 "Logical drives: %d\n"
279 "Current Q depth: %d\n"
280 "Current # commands on controller: %d\n"
281 "Max Q depth since init: %d\n"
282 "Max # commands on controller since init: %d\n"
283 "Max SG entries since init: %d\n\n",
286 (unsigned long)h->board_id,
287 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
288 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
290 h->cciss_max_sectors,
291 h->Qdepth, h->commands_outstanding,
292 h->maxQsinceinit, h->max_outstanding, h->maxSG);
296 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
297 for (i = 0; i <= h->highest_lun; i++) {
303 vol_sz = drv->nr_blocks;
304 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
306 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
308 if (drv->raid_level > 5)
309 drv->raid_level = RAID_UNKNOWN;
310 size = sprintf(buffer + len, "cciss/c%dd%d:"
311 "\t%4u.%02uGB\tRAID %s\n",
312 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
313 raid_label[drv->raid_level]);
319 *start = buffer + offset;
323 h->busy_configuring = 0;
328 cciss_proc_write(struct file *file, const char __user *buffer,
329 unsigned long count, void *data)
331 unsigned char cmd[80];
333 #ifdef CONFIG_CISS_SCSI_TAPE
334 ctlr_info_t *h = (ctlr_info_t *) data;
338 if (count > sizeof(cmd) - 1)
340 if (copy_from_user(cmd, buffer, count))
343 len = strlen(cmd); // above 3 lines ensure safety
344 if (len && cmd[len - 1] == '\n')
346 # ifdef CONFIG_CISS_SCSI_TAPE
347 if (strcmp("engage scsi", cmd) == 0) {
348 rc = cciss_engage_scsi(h->ctlr);
353 /* might be nice to have "disengage" too, but it's not
354 safely possible. (only 1 module use count, lock issues.) */
360 * Get us a file in /proc/cciss that says something about each controller.
361 * Create /proc/cciss if it doesn't exist yet.
363 static void __devinit cciss_procinit(int i)
365 struct proc_dir_entry *pde;
367 if (proc_cciss == NULL) {
368 proc_cciss = proc_mkdir("cciss", proc_root_driver);
373 pde = create_proc_read_entry(hba[i]->devname,
374 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
375 proc_cciss, cciss_proc_get_info, hba[i]);
376 pde->write_proc = cciss_proc_write;
378 #endif /* CONFIG_PROC_FS */
381 * For operations that cannot sleep, a command block is allocated at init,
382 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
383 * which ones are free or in use. For operations that can wait for kmalloc
384 * to possibly sleep, this routine can be called with get_from_pool set to 0.
385 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc was.
387 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
389 CommandList_struct *c;
392 dma_addr_t cmd_dma_handle, err_dma_handle;
394 if (!get_from_pool) {
395 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
396 sizeof(CommandList_struct), &cmd_dma_handle);
399 memset(c, 0, sizeof(CommandList_struct));
403 c->err_info = (ErrorInfo_struct *)
404 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
407 if (c->err_info == NULL) {
408 pci_free_consistent(h->pdev,
409 sizeof(CommandList_struct), c, cmd_dma_handle);
412 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
413 } else { /* get it out of the controller's pool */
416 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
419 } while (test_and_set_bit
420 (i & (BITS_PER_LONG - 1),
421 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
423 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
426 memset(c, 0, sizeof(CommandList_struct));
427 cmd_dma_handle = h->cmd_pool_dhandle
428 + i * sizeof(CommandList_struct);
429 c->err_info = h->errinfo_pool + i;
430 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
431 err_dma_handle = h->errinfo_pool_dhandle
432 + i * sizeof(ErrorInfo_struct);
438 c->busaddr = (__u32) cmd_dma_handle;
439 temp64.val = (__u64) err_dma_handle;
440 c->ErrDesc.Addr.lower = temp64.val32.lower;
441 c->ErrDesc.Addr.upper = temp64.val32.upper;
442 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
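/* A usage sketch of the two allocation paths handled above (illustration
 * only, not compiled; assumes a ctlr_info_t *h and CommandList_struct *c):
 * get_from_pool/got_from_pool choose between a fresh DMA-consistent
 * allocation and the preallocated per-controller pool, and the flags passed
 * to cmd_alloc() and cmd_free() must always match. */
#if 0
/* sleeping context, e.g. the passthru ioctls below */
c = cmd_alloc(h, 0);
/* ... build and issue the command ... */
cmd_free(h, c, 0);

/* atomic/polled context, e.g. sendcmd() at init time */
c = cmd_alloc(h, 1);
/* ... */
cmd_free(h, c, 1);
#endif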
449 * Frees a command block that was previously allocated with cmd_alloc().
451 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
456 if (!got_from_pool) {
457 temp64.val32.lower = c->ErrDesc.Addr.lower;
458 temp64.val32.upper = c->ErrDesc.Addr.upper;
459 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
460 c->err_info, (dma_addr_t) temp64.val);
461 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
462 c, (dma_addr_t) c->busaddr);
465 clear_bit(i & (BITS_PER_LONG - 1),
466 h->cmd_pool_bits + (i / BITS_PER_LONG));
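/* Worked example of the bitmap indexing above: pool index i is split into a
 * word index (i / BITS_PER_LONG) and a bit index (i & (BITS_PER_LONG - 1)),
 * so with 64-bit longs command 70 occupies bit 6 of cmd_pool_bits[1]. */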
471 static inline ctlr_info_t *get_host(struct gendisk *disk)
473 return disk->queue->queuedata;
476 static inline drive_info_struct *get_drv(struct gendisk *disk)
478 return disk->private_data;
482 * Open. Make sure the device is really there.
484 static int cciss_open(struct inode *inode, struct file *filep)
486 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
487 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
490 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
491 #endif /* CCISS_DEBUG */
493 if (host->busy_initializing || drv->busy_configuring)
496 * Root is allowed to open raw volume zero even if it's not configured
497 * so array config can still work. Root is also allowed to open any
498 * volume that has a LUN ID, so it can issue IOCTL to reread the
499 * disk information. I don't think I really like this
500 * but I'm already using way too many device nodes to claim another one
501 * for "raw controller".
503 if (drv->heads == 0) {
504 if (iminor(inode) != 0) { /* not node 0? */
505 /* if not node 0, make sure it is partition 0 */
506 if (iminor(inode) & 0x0f) {
508 /* if it is, make sure we have a LUN ID */
509 } else if (drv->LunID == 0) {
513 if (!capable(CAP_SYS_ADMIN))
524 static int cciss_release(struct inode *inode, struct file *filep)
526 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
527 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
530 printk(KERN_DEBUG "cciss_release %s\n",
531 inode->i_bdev->bd_disk->disk_name);
532 #endif /* CCISS_DEBUG */
541 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
545 ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
550 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
552 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
555 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
558 case CCISS_GETPCIINFO:
559 case CCISS_GETINTINFO:
560 case CCISS_SETINTINFO:
561 case CCISS_GETNODENAME:
562 case CCISS_SETNODENAME:
563 case CCISS_GETHEARTBEAT:
564 case CCISS_GETBUSTYPES:
565 case CCISS_GETFIRMVER:
566 case CCISS_GETDRIVVER:
567 case CCISS_REVALIDVOLS:
568 case CCISS_DEREGDISK:
569 case CCISS_REGNEWDISK:
571 case CCISS_RESCANDISK:
572 case CCISS_GETLUNINFO:
573 return do_ioctl(f, cmd, arg);
575 case CCISS_PASSTHRU32:
576 return cciss_ioctl32_passthru(f, cmd, arg);
577 case CCISS_BIG_PASSTHRU32:
578 return cciss_ioctl32_big_passthru(f, cmd, arg);
585 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
588 IOCTL32_Command_struct __user *arg32 =
589 (IOCTL32_Command_struct __user *) arg;
590 IOCTL_Command_struct arg64;
591 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
597 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
598 sizeof(arg64.LUN_info));
600 copy_from_user(&arg64.Request, &arg32->Request,
601 sizeof(arg64.Request));
603 copy_from_user(&arg64.error_info, &arg32->error_info,
604 sizeof(arg64.error_info));
605 err |= get_user(arg64.buf_size, &arg32->buf_size);
606 err |= get_user(cp, &arg32->buf);
607 arg64.buf = compat_ptr(cp);
608 err |= copy_to_user(p, &arg64, sizeof(arg64));
613 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
617 copy_in_user(&arg32->error_info, &p->error_info,
618 sizeof(arg32->error_info));
624 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
627 BIG_IOCTL32_Command_struct __user *arg32 =
628 (BIG_IOCTL32_Command_struct __user *) arg;
629 BIG_IOCTL_Command_struct arg64;
630 BIG_IOCTL_Command_struct __user *p =
631 compat_alloc_user_space(sizeof(arg64));
637 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
638 sizeof(arg64.LUN_info));
640 copy_from_user(&arg64.Request, &arg32->Request,
641 sizeof(arg64.Request));
643 copy_from_user(&arg64.error_info, &arg32->error_info,
644 sizeof(arg64.error_info));
645 err |= get_user(arg64.buf_size, &arg32->buf_size);
646 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
647 err |= get_user(cp, &arg32->buf);
648 arg64.buf = compat_ptr(cp);
649 err |= copy_to_user(p, &arg64, sizeof(arg64));
654 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
658 copy_in_user(&arg32->error_info, &p->error_info,
659 sizeof(arg32->error_info));
666 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
668 drive_info_struct *drv = get_drv(bdev->bd_disk);
673 geo->heads = drv->heads;
674 geo->sectors = drv->sectors;
675 geo->cylinders = drv->cylinders;
682 static int cciss_ioctl(struct inode *inode, struct file *filep,
683 unsigned int cmd, unsigned long arg)
685 struct block_device *bdev = inode->i_bdev;
686 struct gendisk *disk = bdev->bd_disk;
687 ctlr_info_t *host = get_host(disk);
688 drive_info_struct *drv = get_drv(disk);
689 int ctlr = host->ctlr;
690 void __user *argp = (void __user *)arg;
693 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
694 #endif /* CCISS_DEBUG */
697 case CCISS_GETPCIINFO:
699 cciss_pci_info_struct pciinfo;
703 pciinfo.domain = pci_domain_nr(host->pdev->bus);
704 pciinfo.bus = host->pdev->bus->number;
705 pciinfo.dev_fn = host->pdev->devfn;
706 pciinfo.board_id = host->board_id;
708 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
712 case CCISS_GETINTINFO:
714 cciss_coalint_struct intinfo;
718 readl(&host->cfgtable->HostWrite.CoalIntDelay);
720 readl(&host->cfgtable->HostWrite.CoalIntCount);
722 (argp, &intinfo, sizeof(cciss_coalint_struct)))
726 case CCISS_SETINTINFO:
728 cciss_coalint_struct intinfo;
734 if (!capable(CAP_SYS_ADMIN))
737 (&intinfo, argp, sizeof(cciss_coalint_struct)))
739 if ((intinfo.delay == 0) && (intinfo.count == 0))
741 // printk("cciss_ioctl: delay and count cannot be 0\n");
744 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
745 /* Update the field, and then ring the doorbell */
746 writel(intinfo.delay,
747 &(host->cfgtable->HostWrite.CoalIntDelay));
748 writel(intinfo.count,
749 &(host->cfgtable->HostWrite.CoalIntCount));
750 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
752 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
753 if (!(readl(host->vaddr + SA5_DOORBELL)
756 /* delay and try again */
759 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
760 if (i >= MAX_IOCTL_CONFIG_WAIT)
764 case CCISS_GETNODENAME:
766 NodeName_type NodeName;
771 for (i = 0; i < 16; i++)
773 readb(&host->cfgtable->ServerName[i]);
774 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
778 case CCISS_SETNODENAME:
780 NodeName_type NodeName;
786 if (!capable(CAP_SYS_ADMIN))
790 (NodeName, argp, sizeof(NodeName_type)))
793 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
795 /* Update the field, and then ring the doorbell */
796 for (i = 0; i < 16; i++)
798 &host->cfgtable->ServerName[i]);
800 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
802 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
803 if (!(readl(host->vaddr + SA5_DOORBELL)
806 /* delay and try again */
809 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
810 if (i >= MAX_IOCTL_CONFIG_WAIT)
815 case CCISS_GETHEARTBEAT:
817 Heartbeat_type heartbeat;
821 heartbeat = readl(&host->cfgtable->HeartBeat);
823 (argp, &heartbeat, sizeof(Heartbeat_type)))
827 case CCISS_GETBUSTYPES:
829 BusTypes_type BusTypes;
833 BusTypes = readl(&host->cfgtable->BusTypes);
835 (argp, &BusTypes, sizeof(BusTypes_type)))
839 case CCISS_GETFIRMVER:
841 FirmwareVer_type firmware;
845 memcpy(firmware, host->firm_ver, 4);
848 (argp, firmware, sizeof(FirmwareVer_type)))
852 case CCISS_GETDRIVVER:
854 DriverVer_type DriverVer = DRIVER_VERSION;
860 (argp, &DriverVer, sizeof(DriverVer_type)))
865 case CCISS_REVALIDVOLS:
866 return rebuild_lun_table(host, NULL);
868 case CCISS_GETLUNINFO:{
869 LogvolInfo_struct luninfo;
871 luninfo.LunID = drv->LunID;
872 luninfo.num_opens = drv->usage_count;
873 luninfo.num_parts = 0;
874 if (copy_to_user(argp, &luninfo,
875 sizeof(LogvolInfo_struct)))
879 case CCISS_DEREGDISK:
880 return rebuild_lun_table(host, disk);
883 return rebuild_lun_table(host, NULL);
887 IOCTL_Command_struct iocommand;
888 CommandList_struct *c;
892 DECLARE_COMPLETION_ONSTACK(wait);
897 if (!capable(CAP_SYS_RAWIO))
901 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
903 if ((iocommand.buf_size < 1) &&
904 (iocommand.Request.Type.Direction != XFER_NONE)) {
907 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
908 /* Check kmalloc limits */
909 if (iocommand.buf_size > 128000)
912 if (iocommand.buf_size > 0) {
913 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
917 if (iocommand.Request.Type.Direction == XFER_WRITE) {
918 /* Copy the data into the buffer we created */
920 (buff, iocommand.buf, iocommand.buf_size)) {
925 memset(buff, 0, iocommand.buf_size);
927 if ((c = cmd_alloc(host, 0)) == NULL) {
931 // Fill in the command type
932 c->cmd_type = CMD_IOCTL_PEND;
933 // Fill in Command Header
934 c->Header.ReplyQueue = 0; // unused in simple mode
935 if (iocommand.buf_size > 0) // buffer to fill
937 c->Header.SGList = 1;
938 c->Header.SGTotal = 1;
939 } else // no buffers to fill
941 c->Header.SGList = 0;
942 c->Header.SGTotal = 0;
944 c->Header.LUN = iocommand.LUN_info;
945 c->Header.Tag.lower = c->busaddr; // use the DMA address of the cmd block as the tag
947 // Fill in Request block
948 c->Request = iocommand.Request;
950 // Fill in the scatter gather information
951 if (iocommand.buf_size > 0) {
952 temp64.val = pci_map_single(host->pdev, buff,
954 PCI_DMA_BIDIRECTIONAL);
955 c->SG[0].Addr.lower = temp64.val32.lower;
956 c->SG[0].Addr.upper = temp64.val32.upper;
957 c->SG[0].Len = iocommand.buf_size;
958 c->SG[0].Ext = 0; // we are not chaining
962 /* Put the request on the tail of the request queue */
963 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
964 addQ(&host->reqQ, c);
967 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
969 wait_for_completion(&wait);
971 /* unlock the buffers from DMA */
972 temp64.val32.lower = c->SG[0].Addr.lower;
973 temp64.val32.upper = c->SG[0].Addr.upper;
974 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
976 PCI_DMA_BIDIRECTIONAL);
978 /* Copy the error information out */
979 iocommand.error_info = *(c->err_info);
981 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
983 cmd_free(host, c, 0);
987 if (iocommand.Request.Type.Direction == XFER_READ) {
988 /* Copy the data out of the buffer we created */
990 (iocommand.buf, buff, iocommand.buf_size)) {
992 cmd_free(host, c, 0);
997 cmd_free(host, c, 0);
1000 case CCISS_BIG_PASSTHRU:{
1001 BIG_IOCTL_Command_struct *ioc;
1002 CommandList_struct *c;
1003 unsigned char **buff = NULL;
1004 int *buff_size = NULL;
1006 unsigned long flags;
1010 DECLARE_COMPLETION_ONSTACK(wait);
1013 BYTE __user *data_ptr;
1017 if (!capable(CAP_SYS_RAWIO))
1019 ioc = (BIG_IOCTL_Command_struct *)
1020 kmalloc(sizeof(*ioc), GFP_KERNEL);
1025 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1029 if ((ioc->buf_size < 1) &&
1030 (ioc->Request.Type.Direction != XFER_NONE)) {
1034 /* Check kmalloc limits using all SGs */
1035 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1039 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1044 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1049 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1055 left = ioc->buf_size;
1056 data_ptr = ioc->buf;
1059 ioc->malloc_size) ? ioc->
1061 buff_size[sg_used] = sz;
1062 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1063 if (buff[sg_used] == NULL) {
1067 if (ioc->Request.Type.Direction == XFER_WRITE) {
1069 (buff[sg_used], data_ptr, sz)) {
1074 memset(buff[sg_used], 0, sz);
1080 if ((c = cmd_alloc(host, 0)) == NULL) {
1084 c->cmd_type = CMD_IOCTL_PEND;
1085 c->Header.ReplyQueue = 0;
1087 if (ioc->buf_size > 0) {
1088 c->Header.SGList = sg_used;
1089 c->Header.SGTotal = sg_used;
1091 c->Header.SGList = 0;
1092 c->Header.SGTotal = 0;
1094 c->Header.LUN = ioc->LUN_info;
1095 c->Header.Tag.lower = c->busaddr;
1097 c->Request = ioc->Request;
1098 if (ioc->buf_size > 0) {
1100 for (i = 0; i < sg_used; i++) {
1102 pci_map_single(host->pdev, buff[i],
1104 PCI_DMA_BIDIRECTIONAL);
1105 c->SG[i].Addr.lower =
1107 c->SG[i].Addr.upper =
1109 c->SG[i].Len = buff_size[i];
1110 c->SG[i].Ext = 0; /* we are not chaining */
1114 /* Put the request on the tail of the request queue */
1115 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1116 addQ(&host->reqQ, c);
1119 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1120 wait_for_completion(&wait);
1121 /* unlock the buffers from DMA */
1122 for (i = 0; i < sg_used; i++) {
1123 temp64.val32.lower = c->SG[i].Addr.lower;
1124 temp64.val32.upper = c->SG[i].Addr.upper;
1125 pci_unmap_single(host->pdev,
1126 (dma_addr_t) temp64.val, buff_size[i],
1127 PCI_DMA_BIDIRECTIONAL);
1129 /* Copy the error information out */
1130 ioc->error_info = *(c->err_info);
1131 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1132 cmd_free(host, c, 0);
1136 if (ioc->Request.Type.Direction == XFER_READ) {
1137 /* Copy the data out of the buffer we created */
1138 BYTE __user *ptr = ioc->buf;
1139 for (i = 0; i < sg_used; i++) {
1141 (ptr, buff[i], buff_size[i])) {
1142 cmd_free(host, c, 0);
1146 ptr += buff_size[i];
1149 cmd_free(host, c, 0);
1153 for (i = 0; i < sg_used; i++)
1162 /* scsi_cmd_ioctl handles these, below, though some are not */
1163 /* very meaningful for cciss. SG_IO is the main one people want. */
1165 case SG_GET_VERSION_NUM:
1166 case SG_SET_TIMEOUT:
1167 case SG_GET_TIMEOUT:
1168 case SG_GET_RESERVED_SIZE:
1169 case SG_SET_RESERVED_SIZE:
1170 case SG_EMULATED_HOST:
1172 case SCSI_IOCTL_SEND_COMMAND:
1173 return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
1175 /* scsi_cmd_ioctl would normally handle these, below, but */
1176 /* they aren't a good fit for cciss, as CD-ROMs are */
1177 /* not supported, and we don't have any bus/target/lun */
1178 /* which we present to the kernel. */
1180 case CDROM_SEND_PACKET:
1181 case CDROMCLOSETRAY:
1183 case SCSI_IOCTL_GET_IDLUN:
1184 case SCSI_IOCTL_GET_BUS_NUMBER:
1190 static inline void complete_buffers(struct bio *bio, int status)
1193 struct bio *xbh = bio->bi_next;
1195 bio->bi_next = NULL;
1196 bio_endio(bio, status ? 0 : -EIO);
1201 static void cciss_check_queues(ctlr_info_t *h)
1203 int start_queue = h->next_to_run;
1206 /* check to see if we have maxed out the number of commands that can
1207 * be placed on the queue. If so then exit. We do this check here
1208 * in case the interrupt we serviced was from an ioctl and did not
1209 * free any new commands.
1211 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1214 /* We have room on the queue for more commands. Now we need to queue
1215 * them up. We will also keep track of the next queue to run so
1216 * that every queue gets a chance to be started first.
1218 for (i = 0; i < h->highest_lun + 1; i++) {
1219 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1220 /* make sure the disk has been added and the drive is real
1221 * because this can be called from the middle of init_one.
1223 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1225 blk_start_queue(h->gendisk[curr_queue]->queue);
1227 /* check to see if we have maxed out the number of commands
1228 * that can be placed on the queue.
1230 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1231 if (curr_queue == start_queue) {
1233 (start_queue + 1) % (h->highest_lun + 1);
1236 h->next_to_run = curr_queue;
1240 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1245 static void cciss_softirq_done(struct request *rq)
1247 CommandList_struct *cmd = rq->completion_data;
1248 ctlr_info_t *h = hba[cmd->ctlr];
1249 unsigned long flags;
1253 if (cmd->Request.Type.Direction == XFER_READ)
1254 ddir = PCI_DMA_FROMDEVICE;
1256 ddir = PCI_DMA_TODEVICE;
1258 /* command did not need to be retried */
1259 /* unmap the DMA mapping for all the scatter gather elements */
1260 for (i = 0; i < cmd->Header.SGList; i++) {
1261 temp64.val32.lower = cmd->SG[i].Addr.lower;
1262 temp64.val32.upper = cmd->SG[i].Addr.upper;
1263 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1266 complete_buffers(rq->bio, (rq->errors == 0));
1268 if (blk_fs_request(rq)) {
1269 const int rw = rq_data_dir(rq);
1271 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1275 printk("Done with %p\n", rq);
1276 #endif /* CCISS_DEBUG */
1278 add_disk_randomness(rq->rq_disk);
1279 spin_lock_irqsave(&h->lock, flags);
1280 end_that_request_last(rq, (rq->errors == 0));
1281 cmd_free(h, cmd, 1);
1282 cciss_check_queues(h);
1283 spin_unlock_irqrestore(&h->lock, flags);
1286 /* This function will check the usage_count of the drive to be updated/added.
1287 * If the usage_count is zero then the drive information will be updated and
1288 * the disk will be re-registered with the kernel. If not then it will be
1289 * left alone for the next reboot. The exception to this is disk 0 which
1290 * will always be left registered with the kernel since it is also the
1291 * controller node. Any changes to disk 0 will show up on the next
1294 static void cciss_update_drive_info(int ctlr, int drv_index)
1296 ctlr_info_t *h = hba[ctlr];
1297 struct gendisk *disk;
1298 InquiryData_struct *inq_buff = NULL;
1299 unsigned int block_size;
1300 sector_t total_size;
1301 unsigned long flags = 0;
1304 /* if the disk already exists then deregister it before proceeding */
1305 if (h->drv[drv_index].raid_level != -1) {
1306 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1307 h->drv[drv_index].busy_configuring = 1;
1308 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1309 ret = deregister_disk(h->gendisk[drv_index],
1310 &h->drv[drv_index], 0);
1311 h->drv[drv_index].busy_configuring = 0;
1314 /* If the disk is in use return */
1318 /* Get information about the disk and modify the driver structure */
1319 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1320 if (inq_buff == NULL)
1323 /* testing to see if 16-byte CDBs are already being used */
1324 if (h->cciss_read == CCISS_READ_16) {
1325 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1326 &total_size, &block_size);
1330 cciss_read_capacity(ctlr, drv_index, 1,
1331 &total_size, &block_size);
1333 /* if read_capacity returns all F's this volume is >2TB in size */
1334 /* so we switch to 16-byte CDBs for all read/write ops */
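/* (A short aside on the check below: READ CAPACITY(10) returns the last LBA
 * as a 32-bit value, so an all-F's answer means the volume needs more than
 * 32 bits, i.e. more than 2^32 512-byte blocks, the 2TB boundary mentioned
 * above, and the 16-byte CDB variants have to be used instead.) */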
1335 if (total_size == 0xFFFFFFFFULL) {
1336 cciss_read_capacity_16(ctlr, drv_index, 1,
1337 &total_size, &block_size);
1338 h->cciss_read = CCISS_READ_16;
1339 h->cciss_write = CCISS_WRITE_16;
1341 h->cciss_read = CCISS_READ_10;
1342 h->cciss_write = CCISS_WRITE_10;
1345 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1346 inq_buff, &h->drv[drv_index]);
1349 disk = h->gendisk[drv_index];
1350 set_capacity(disk, h->drv[drv_index].nr_blocks);
1352 /* if it's the controller it's already added */
1354 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1355 sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
1356 disk->major = h->major;
1357 disk->first_minor = drv_index << NWD_SHIFT;
1358 disk->fops = &cciss_fops;
1359 disk->private_data = &h->drv[drv_index];
1361 /* Set up queue information */
1362 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1363 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1365 /* This is a hardware imposed limit. */
1366 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1368 /* This is a limit in the driver and could be eliminated. */
1369 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1371 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1373 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1375 disk->queue->queuedata = hba[ctlr];
1377 blk_queue_hardsect_size(disk->queue,
1378 hba[ctlr]->drv[drv_index].block_size);
1380 h->drv[drv_index].queue = disk->queue;
1388 printk(KERN_ERR "cciss: out of memory\n");
1392 /* This function will find the first index of the controller's drive array
1393 * that has a -1 for the raid_level and will return that index. This is
1394 * where new drives will be added. If the index to be returned is greater
1395 * than the highest_lun index for the controller then highest_lun is set
1396 * to this new index. If there are no available indexes then -1 is returned.
1398 static int cciss_find_free_drive_index(int ctlr)
1402 for (i = 0; i < CISS_MAX_LUN; i++) {
1403 if (hba[ctlr]->drv[i].raid_level == -1) {
1404 if (i > hba[ctlr]->highest_lun)
1405 hba[ctlr]->highest_lun = i;
1412 /* This function will add and remove logical drives from the Logical
1413 * drive array of the controller and maintain persistence of ordering
1414 * so that mount points are preserved until the next reboot. This allows
1415 * for the removal of logical drives in the middle of the drive array
1416 * without a re-ordering of those drives.
1418 * h = The controller to perform the operations on
1419 * del_disk = The disk to remove if specified. If the value given
1420 * is NULL then no disk is removed.
1422 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1426 ReportLunData_struct *ld_buff = NULL;
1427 drive_info_struct *drv = NULL;
1434 unsigned long flags;
1436 /* Set busy_configuring flag for this operation */
1437 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1438 if (h->busy_configuring) {
1439 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1442 h->busy_configuring = 1;
1444 /* if del_disk is NULL then we are being called to add a new disk
1445 * and update the logical drive table. If it is not NULL then
1446 * we will check if the disk is in use or not.
1448 if (del_disk != NULL) {
1449 drv = get_drv(del_disk);
1450 drv->busy_configuring = 1;
1451 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1452 return_code = deregister_disk(del_disk, drv, 1);
1453 drv->busy_configuring = 0;
1454 h->busy_configuring = 0;
1457 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1458 if (!capable(CAP_SYS_RAWIO))
1461 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1462 if (ld_buff == NULL)
1465 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1466 sizeof(ReportLunData_struct), 0,
1469 if (return_code == IO_OK) {
1471 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1472 } else { /* reading number of logical volumes failed */
1473 printk(KERN_WARNING "cciss: report logical volume"
1474 " command failed\n");
1479 num_luns = listlength / 8; /* 8 bytes per entry */
1480 if (num_luns > CISS_MAX_LUN) {
1481 num_luns = CISS_MAX_LUN;
1482 printk(KERN_WARNING "cciss: more luns configured"
1483 " on controller than can be handled by"
1487 /* Compare controller drive array to drivers drive array.
1488 * Check for updates in the drive information and any new drives
1489 * on the controller.
1491 for (i = 0; i < num_luns; i++) {
1497 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1499 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1501 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1502 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1504 /* Find if the LUN is already in the drive array
1505 * of the controller. If so then update its info
1506 * if it is not in use. If it does not exist then find
1507 * the first free index and add it.
1509 for (j = 0; j <= h->highest_lun; j++) {
1510 if (h->drv[j].LunID == lunid) {
1516 /* check if the drive was found already in the array */
1518 drv_index = cciss_find_free_drive_index(ctlr);
1519 if (drv_index == -1)
1522 /* Check if the gendisk needs to be allocated */
1523 if (!h->gendisk[drv_index]){
1524 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1525 if (!h->gendisk[drv_index]){
1526 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1531 h->drv[drv_index].LunID = lunid;
1532 cciss_update_drive_info(ctlr, drv_index);
1538 h->busy_configuring = 0;
1539 /* We return -1 here to tell the ACU that we have registered/updated
1540 * all of the drives that we can and to keep it from calling us
1545 printk(KERN_ERR "cciss: out of memory\n");
1549 /* This function will deregister the disk and its queue from the
1550 * kernel. It must be called with the controller lock held and the
1551 * drv structure's busy_configuring flag set. Its parameters are:
1553 * disk = This is the disk to be deregistered
1554 * drv = This is the drive_info_struct associated with the disk to be
1555 * deregistered. It contains information about the disk used
1557 * clear_all = This flag determines whether or not the disk information
1558 * is going to be completely cleared out and the highest_lun
1559 * reset. Sometimes we want to clear out information about
1560 * the disk in preparation for re-adding it. In this case
1561 * the highest_lun should be left unchanged and the LunID
1562 * should not be cleared.
1564 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1568 ctlr_info_t *h = get_host(disk);
1570 if (!capable(CAP_SYS_RAWIO))
1573 /* make sure logical volume is NOT in use */
1574 if (clear_all || (h->gendisk[0] == disk)) {
1575 if (drv->usage_count > 1)
1577 } else if (drv->usage_count > 0)
1580 /* invalidate the devices and deregister the disk. If it is disk
1581 * zero do not deregister it but just zero out its values. This
1582 * allows us to delete disk zero but keep the controller registered.
1584 if (h->gendisk[0] != disk) {
1585 struct request_queue *q = disk->queue;
1586 if (disk->flags & GENHD_FL_UP)
1589 blk_cleanup_queue(q);
1590 /* Set drv->queue to NULL so that we do not try
1591 * to call blk_start_queue on this queue in the
1596 /* If clear_all is set then we are deleting the logical
1597 * drive, not just refreshing its info. For drives
1598 * other than disk 0 we will call put_disk. We do not
1599 * do this for disk 0 as we need it to be able to
1600 * configure the controller.
1603 /* This isn't pretty, but we need to find the
1604 * disk in our array and NULL out the pointer.
1605 * This is so that we will call alloc_disk if
1606 * this index is used again later.
1608 for (i=0; i < CISS_MAX_LUN; i++){
1609 if(h->gendisk[i] == disk){
1610 h->gendisk[i] = NULL;
1617 set_capacity(disk, 0);
1621 /* zero out the disk size info */
1623 drv->block_size = 0;
1627 drv->raid_level = -1; /* This can be used as a flag variable to
1628 * indicate that this element of the drive
1633 /* check to see if it was the last disk */
1634 if (drv == h->drv + h->highest_lun) {
1635 /* if so, find the new highest lun */
1636 int i, newhighest = -1;
1637 for (i = 0; i < h->highest_lun; i++) {
1638 /* if the disk has size > 0, it is available */
1639 if (h->drv[i].heads)
1642 h->highest_lun = newhighest;
1650 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1651 1: address logical volume log_unit,
1652 2: periph device address is scsi3addr */
1653 unsigned int log_unit, __u8 page_code,
1654 unsigned char *scsi3addr, int cmd_type)
1656 ctlr_info_t *h = hba[ctlr];
1657 u64bit buff_dma_handle;
1660 c->cmd_type = CMD_IOCTL_PEND;
1661 c->Header.ReplyQueue = 0;
1663 c->Header.SGList = 1;
1664 c->Header.SGTotal = 1;
1666 c->Header.SGList = 0;
1667 c->Header.SGTotal = 0;
1669 c->Header.Tag.lower = c->busaddr;
1671 c->Request.Type.Type = cmd_type;
1672 if (cmd_type == TYPE_CMD) {
1675 /* If use_unit_num is 0, the command is addressed to the
1676 controller itself, so it's a physical command:
1677 mode = 0, target = 0, and we have nothing to write.
1678 Otherwise, if use_unit_num == 1,
1679 mode = 1 (volume set addressing), target = LunID;
1680 and if use_unit_num == 2,
1681 mode = 0 (peripheral device addressing), target = scsi3addr. */
1682 if (use_unit_num == 1) {
1683 c->Header.LUN.LogDev.VolId =
1684 h->drv[log_unit].LunID;
1685 c->Header.LUN.LogDev.Mode = 1;
1686 } else if (use_unit_num == 2) {
1687 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1689 c->Header.LUN.LogDev.Mode = 0;
1691 /* are we trying to read a vital product page */
1692 if (page_code != 0) {
1693 c->Request.CDB[1] = 0x01;
1694 c->Request.CDB[2] = page_code;
1696 c->Request.CDBLen = 6;
1697 c->Request.Type.Attribute = ATTR_SIMPLE;
1698 c->Request.Type.Direction = XFER_READ;
1699 c->Request.Timeout = 0;
1700 c->Request.CDB[0] = CISS_INQUIRY;
1701 c->Request.CDB[4] = size & 0xFF;
1703 case CISS_REPORT_LOG:
1704 case CISS_REPORT_PHYS:
1705 /* Talking to the controller, so it's a physical command:
1706 mode = 00, target = 0. Nothing to write.
1708 c->Request.CDBLen = 12;
1709 c->Request.Type.Attribute = ATTR_SIMPLE;
1710 c->Request.Type.Direction = XFER_READ;
1711 c->Request.Timeout = 0;
1712 c->Request.CDB[0] = cmd;
1713 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1714 c->Request.CDB[7] = (size >> 16) & 0xFF;
1715 c->Request.CDB[8] = (size >> 8) & 0xFF;
1716 c->Request.CDB[9] = size & 0xFF;
1719 case CCISS_READ_CAPACITY:
1720 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1721 c->Header.LUN.LogDev.Mode = 1;
1722 c->Request.CDBLen = 10;
1723 c->Request.Type.Attribute = ATTR_SIMPLE;
1724 c->Request.Type.Direction = XFER_READ;
1725 c->Request.Timeout = 0;
1726 c->Request.CDB[0] = cmd;
1728 case CCISS_READ_CAPACITY_16:
1729 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1730 c->Header.LUN.LogDev.Mode = 1;
1731 c->Request.CDBLen = 16;
1732 c->Request.Type.Attribute = ATTR_SIMPLE;
1733 c->Request.Type.Direction = XFER_READ;
1734 c->Request.Timeout = 0;
1735 c->Request.CDB[0] = cmd;
1736 c->Request.CDB[1] = 0x10;
1737 c->Request.CDB[10] = (size >> 24) & 0xFF;
1738 c->Request.CDB[11] = (size >> 16) & 0xFF;
1739 c->Request.CDB[12] = (size >> 8) & 0xFF;
1740 c->Request.CDB[13] = size & 0xFF;
1741 c->Request.Timeout = 0;
1742 c->Request.CDB[0] = cmd;
1744 case CCISS_CACHE_FLUSH:
1745 c->Request.CDBLen = 12;
1746 c->Request.Type.Attribute = ATTR_SIMPLE;
1747 c->Request.Type.Direction = XFER_WRITE;
1748 c->Request.Timeout = 0;
1749 c->Request.CDB[0] = BMIC_WRITE;
1750 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1754 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1757 } else if (cmd_type == TYPE_MSG) {
1759 case 0: /* ABORT message */
1760 c->Request.CDBLen = 12;
1761 c->Request.Type.Attribute = ATTR_SIMPLE;
1762 c->Request.Type.Direction = XFER_WRITE;
1763 c->Request.Timeout = 0;
1764 c->Request.CDB[0] = cmd; /* abort */
1765 c->Request.CDB[1] = 0; /* abort a command */
1766 /* buff contains the tag of the command to abort */
1767 memcpy(&c->Request.CDB[4], buff, 8);
1769 case 1: /* RESET message */
1770 c->Request.CDBLen = 12;
1771 c->Request.Type.Attribute = ATTR_SIMPLE;
1772 c->Request.Type.Direction = XFER_WRITE;
1773 c->Request.Timeout = 0;
1774 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1775 c->Request.CDB[0] = cmd; /* reset */
1776 c->Request.CDB[1] = 0x04; /* reset a LUN */
1778 case 3: /* No-Op message */
1779 c->Request.CDBLen = 1;
1780 c->Request.Type.Attribute = ATTR_SIMPLE;
1781 c->Request.Type.Direction = XFER_WRITE;
1782 c->Request.Timeout = 0;
1783 c->Request.CDB[0] = cmd;
1787 "cciss%d: unknown message type %d\n", ctlr, cmd);
1792 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1795 /* Fill in the scatter gather information */
1797 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1799 PCI_DMA_BIDIRECTIONAL);
1800 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1801 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1802 c->SG[0].Len = size;
1803 c->SG[0].Ext = 0; /* we are not chaining */
1808 static int sendcmd_withirq(__u8 cmd,
1812 unsigned int use_unit_num,
1813 unsigned int log_unit, __u8 page_code, int cmd_type)
1815 ctlr_info_t *h = hba[ctlr];
1816 CommandList_struct *c;
1817 u64bit buff_dma_handle;
1818 unsigned long flags;
1820 DECLARE_COMPLETION_ONSTACK(wait);
1822 if ((c = cmd_alloc(h, 0)) == NULL)
1824 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1825 log_unit, page_code, NULL, cmd_type);
1826 if (return_status != IO_OK) {
1828 return return_status;
1833 /* Put the request on the tail of the queue and send it */
1834 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1838 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1840 wait_for_completion(&wait);
1842 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1843 switch (c->err_info->CommandStatus) {
1844 case CMD_TARGET_STATUS:
1845 printk(KERN_WARNING "cciss: cmd %p has "
1846 " completed with errors\n", c);
1847 if (c->err_info->ScsiStatus) {
1848 printk(KERN_WARNING "cciss: cmd %p "
1849 "has SCSI Status = %x\n",
1850 c, c->err_info->ScsiStatus);
1854 case CMD_DATA_UNDERRUN:
1855 case CMD_DATA_OVERRUN:
1856 /* expected for inquiry and report lun commands */
1859 printk(KERN_WARNING "cciss: Cmd %p is "
1860 "reported invalid\n", c);
1861 return_status = IO_ERROR;
1863 case CMD_PROTOCOL_ERR:
1864 printk(KERN_WARNING "cciss: cmd %p has "
1865 "protocol error \n", c);
1866 return_status = IO_ERROR;
1868 case CMD_HARDWARE_ERR:
1869 printk(KERN_WARNING "cciss: cmd %p had "
1870 " hardware error\n", c);
1871 return_status = IO_ERROR;
1873 case CMD_CONNECTION_LOST:
1874 printk(KERN_WARNING "cciss: cmd %p had "
1875 "connection lost\n", c);
1876 return_status = IO_ERROR;
1879 printk(KERN_WARNING "cciss: cmd %p was "
1881 return_status = IO_ERROR;
1883 case CMD_ABORT_FAILED:
1884 printk(KERN_WARNING "cciss: cmd %p reports "
1885 "abort failed\n", c);
1886 return_status = IO_ERROR;
1888 case CMD_UNSOLICITED_ABORT:
1890 "cciss%d: unsolicited abort %p\n", ctlr, c);
1891 if (c->retry_count < MAX_CMD_RETRIES) {
1893 "cciss%d: retrying %p\n", ctlr, c);
1895 /* erase the old error information */
1896 memset(c->err_info, 0,
1897 sizeof(ErrorInfo_struct));
1898 return_status = IO_OK;
1899 INIT_COMPLETION(wait);
1902 return_status = IO_ERROR;
1905 printk(KERN_WARNING "cciss: cmd %p returned "
1906 "unknown status %x\n", c,
1907 c->err_info->CommandStatus);
1908 return_status = IO_ERROR;
1911 /* unlock the buffers from DMA */
1912 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1913 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1914 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1915 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1917 return return_status;
1920 static void cciss_geometry_inquiry(int ctlr, int logvol,
1921 int withirq, sector_t total_size,
1922 unsigned int block_size,
1923 InquiryData_struct *inq_buff,
1924 drive_info_struct *drv)
1929 memset(inq_buff, 0, sizeof(InquiryData_struct));
1931 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1932 inq_buff, sizeof(*inq_buff), 1,
1933 logvol, 0xC1, TYPE_CMD);
1935 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1936 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1938 if (return_code == IO_OK) {
1939 if (inq_buff->data_byte[8] == 0xFF) {
1941 "cciss: reading geometry failed, volume "
1942 "does not support reading geometry\n");
1944 drv->sectors = 32; // Sectors per track
1945 drv->cylinders = total_size + 1;
1946 drv->raid_level = RAID_UNKNOWN;
1948 drv->heads = inq_buff->data_byte[6];
1949 drv->sectors = inq_buff->data_byte[7];
1950 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1951 drv->cylinders += inq_buff->data_byte[5];
1952 drv->raid_level = inq_buff->data_byte[8];
1954 drv->block_size = block_size;
1955 drv->nr_blocks = total_size + 1;
1956 t = drv->heads * drv->sectors;
1958 sector_t real_size = total_size + 1;
1959 unsigned long rem = sector_div(real_size, t);
1962 drv->cylinders = real_size;
1964 } else { /* Get geometry failed */
1965 printk(KERN_WARNING "cciss: reading geometry failed\n");
1967 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1968 drv->heads, drv->sectors, drv->cylinders);
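/* Worked example of the geometry math above: with, say, 255 heads and 32
 * sectors per track reported by the inquiry, one cylinder spans
 * t = 255 * 32 = 8160 sectors, so a 4194240-block volume comes out as
 * 4194240 / 8160 = 514 cylinders via sector_div(). */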
1972 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1973 unsigned int *block_size)
1975 ReadCapdata_struct *buf;
1978 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1980 printk(KERN_WARNING "cciss: out of memory\n");
1985 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1986 ctlr, buf, sizeof(ReadCapdata_struct),
1987 1, logvol, 0, TYPE_CMD);
1989 return_code = sendcmd(CCISS_READ_CAPACITY,
1990 ctlr, buf, sizeof(ReadCapdata_struct),
1991 1, logvol, 0, NULL, TYPE_CMD);
1992 if (return_code == IO_OK) {
1993 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
1994 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
1995 } else { /* read capacity command failed */
1996 printk(KERN_WARNING "cciss: read capacity failed\n");
1998 *block_size = BLOCK_SIZE;
2000 if (*total_size != 0)
2001 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2002 (unsigned long long)*total_size+1, *block_size);
2007 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2009 ReadCapdata_struct_16 *buf;
2012 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2014 printk(KERN_WARNING "cciss: out of memory\n");
2019 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2020 ctlr, buf, sizeof(ReadCapdata_struct_16),
2021 1, logvol, 0, TYPE_CMD);
2024 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2025 ctlr, buf, sizeof(ReadCapdata_struct_16),
2026 1, logvol, 0, NULL, TYPE_CMD);
2028 if (return_code == IO_OK) {
2029 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2030 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2031 } else { /* read capacity command failed */
2032 printk(KERN_WARNING "cciss: read capacity failed\n");
2034 *block_size = BLOCK_SIZE;
2036 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2037 (unsigned long long)*total_size+1, *block_size);
2041 static int cciss_revalidate(struct gendisk *disk)
2043 ctlr_info_t *h = get_host(disk);
2044 drive_info_struct *drv = get_drv(disk);
2047 unsigned int block_size;
2048 sector_t total_size;
2049 InquiryData_struct *inq_buff = NULL;
2051 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2052 if (h->drv[logvol].LunID == drv->LunID) {
2061 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2062 if (inq_buff == NULL) {
2063 printk(KERN_WARNING "cciss: out of memory\n");
2066 if (h->cciss_read == CCISS_READ_10) {
2067 cciss_read_capacity(h->ctlr, logvol, 1,
2068 &total_size, &block_size);
2070 cciss_read_capacity_16(h->ctlr, logvol, 1,
2071 &total_size, &block_size);
2073 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2076 blk_queue_hardsect_size(drv->queue, drv->block_size);
2077 set_capacity(disk, drv->nr_blocks);
2084 * Poll for a command to complete.
2085 * The memory mapped FIFO is polled for the completion.
2086 * Used only at init time, interrupts from the HBA are disabled.
2088 static unsigned long pollcomplete(int ctlr)
2093 /* Wait (up to 20 seconds) for a command to complete */
2095 for (i = 20 * HZ; i > 0; i--) {
2096 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2097 if (done == FIFO_EMPTY)
2098 schedule_timeout_uninterruptible(1);
2102 /* Invalid address to tell caller we ran out of time */
2106 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2108 /* We get in here if sendcmd() is polling for completions
2109 and gets some command back that it wasn't expecting --
2110 something other than that which it just sent down.
2111 Ordinarily, that shouldn't happen, but it can happen when
2112 the scsi tape stuff gets into error handling mode, and
2113 starts using sendcmd() to try to abort commands and
2114 reset tape drives. In that case, sendcmd may pick up
2115 completions of commands that were sent to logical drives
2116 through the block i/o system, or cciss ioctls completing, etc.
2117 In that case, we need to save those completions for later
2118 processing by the interrupt handler.
2121 #ifdef CONFIG_CISS_SCSI_TAPE
2122 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2124 /* If it's not the scsi tape stuff doing error handling, (abort */
2125 /* or reset) then we don't expect anything weird. */
2126 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2128 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2129 "Invalid command list address returned! (%lx)\n",
2131 /* not much we can do. */
2132 #ifdef CONFIG_CISS_SCSI_TAPE
2136 /* We've sent down an abort or reset, but something else
2138 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2139 /* Uh oh. No room to save it for later... */
2140 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2141 "reject list overflow, command lost!\n", ctlr);
2144 /* Save it for later */
2145 srl->complete[srl->ncompletions] = complete;
2146 srl->ncompletions++;
2152 * Send a command to the controller, and wait for it to complete.
2153 * Only used at init time.
2155 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2156 1: address logical volume log_unit,
2157 2: periph device address is scsi3addr */
2158 unsigned int log_unit,
2159 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2161 CommandList_struct *c;
2163 unsigned long complete;
2164 ctlr_info_t *info_p = hba[ctlr];
2165 u64bit buff_dma_handle;
2166 int status, done = 0;
2168 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2169 printk(KERN_WARNING "cciss: unable to get memory");
2172 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2173 log_unit, page_code, scsi3addr, cmd_type);
2174 if (status != IO_OK) {
2175 cmd_free(info_p, c, 1);
2183 printk(KERN_DEBUG "cciss: turning intr off\n");
2184 #endif /* CCISS_DEBUG */
2185 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2187 /* Make sure there is room in the command FIFO */
2188 /* Actually it should be completely empty at this time */
2189 /* unless we are in here doing error handling for the scsi */
2190 /* tape side of the driver. */
2191 for (i = 200000; i > 0; i--) {
2192 /* if fifo isn't full go */
2193 if (!(info_p->access.fifo_full(info_p))) {
2198 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2199 " waiting!\n", ctlr);
2204 info_p->access.submit_command(info_p, c);
2207 complete = pollcomplete(ctlr);
2210 printk(KERN_DEBUG "cciss: command completed\n");
2211 #endif /* CCISS_DEBUG */
2213 if (complete == 1) {
2215 "cciss cciss%d: SendCmd Timeout out, "
2216 "No command list address returned!\n", ctlr);
2222 /* This will need to change for direct lookup completions */
2223 if ((complete & CISS_ERROR_BIT)
2224 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2225 /* if data overrun or underrun on Report command
2228 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2229 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2230 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2231 ((c->err_info->CommandStatus ==
2232 CMD_DATA_OVERRUN) ||
2233 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2235 complete = c->busaddr;
2237 if (c->err_info->CommandStatus ==
2238 CMD_UNSOLICITED_ABORT) {
2239 printk(KERN_WARNING "cciss%d: "
2240 "unsolicited abort %p\n",
2242 if (c->retry_count < MAX_CMD_RETRIES) {
2244 "cciss%d: retrying %p\n",
2247 /* erase the old error */
2249 memset(c->err_info, 0,
2251 (ErrorInfo_struct));
2255 "cciss%d: retried %p too "
2256 "many times\n", ctlr, c);
2260 } else if (c->err_info->CommandStatus ==
2263 "cciss%d: command could not be aborted.\n",
2268 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2269 " Error %x \n", ctlr,
2270 c->err_info->CommandStatus);
2271 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2273 " size %x\n num %x value %x\n",
2275 c->err_info->MoreErrInfo.Invalid_Cmd.
2277 c->err_info->MoreErrInfo.Invalid_Cmd.
2279 c->err_info->MoreErrInfo.Invalid_Cmd.
2285 /* This will need changing for direct lookup completions */
2286 if (complete != c->busaddr) {
2287 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2288 BUG(); /* we are pretty much hosed if we get here. */
2296 /* unlock the data buffer from DMA */
2297 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2298 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2299 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2300 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2301 #ifdef CONFIG_CISS_SCSI_TAPE
2302 /* if we saved some commands for later, process them now. */
2303 if (info_p->scsi_rejects.ncompletions > 0)
2304 do_cciss_intr(0, info_p);
2306 cmd_free(info_p, c, 1);
2311 * Map (physical) PCI mem into (virtual) kernel space
2313 static void __iomem *remap_pci_mem(ulong base, ulong size)
2315 ulong page_base = ((ulong) base) & PAGE_MASK;
2316 ulong page_offs = ((ulong) base) - page_base;
2317 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2319 return page_remapped ? (page_remapped + page_offs) : NULL;
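/*
 * The PCI BAR handed to us need not be page aligned, so the mapping above
 * is done from the enclosing page boundary and the intra-page offset is
 * added back to the returned pointer.  For illustration (assuming a 4K
 * page size): base 0xfdff1250 and size 0x250 give page_base 0xfdff1000,
 * page_offs 0x250, an ioremap of 0x4a0 bytes, and a return value of
 * page_remapped + 0x250.
 */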
2323 * Takes jobs off the Q and sends them to the hardware, then puts them on
2324 * the Q to wait for completion.
2326 static void start_io(ctlr_info_t *h)
2328 CommandList_struct *c;
2330 while ((c = h->reqQ) != NULL) {
2331 /* can't do anything if fifo is full */
2332 if ((h->access.fifo_full(h))) {
2333 printk(KERN_WARNING "cciss: fifo full\n");
2337 /* Get the first entry from the Request Q */
2338 removeQ(&(h->reqQ), c);
2341 /* Tell the controller to execute the command */
2342 h->access.submit_command(h, c);
2344 /* Put job onto the completed Q */
2345 addQ(&(h->cmpQ), c);
2349 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2350 /* Zeros out the error record and then resends the command back */
2351 /* to the controller */
2352 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2354 /* erase the old error information */
2355 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2357 /* add it to software queue and then send it to the controller */
2358 addQ(&(h->reqQ), c);
2360 if (h->Qdepth > h->maxQsinceinit)
2361 h->maxQsinceinit = h->Qdepth;
2366 static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
2367 unsigned int msg_byte, unsigned int host_byte,
2368 unsigned int driver_byte)
2370 /* inverse of macros in scsi.h */
2371 return (scsi_status_byte & 0xff) |
2372 ((msg_byte & 0xff) << 8) |
2373 ((host_byte & 0xff) << 16) |
2374 ((driver_byte & 0xff) << 24);
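/*
 * The packing mirrors the status_byte()/msg_byte()/host_byte()/driver_byte()
 * accessors in scsi.h: bits 0-7 SCSI status, 8-15 message byte, 16-23 host
 * byte, 24-31 driver byte.  For example, the timeout default used in
 * complete_command() below, make_status_bytes(0, 0, 0, DRIVER_TIMEOUT),
 * evaluates to 0x06000000 with the usual scsi.h value DRIVER_TIMEOUT == 0x06.
 */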
2377 static inline int evaluate_target_status(CommandList_struct *cmd)
2379 unsigned char sense_key;
2380 unsigned char status_byte, msg_byte, host_byte, driver_byte;
2383 /* If we get in here, it means we got "target status", that is, scsi status */
2384 status_byte = cmd->err_info->ScsiStatus;
2385 driver_byte = DRIVER_OK;
2386 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
2388 if (blk_pc_request(cmd->rq))
2389 host_byte = DID_PASSTHROUGH;
2393 error_value = make_status_bytes(status_byte, msg_byte,
2394 host_byte, driver_byte);
2396 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
2397 if (!blk_pc_request(cmd->rq))
2398 printk(KERN_WARNING "cciss: cmd %p "
2399 "has SCSI Status 0x%x\n",
2400 cmd, cmd->err_info->ScsiStatus);
2404 /* check the sense key */
2405 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2406 /* no status or recovered error */
2407 if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
2410 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2411 if (error_value != 0)
2412 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2413 " sense key = 0x%x\n", cmd, sense_key);
2417 /* SG_IO or similar, copy sense data back */
2418 if (cmd->rq->sense) {
2419 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2420 cmd->rq->sense_len = cmd->err_info->SenseLen;
2421 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2422 cmd->rq->sense_len);
2424 cmd->rq->sense_len = 0;
2429 /* checks the status of the job and calls complete buffers to mark all
2430 * buffers for the completed job. Note that this function does not need
2431 * to hold the hba/queue lock.
2433 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2437 struct request *rq = cmd->rq;
2442 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
2444 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2445 goto after_error_processing;
2447 switch (cmd->err_info->CommandStatus) {
2448 case CMD_TARGET_STATUS:
2449 rq->errors = evaluate_target_status(cmd);
2451 case CMD_DATA_UNDERRUN:
2452 if (blk_fs_request(cmd->rq)) {
2453 printk(KERN_WARNING "cciss: cmd %p has"
2454 " completed with data underrun "
2456 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2459 case CMD_DATA_OVERRUN:
2460 if (blk_fs_request(cmd->rq))
2461 printk(KERN_WARNING "cciss: cmd %p has"
2462 " completed with data overrun "
2466 printk(KERN_WARNING "cciss: cmd %p is "
2467 "reported invalid\n", cmd);
2468 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2469 cmd->err_info->CommandStatus, DRIVER_OK,
2470 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2472 case CMD_PROTOCOL_ERR:
2473 printk(KERN_WARNING "cciss: cmd %p has "
2474 "protocol error \n", cmd);
2475 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2476 cmd->err_info->CommandStatus, DRIVER_OK,
2477 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2479 case CMD_HARDWARE_ERR:
2480 printk(KERN_WARNING "cciss: cmd %p had "
2481 " hardware error\n", cmd);
2482 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2483 cmd->err_info->CommandStatus, DRIVER_OK,
2484 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2486 case CMD_CONNECTION_LOST:
2487 printk(KERN_WARNING "cciss: cmd %p had "
2488 "connection lost\n", cmd);
2489 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2490 cmd->err_info->CommandStatus, DRIVER_OK,
2491 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2494 printk(KERN_WARNING "cciss: cmd %p was "
2496 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2497 cmd->err_info->CommandStatus, DRIVER_OK,
2498 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2500 case CMD_ABORT_FAILED:
2501 printk(KERN_WARNING "cciss: cmd %p reports "
2502 "abort failed\n", cmd);
2503 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2504 cmd->err_info->CommandStatus, DRIVER_OK,
2505 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2507 case CMD_UNSOLICITED_ABORT:
2508 printk(KERN_WARNING "cciss%d: unsolicited "
2509 "abort %p\n", h->ctlr, cmd);
2510 if (cmd->retry_count < MAX_CMD_RETRIES) {
2513 "cciss%d: retrying %p\n", h->ctlr, cmd);
2517 "cciss%d: %p retried too "
2518 "many times\n", h->ctlr, cmd);
2519 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2520 cmd->err_info->CommandStatus, DRIVER_OK,
2521 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2524 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2525 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2526 cmd->err_info->CommandStatus, DRIVER_OK,
2527 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2530 printk(KERN_WARNING "cciss: cmd %p returned "
2531 "unknown status %x\n", cmd,
2532 cmd->err_info->CommandStatus);
2533 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2534 cmd->err_info->CommandStatus, DRIVER_OK,
2535 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2538 after_error_processing:
2540 /* We need to return this command */
2542 resend_cciss_cmd(h, cmd);
2545 cmd->rq->data_len = 0;
2546 cmd->rq->completion_data = cmd;
2547 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2548 blk_complete_request(cmd->rq);
2552 * Get a request and submit it to the controller.
2554 static void do_cciss_request(struct request_queue *q)
2556 ctlr_info_t *h = q->queuedata;
2557 CommandList_struct *c;
2560 struct request *creq;
2562 struct scatterlist tmp_sg[MAXSGENTRIES];
2563 drive_info_struct *drv;
2566 /* We call start_io here in case there is a command waiting on the
2567 * queue that has not been sent.
2569 if (blk_queue_plugged(q))
2573 creq = elv_next_request(q);
2577 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2579 if ((c = cmd_alloc(h, 1)) == NULL)
2582 blkdev_dequeue_request(creq);
2584 spin_unlock_irq(q->queue_lock);
2586 c->cmd_type = CMD_RWREQ;
2589 /* fill in the request */
2590 drv = creq->rq_disk->private_data;
2591 c->Header.ReplyQueue = 0; // unused in simple mode
2592 /* got command from pool, so use the command block index instead */
2593 /* for direct lookups. */
2594 /* The first 2 bits are reserved for controller error reporting. */
2595 c->Header.Tag.lower = (c->cmdindex << 3);
2596 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
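/* The low 32 bits of the tag are therefore laid out as:
 *   bits 31..3  index of this command in the controller's cmd_pool
 *   bit  2      direct-lookup flag (the 0x04 set above)
 *   bits 1..0   reserved for controller error reporting
 * which lets the completion path find the command from the tag alone.
 */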
2597 c->Header.LUN.LogDev.VolId = drv->LunID;
2598 c->Header.LUN.LogDev.Mode = 1;
2599 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2600 c->Request.Type.Type = TYPE_CMD; // It is a command.
2601 c->Request.Type.Attribute = ATTR_SIMPLE;
2602 c->Request.Type.Direction =
2603 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2604 c->Request.Timeout = 0; // Don't time out
2606 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2607 start_blk = creq->sector;
2609 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2610 (int)creq->nr_sectors);
2611 #endif /* CCISS_DEBUG */
2613 sg_init_table(tmp_sg, MAXSGENTRIES);
2614 seg = blk_rq_map_sg(q, creq, tmp_sg);
2616 /* get the DMA records for the setup */
2617 if (c->Request.Type.Direction == XFER_READ)
2618 dir = PCI_DMA_FROMDEVICE;
2620 dir = PCI_DMA_TODEVICE;
2622 for (i = 0; i < seg; i++) {
2623 c->SG[i].Len = tmp_sg[i].length;
2624 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
2626 tmp_sg[i].length, dir);
2627 c->SG[i].Addr.lower = temp64.val32.lower;
2628 c->SG[i].Addr.upper = temp64.val32.upper;
2629 c->SG[i].Ext = 0; // we are not chaining
2631 /* track how many SG entries we are using */
2636 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2637 creq->nr_sectors, seg);
2638 #endif /* CCISS_DEBUG */
2640 c->Header.SGList = c->Header.SGTotal = seg;
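/* Both CDB forms below encode the starting LBA and the sector count
 * big-endian (most significant byte first): READ/WRITE(10) packs a 32-bit
 * LBA into CDB[2..5] and a 16-bit count into CDB[7..8], while
 * READ/WRITE(16) packs a 64-bit LBA into CDB[2..9] and a 32-bit count
 * into CDB[10..13]. */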
2641 if (likely(blk_fs_request(creq))) {
2642 if(h->cciss_read == CCISS_READ_10) {
2643 c->Request.CDB[1] = 0;
2644 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2645 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2646 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2647 c->Request.CDB[5] = start_blk & 0xff;
2648 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2649 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2650 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2651 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2653 c->Request.CDBLen = 16;
2654 c->Request.CDB[1]= 0;
2655 c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
2656 c->Request.CDB[3]= (start_blk >> 48) & 0xff;
2657 c->Request.CDB[4]= (start_blk >> 40) & 0xff;
2658 c->Request.CDB[5]= (start_blk >> 32) & 0xff;
2659 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2660 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2661 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2662 c->Request.CDB[9]= start_blk & 0xff;
2663 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2664 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2665 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2666 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2667 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2669 } else if (blk_pc_request(creq)) {
2670 c->Request.CDBLen = creq->cmd_len;
2671 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2673 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2677 spin_lock_irq(q->queue_lock);
2679 addQ(&(h->reqQ), c);
2681 if (h->Qdepth > h->maxQsinceinit)
2682 h->maxQsinceinit = h->Qdepth;
2688 /* We will already have the driver lock here so no need
2694 static inline unsigned long get_next_completion(ctlr_info_t *h)
2696 #ifdef CONFIG_CISS_SCSI_TAPE
2697 /* Any rejects from sendcmd() lying around? Process them first */
2698 if (h->scsi_rejects.ncompletions == 0)
2699 return h->access.command_completed(h);
2701 struct sendcmd_reject_list *srl;
2703 srl = &h->scsi_rejects;
2704 n = --srl->ncompletions;
2705 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2707 return srl->complete[n];
2710 return h->access.command_completed(h);
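/* With CONFIG_CISS_SCSI_TAPE, completions that the polled sendcmd() path
 * pulled off the hardware but did not own are parked in h->scsi_rejects
 * (see add_sendcmd_reject() above).  get_next_completion() hands those
 * saved tags back, most recently saved first, before reading the hardware
 * FIFO, and interrupt_pending()/interrupt_not_for_us() below count a
 * non-empty reject list as work to do so the completions are not lost. */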
2714 static inline int interrupt_pending(ctlr_info_t *h)
2716 #ifdef CONFIG_CISS_SCSI_TAPE
2717 return (h->access.intr_pending(h)
2718 || (h->scsi_rejects.ncompletions > 0));
2720 return h->access.intr_pending(h);
2724 static inline long interrupt_not_for_us(ctlr_info_t *h)
2726 #ifdef CONFIG_CISS_SCSI_TAPE
2727 return (((h->access.intr_pending(h) == 0) ||
2728 (h->interrupts_enabled == 0))
2729 && (h->scsi_rejects.ncompletions == 0));
2731 return (((h->access.intr_pending(h) == 0) ||
2732 (h->interrupts_enabled == 0)));
2736 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2738 ctlr_info_t *h = dev_id;
2739 CommandList_struct *c;
2740 unsigned long flags;
2743 if (interrupt_not_for_us(h))
2746 * If there are completed commands in the completion queue,
2747 * we had better do something about it.
2749 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2750 while (interrupt_pending(h)) {
2751 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2755 if (a2 >= h->nr_cmds) {
2757 "cciss: controller cciss%d failed, stopping.\n",
2759 fail_all_cmds(h->ctlr);
2763 c = h->cmd_pool + a2;
2768 if ((c = h->cmpQ) == NULL) {
2770 "cciss: Completion of %08x ignored\n",
2774 while (c->busaddr != a) {
2781 * If we've found the command, take it off the
2782 * completion Q and free it
2784 if (c->busaddr == a) {
2785 removeQ(&h->cmpQ, c);
2786 if (c->cmd_type == CMD_RWREQ) {
2787 complete_command(h, c, 0);
2788 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2789 complete(c->waiting);
2791 # ifdef CONFIG_CISS_SCSI_TAPE
2792 else if (c->cmd_type == CMD_SCSI)
2793 complete_scsi_command(c, 0, a1);
2800 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2805 * We cannot read the structure directly, for portability we must use
2807 * This is for debug only.
2810 static void print_cfg_table(CfgTable_struct *tb)
2815 printk("Controller Configuration information\n");
2816 printk("------------------------------------\n");
2817 for (i = 0; i < 4; i++)
2818 temp_name[i] = readb(&(tb->Signature[i]));
2819 temp_name[4] = '\0';
2820 printk(" Signature = %s\n", temp_name);
2821 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2822 printk(" Transport methods supported = 0x%x\n",
2823 readl(&(tb->TransportSupport)));
2824 printk(" Transport methods active = 0x%x\n",
2825 readl(&(tb->TransportActive)));
2826 printk(" Requested transport Method = 0x%x\n",
2827 readl(&(tb->HostWrite.TransportRequest)));
2828 printk(" Coalesce Interrupt Delay = 0x%x\n",
2829 readl(&(tb->HostWrite.CoalIntDelay)));
2830 printk(" Coalesce Interrupt Count = 0x%x\n",
2831 readl(&(tb->HostWrite.CoalIntCount)));
2832 printk(" Max outstanding commands = 0x%d\n",
2833 readl(&(tb->CmdsOutMax)));
2834 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2835 for (i = 0; i < 16; i++)
2836 temp_name[i] = readb(&(tb->ServerName[i]));
2837 temp_name[16] = '\0';
2838 printk(" Server Name = %s\n", temp_name);
2839 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2841 #endif /* CCISS_DEBUG */
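/* Translate a config-space BAR offset (relative to PCI_BASE_ADDRESS_0) into
 * a resource index.  I/O BARs and 32-bit memory BARs occupy one 4-byte
 * register while 64-bit memory BARs occupy two, so the loop walks the
 * resources keeping a running register offset rather than assuming one
 * register per resource. */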
2843 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2845 int i, offset, mem_type, bar_type;
2846 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2849 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2850 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2851 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2854 mem_type = pci_resource_flags(pdev, i) &
2855 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2857 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2858 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2859 offset += 4; /* 32 bit */
2861 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2864 default: /* reserved in PCI 2.2 */
2866 "Base address is invalid\n");
2871 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2877 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2878 * controllers that are capable. If not, we use IO-APIC mode.
2881 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2882 struct pci_dev *pdev, __u32 board_id)
2884 #ifdef CONFIG_PCI_MSI
2886 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2890 /* Some boards advertise MSI but don't really support it */
2891 if ((board_id == 0x40700E11) ||
2892 (board_id == 0x40800E11) ||
2893 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2894 goto default_int_mode;
2896 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2897 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2899 c->intr[0] = cciss_msix_entries[0].vector;
2900 c->intr[1] = cciss_msix_entries[1].vector;
2901 c->intr[2] = cciss_msix_entries[2].vector;
2902 c->intr[3] = cciss_msix_entries[3].vector;
2907 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2908 "available\n", err);
2909 goto default_int_mode;
2911 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2913 goto default_int_mode;
2916 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2917 if (!pci_enable_msi(pdev)) {
2920 printk(KERN_WARNING "cciss: MSI init failed\n");
2924 #endif /* CONFIG_PCI_MSI */
2925 /* if we get here we're going to use the default interrupt mode */
2926 c->intr[SIMPLE_MODE_INT] = pdev->irq;
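/* In MSI-X mode all four vectors were stored in c->intr[0..3] above; for
 * MSI and legacy INTx, pdev->irq is the single vector the driver will
 * request, so only c->intr[SIMPLE_MODE_INT] is meaningful. */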
2930 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2932 ushort subsystem_vendor_id, subsystem_device_id, command;
2933 __u32 board_id, scratchpad = 0;
2935 __u32 cfg_base_addr;
2936 __u64 cfg_base_addr_index;
2939 /* check to see if controller has been disabled */
2940 /* BEFORE trying to enable it */
2941 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2942 if (!(command & 0x02)) {
2944 "cciss: controller appears to be disabled\n");
2948 err = pci_enable_device(pdev);
2950 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2954 err = pci_request_regions(pdev, "cciss");
2956 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2961 subsystem_vendor_id = pdev->subsystem_vendor;
2962 subsystem_device_id = pdev->subsystem_device;
2963 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2964 subsystem_vendor_id);
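/* board_id packs the PCI subsystem device id into the upper 16 bits and
 * the subsystem vendor id into the lower 16; e.g. 0x3225103C, checked
 * further down for the P600, is subsystem device 0x3225 with subsystem
 * vendor 0x103C. */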
2967 printk("command = %x\n", command);
2968 printk("irq = %x\n", pdev->irq);
2969 printk("board_id = %x\n", board_id);
2970 #endif /* CCISS_DEBUG */
2972 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2973 * else we use the IO-APIC interrupt assigned to us by system ROM.
2975 cciss_interrupt_mode(c, pdev, board_id);
2978 * Memory base addr is the first addr; the second points to the config
2982 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2984 printk("address 0 = %x\n", c->paddr);
2985 #endif /* CCISS_DEBUG */
2986 c->vaddr = remap_pci_mem(c->paddr, 0x250);
2988 /* Wait for the board to become ready. (PCI hotplug needs this.)
2989 * We poll for up to 120 secs, once per 100ms. */
2990 for (i = 0; i < 1200; i++) {
2991 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2992 if (scratchpad == CCISS_FIRMWARE_READY)
2994 set_current_state(TASK_INTERRUPTIBLE);
2995 schedule_timeout(HZ / 10); /* wait 100ms */
2997 if (scratchpad != CCISS_FIRMWARE_READY) {
2998 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
3000 goto err_out_free_res;
3003 /* get the address index number */
3004 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
3005 cfg_base_addr &= (__u32) 0x0000ffff;
3007 printk("cfg base address = %x\n", cfg_base_addr);
3008 #endif /* CCISS_DEBUG */
3009 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3011 printk("cfg base address index = %x\n", cfg_base_addr_index);
3012 #endif /* CCISS_DEBUG */
3013 if (cfg_base_addr_index == -1) {
3014 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
3016 goto err_out_free_res;
3019 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3021 printk("cfg offset = %x\n", cfg_offset);
3022 #endif /* CCISS_DEBUG */
3023 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3024 cfg_base_addr_index) +
3025 cfg_offset, sizeof(CfgTable_struct));
3026 c->board_id = board_id;
3029 print_cfg_table(c->cfgtable);
3030 #endif /* CCISS_DEBUG */
3032 for (i = 0; i < ARRAY_SIZE(products); i++) {
3033 if (board_id == products[i].board_id) {
3034 c->product_name = products[i].product_name;
3035 c->access = *(products[i].access);
3036 c->nr_cmds = products[i].nr_cmds;
3040 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3041 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3042 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3043 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3044 printk("Does not appear to be a valid CISS config table\n");
3046 goto err_out_free_res;
3048 /* We didn't find the controller in our list. We know the
3049 * signature is valid. If it's an HP device let's try to
3050 * bind to the device and fire it up. Otherwise we bail.
3052 if (i == ARRAY_SIZE(products)) {
3053 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3054 c->product_name = products[i-1].product_name;
3055 c->access = *(products[i-1].access);
3056 c->nr_cmds = products[i-1].nr_cmds;
3057 printk(KERN_WARNING "cciss: This is an unknown "
3058 "Smart Array controller.\n"
3059 "cciss: Please update to the latest driver "
3060 "available from www.hp.com.\n");
3062 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3063 " to access the Smart Array controller %08lx\n"
3064 , (unsigned long)board_id);
3066 goto err_out_free_res;
3071 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3073 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3075 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3079 /* Disabling DMA prefetch and refetch for the P600.
3080 * An ASIC bug may result in accesses to invalid memory addresses.
3081 * We've disabled prefetch for some time now. Testing with XEN
3082 * kernels revealed a bug in the refetch if dom0 resides on a P600.
3084 if(board_id == 0x3225103C) {
3087 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3088 dma_prefetch |= 0x8000;
3089 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3090 pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
3092 pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
3096 printk("Trying to put board into Simple mode\n");
3097 #endif /* CCISS_DEBUG */
3098 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3099 /* Update the field, and then ring the doorbell */
3100 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3101 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
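/* The mode switch is a doorbell handshake: simple mode is requested in the
 * host-writable part of the config table, the doorbell is rung with
 * CFGTBL_ChangeReq, and the loop below waits for the controller to clear
 * that bit before TransportActive is checked to confirm the switch actually
 * took effect. */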
3103 /* under certain very rare conditions, this can take a while.
3104 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3105 * as we enter this code.) */
3106 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3107 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3109 /* delay and try again */
3110 set_current_state(TASK_INTERRUPTIBLE);
3111 schedule_timeout(10);
3115 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3116 readl(c->vaddr + SA5_DOORBELL));
3117 #endif /* CCISS_DEBUG */
3119 print_cfg_table(c->cfgtable);
3120 #endif /* CCISS_DEBUG */
3122 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3123 printk(KERN_WARNING "cciss: unable to get board into"
3126 goto err_out_free_res;
3132 * Deliberately omit pci_disable_device(): it does something nasty to
3133 * Smart Array controllers that pci_enable_device does not undo
3135 pci_release_regions(pdev);
3140 * Gets information about the local volumes attached to the controller.
3142 static void cciss_getgeometry(int cntl_num)
3144 ReportLunData_struct *ld_buff;
3145 InquiryData_struct *inq_buff;
3150 unsigned block_size;
3151 sector_t total_size;
3153 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3154 if (ld_buff == NULL) {
3155 printk(KERN_ERR "cciss: out of memory\n");
3158 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3159 if (inq_buff == NULL) {
3160 printk(KERN_ERR "cciss: out of memory\n");
3164 /* Get the firmware version */
3165 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3166 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3168 if (return_code == IO_OK) {
3169 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3170 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3171 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3172 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3173 } else { /* send command failed */
3175 printk(KERN_WARNING "cciss: unable to determine firmware"
3176 " version of controller\n");
3178 /* Get the number of logical volumes */
3179 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3180 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3183 if (return_code == IO_OK) {
3185 printk("LUN Data\n--------------------------\n");
3186 #endif /* CCISS_DEBUG */
3189 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3191 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3193 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3194 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
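/* (LUNListLength comes back from the controller as a big-endian byte
 * count, hence the byte-at-a-time assembly above; each entry in the list
 * that follows is 8 bytes.) */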
3195 } else { /* reading number of logical volumes failed */
3197 printk(KERN_WARNING "cciss: report logical volume"
3198 " command failed\n");
3201 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3202 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3204 "ciss: only %d number of logical volumes supported\n",
3206 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3209 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3210 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3211 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3212 hba[cntl_num]->num_luns);
3213 #endif /* CCISS_DEBUG */
3215 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3216 for (i = 0; i < CISS_MAX_LUN; i++) {
3217 if (i < hba[cntl_num]->num_luns) {
3218 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3220 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3222 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3224 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3226 hba[cntl_num]->drv[i].LunID = lunid;
3229 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3230 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3231 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3232 hba[cntl_num]->drv[i].LunID);
3233 #endif /* CCISS_DEBUG */
3235 /* testing to see if 16-byte CDBs are already being used */
3236 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3237 cciss_read_capacity_16(cntl_num, i, 0,
3238 &total_size, &block_size);
3241 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3243 /* If read_capacity returns all F's, the logical drive is >2TB */
3244 /* so we switch to 16-byte CDBs for all read/write ops */
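/* Note that cciss_read/cciss_write are per-controller fields, so once any
 * logical drive needs the 16-byte CDBs, every drive on this controller is
 * addressed with them from then on. */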
3245 if(total_size == 0xFFFFFFFFULL) {
3246 cciss_read_capacity_16(cntl_num, i, 0,
3247 &total_size, &block_size);
3248 hba[cntl_num]->cciss_read = CCISS_READ_16;
3249 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3251 hba[cntl_num]->cciss_read = CCISS_READ_10;
3252 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3255 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3256 block_size, inq_buff,
3257 &hba[cntl_num]->drv[i]);
3259 /* initialize raid_level to indicate a free space */
3260 hba[cntl_num]->drv[i].raid_level = -1;
3267 /* Function to find the first free pointer into our hba[] array */
3268 /* Returns -1 if no free entries are left. */
3269 static int alloc_cciss_hba(void)
3273 for (i = 0; i < MAX_CTLR; i++) {
3277 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3280 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3281 if (!p->gendisk[0]) {
3289 printk(KERN_WARNING "cciss: This driver supports a maximum"
3290 " of %d controllers.\n", MAX_CTLR);
3293 printk(KERN_ERR "cciss: out of memory.\n");
3297 static void free_hba(int i)
3299 ctlr_info_t *p = hba[i];
3303 for (n = 0; n < CISS_MAX_LUN; n++)
3304 put_disk(p->gendisk[n]);
3309 * This is it. Find all the controllers and register them. I really hate
3310 * stealing all these major device numbers.
3311 * returns the number of block devices registered.
3313 static int __devinit cciss_init_one(struct pci_dev *pdev,
3314 const struct pci_device_id *ent)
3321 i = alloc_cciss_hba();
3325 hba[i]->busy_initializing = 1;
3327 if (cciss_pci_init(hba[i], pdev) != 0)
3330 sprintf(hba[i]->devname, "cciss%d", i);
3332 hba[i]->pdev = pdev;
3334 /* configure PCI DMA stuff */
3335 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3337 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3340 printk(KERN_ERR "cciss: no suitable DMA available\n");
3345 * register with the major number, or get a dynamic major number
3346 * by passing 0 as argument. This is done for greater than
3347 * 8 controller support.
3349 if (i < MAX_CTLR_ORIG)
3350 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3351 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3352 if (rc == -EBUSY || rc == -EINVAL) {
3354 "cciss: Unable to get major number %d for %s "
3355 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3358 if (i >= MAX_CTLR_ORIG)
3362 /* make sure the board interrupts are off */
3363 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3364 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3365 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3366 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3367 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3371 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3372 hba[i]->devname, pdev->device, pci_name(pdev),
3373 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3375 hba[i]->cmd_pool_bits =
3376 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3377 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
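/* cmd_pool_bits serves as the free/used bitmap for cmd_pool: one bit per
 * command, rounded up to a whole number of unsigned longs.  It is zeroed
 * below before any command is taken from the pool. */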
3378 hba[i]->cmd_pool = (CommandList_struct *)
3379 pci_alloc_consistent(hba[i]->pdev,
3380 hba[i]->nr_cmds * sizeof(CommandList_struct),
3381 &(hba[i]->cmd_pool_dhandle));
3382 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3383 pci_alloc_consistent(hba[i]->pdev,
3384 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3385 &(hba[i]->errinfo_pool_dhandle));
3386 if ((hba[i]->cmd_pool_bits == NULL)
3387 || (hba[i]->cmd_pool == NULL)
3388 || (hba[i]->errinfo_pool == NULL)) {
3389 printk(KERN_ERR "cciss: out of memory");
3392 #ifdef CONFIG_CISS_SCSI_TAPE
3393 hba[i]->scsi_rejects.complete =
3394 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3395 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3396 if (hba[i]->scsi_rejects.complete == NULL) {
3397 printk(KERN_ERR "cciss: out of memory");
3401 spin_lock_init(&hba[i]->lock);
3403 /* Initialize the pdev driver private data;
3404 have it point to hba[i]. */
3405 pci_set_drvdata(pdev, hba[i]);
3406 /* command and error info recs zeroed out before
3408 memset(hba[i]->cmd_pool_bits, 0,
3409 ((hba[i]->nr_cmds + BITS_PER_LONG -
3410 1) / BITS_PER_LONG) * sizeof(unsigned long));
3413 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3414 #endif /* CCISS_DEBUG */
3416 cciss_getgeometry(i);
3418 cciss_scsi_setup(i);
3420 /* Turn the interrupts on so we can service requests */
3421 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3425 hba[i]->cciss_max_sectors = 2048;
3427 hba[i]->busy_initializing = 0;
3430 drive_info_struct *drv = &(hba[i]->drv[j]);
3431 struct gendisk *disk = hba[i]->gendisk[j];
3432 struct request_queue *q;
3434 /* Check if the disk was allocated already */
3436 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3437 disk = hba[i]->gendisk[j];
3440 /* Check that the disk was able to be allocated */
3442 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3446 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3449 "cciss: unable to allocate queue for disk %d\n",
3455 q->backing_dev_info.ra_pages = READ_AHEAD;
3456 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3458 /* This is a hardware imposed limit. */
3459 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3461 /* This is a limit in the driver and could be eliminated. */
3462 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3464 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3466 blk_queue_softirq_done(q, cciss_softirq_done);
3468 q->queuedata = hba[i];
3469 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3470 disk->major = hba[i]->major;
3471 disk->first_minor = j << NWD_SHIFT;
3472 disk->fops = &cciss_fops;
3474 disk->private_data = drv;
3475 disk->driverfs_dev = &pdev->dev;
3476 /* we must register the controller even if no disks exist */
3477 /* this is for the online array utilities */
3478 if (!drv->heads && j)
3480 blk_queue_hardsect_size(q, drv->block_size);
3481 set_capacity(disk, drv->nr_blocks);
3484 } while (j <= hba[i]->highest_lun);
3489 #ifdef CONFIG_CISS_SCSI_TAPE
3490 kfree(hba[i]->scsi_rejects.complete);
3492 kfree(hba[i]->cmd_pool_bits);
3493 if (hba[i]->cmd_pool)
3494 pci_free_consistent(hba[i]->pdev,
3495 hba[i]->nr_cmds * sizeof(CommandList_struct),
3496 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3497 if (hba[i]->errinfo_pool)
3498 pci_free_consistent(hba[i]->pdev,
3499 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3500 hba[i]->errinfo_pool,
3501 hba[i]->errinfo_pool_dhandle);
3502 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3504 unregister_blkdev(hba[i]->major, hba[i]->devname);
3506 hba[i]->busy_initializing = 0;
3507 /* cleanup any queues that may have been initialized */
3508 for (j=0; j <= hba[i]->highest_lun; j++){
3509 drive_info_struct *drv = &(hba[i]->drv[j]);
3511 blk_cleanup_queue(drv->queue);
3514 * Deliberately omit pci_disable_device(): it does something nasty to
3515 * Smart Array controllers that pci_enable_device does not undo
3517 pci_release_regions(pdev);
3518 pci_set_drvdata(pdev, NULL);
3523 static void cciss_shutdown(struct pci_dev *pdev)
3525 ctlr_info_t *tmp_ptr;
3530 tmp_ptr = pci_get_drvdata(pdev);
3531 if (tmp_ptr == NULL)
3537 /* Turn board interrupts off and send the flush cache command */
3538 /* sendcmd will turn off interrupts and send the flush...
3539 * to write all data in the battery-backed cache to disk */
3540 memset(flush_buf, 0, 4);
3541 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3543 if (return_code == IO_OK) {
3544 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3546 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3548 free_irq(hba[i]->intr[2], hba[i]);
3551 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3553 ctlr_info_t *tmp_ptr;
3556 if (pci_get_drvdata(pdev) == NULL) {
3557 printk(KERN_ERR "cciss: Unable to remove device \n");
3560 tmp_ptr = pci_get_drvdata(pdev);
3562 if (hba[i] == NULL) {
3563 printk(KERN_ERR "cciss: device appears to "
3564 "already be removed \n");
3568 remove_proc_entry(hba[i]->devname, proc_cciss);
3569 unregister_blkdev(hba[i]->major, hba[i]->devname);
3571 /* remove it from the disk list */
3572 for (j = 0; j < CISS_MAX_LUN; j++) {
3573 struct gendisk *disk = hba[i]->gendisk[j];
3575 struct request_queue *q = disk->queue;
3577 if (disk->flags & GENHD_FL_UP)
3580 blk_cleanup_queue(q);
3584 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3586 cciss_shutdown(pdev);
3588 #ifdef CONFIG_PCI_MSI
3589 if (hba[i]->msix_vector)
3590 pci_disable_msix(hba[i]->pdev);
3591 else if (hba[i]->msi_vector)
3592 pci_disable_msi(hba[i]->pdev);
3593 #endif /* CONFIG_PCI_MSI */
3595 iounmap(hba[i]->vaddr);
3597 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3598 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3599 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3600 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3601 kfree(hba[i]->cmd_pool_bits);
3602 #ifdef CONFIG_CISS_SCSI_TAPE
3603 kfree(hba[i]->scsi_rejects.complete);
3606 * Deliberately omit pci_disable_device(): it does something nasty to
3607 * Smart Array controllers that pci_enable_device does not undo
3609 pci_release_regions(pdev);
3610 pci_set_drvdata(pdev, NULL);
3614 static struct pci_driver cciss_pci_driver = {
3616 .probe = cciss_init_one,
3617 .remove = __devexit_p(cciss_remove_one),
3618 .id_table = cciss_pci_device_id, /* id_table */
3619 .shutdown = cciss_shutdown,
3623 * This is it. Register the PCI driver information for the cards we control;
3624 * the OS will call our registered routines when it finds one of our cards.
3626 static int __init cciss_init(void)
3628 printk(KERN_INFO DRIVER_NAME "\n");
3630 /* Register for our PCI devices */
3631 return pci_register_driver(&cciss_pci_driver);
3634 static void __exit cciss_cleanup(void)
3638 pci_unregister_driver(&cciss_pci_driver);
3639 /* double check that all controller entries have been removed */
3640 for (i = 0; i < MAX_CTLR; i++) {
3641 if (hba[i] != NULL) {
3642 printk(KERN_WARNING "cciss: had to remove"
3643 " controller %d\n", i);
3644 cciss_remove_one(hba[i]->pdev);
3647 remove_proc_entry("cciss", proc_root_driver);
3650 static void fail_all_cmds(unsigned long ctlr)
3652 /* If we get here, the board is apparently dead. */
3653 ctlr_info_t *h = hba[ctlr];
3654 CommandList_struct *c;
3655 unsigned long flags;
3657 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3658 h->alive = 0; /* the controller apparently died... */
3660 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3662 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3664 /* move everything off the request queue onto the completed queue */
3665 while ((c = h->reqQ) != NULL) {
3666 removeQ(&(h->reqQ), c);
3668 addQ(&(h->cmpQ), c);
3671 /* Now, fail everything on the completed queue with a HW error */
3672 while ((c = h->cmpQ) != NULL) {
3673 removeQ(&h->cmpQ, c);
3674 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3675 if (c->cmd_type == CMD_RWREQ) {
3676 complete_command(h, c, 0);
3677 } else if (c->cmd_type == CMD_IOCTL_PEND)
3678 complete(c->waiting);
3679 #ifdef CONFIG_CISS_SCSI_TAPE
3680 else if (c->cmd_type == CMD_SCSI)
3681 complete_scsi_command(c, 0, 0);
3684 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3688 module_init(cciss_init);
3689 module_exit(cciss_cleanup);