2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
57 #include <linux/config.h>
59 #include <linux/init.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/kernel.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_request.h>
88 static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
89 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
90 static unsigned int ipr_max_speed = 1;
91 static int ipr_testmode = 0;
92 static unsigned int ipr_fastfail = 0;
93 static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
94 static unsigned int ipr_enable_cache = 1;
95 static unsigned int ipr_debug = 0;
96 static int ipr_auto_create = 1;
97 static DEFINE_SPINLOCK(ipr_driver_lock);
99 /* This table describes the differences between DMA controller chips */
100 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
101 { /* Gemstone and Citrine */
103 .cache_line_size = 0x20,
105 .set_interrupt_mask_reg = 0x0022C,
106 .clr_interrupt_mask_reg = 0x00230,
107 .sense_interrupt_mask_reg = 0x0022C,
108 .clr_interrupt_reg = 0x00228,
109 .sense_interrupt_reg = 0x00224,
110 .ioarrin_reg = 0x00404,
111 .sense_uproc_interrupt_reg = 0x00214,
112 .set_uproc_interrupt_reg = 0x00214,
113 .clr_uproc_interrupt_reg = 0x00218
116 { /* Snipe and Scamp */
118 .cache_line_size = 0x20,
120 .set_interrupt_mask_reg = 0x00288,
121 .clr_interrupt_mask_reg = 0x0028C,
122 .sense_interrupt_mask_reg = 0x00288,
123 .clr_interrupt_reg = 0x00284,
124 .sense_interrupt_reg = 0x00280,
125 .ioarrin_reg = 0x00504,
126 .sense_uproc_interrupt_reg = 0x00290,
127 .set_uproc_interrupt_reg = 0x00290,
128 .clr_uproc_interrupt_reg = 0x00294
133 static const struct ipr_chip_t ipr_chip[] = {
134 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
137 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
140 static int ipr_max_bus_speeds [] = {
141 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
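/* Bus speed limits, in the order selected by the max_speed module
   parameter: 0 = 80 MB/s, 1 = U160, 2 = U320 */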
144 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
145 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
146 module_param_named(max_speed, ipr_max_speed, uint, 0);
147 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
148 module_param_named(log_level, ipr_log_level, uint, 0);
149 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver");
150 module_param_named(testmode, ipr_testmode, int, 0);
151 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
152 module_param_named(fastfail, ipr_fastfail, int, 0);
153 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
154 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
155 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
156 module_param_named(enable_cache, ipr_enable_cache, int, 0);
157 MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
158 module_param_named(debug, ipr_debug, int, 0);
159 MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
160 module_param_named(auto_create, ipr_auto_create, int, 0);
161 MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
162 MODULE_LICENSE("GPL");
163 MODULE_VERSION(IPR_DRIVER_VERSION);
165 static const char *ipr_gpdd_dev_end_states[] = {
167 "Terminated by host",
168 "Terminated by device reset",
169 "Terminated by bus reset",
171 "Command not started"
174 static const char *ipr_gpdd_dev_bus_phases[] = {
188 /* A constant array of IOASCs/URCs/Error Messages */
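/* Each entry pairs an IOASC with logging flags and its message text;
   entry 0 is the fallback returned by ipr_get_error() for IOASCs
   that are not in the table */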
190 static const struct ipr_error_table_t ipr_error_table[] = {
192 "8155: An unknown error was received"},
194 "Soft underlength error"},
196 "Command to be cancelled not found"},
198 "Qualified success"},
200 "FFFE: Soft device bus error recovered by the IOA"},
202 "FFF9: Device sector reassign successful"},
204 "FFF7: Media error recovered by device rewrite procedures"},
206 "7001: IOA sector reassignment successful"},
208 "FFF9: Soft media error. Sector reassignment recommended"},
210 "FFF7: Media error recovered by IOA rewrite procedures"},
212 "FF3D: Soft PCI bus error recovered by the IOA"},
214 "FFF6: Device hardware error recovered by the IOA"},
216 "FFF6: Device hardware error recovered by the device"},
218 "FF3D: Soft IOA error recovered by the IOA"},
220 "FFFA: Undefined device response recovered by the IOA"},
222 "FFF6: Device bus error, message or command phase"},
224 "FFF6: Failure prediction threshold exceeded"},
226 "8009: Impending cache battery pack failure"},
228 "34FF: Disk device format in progress"},
230 "Synchronization required"},
232 "No ready, IOA shutdown"},
234 "Not ready, IOA has been shutdown"},
236 "3020: Storage subsystem configuration error"},
238 "FFF5: Medium error, data unreadable, recommend reassign"},
240 "7000: Medium error, data unreadable, do not reassign"},
242 "FFF3: Disk media format bad"},
244 "3002: Addressed device failed to respond to selection"},
246 "3100: Device bus error"},
248 "3109: IOA timed out a device command"},
250 "3120: SCSI bus is not operational"},
252 "9000: IOA reserved area data check"},
254 "9001: IOA reserved area invalid data pattern"},
256 "9002: IOA reserved area LRC error"},
258 "102E: Out of alternate sectors for disk storage"},
260 "FFF4: Data transfer underlength error"},
262 "FFF4: Data transfer overlength error"},
264 "3400: Logical unit failure"},
266 "FFF4: Device microcode is corrupt"},
268 "8150: PCI bus error"},
270 "Unsupported device bus message received"},
272 "FFF4: Disk device problem"},
274 "8150: Permanent IOA failure"},
276 "3010: Disk device returned wrong response to IOA"},
278 "8151: IOA microcode error"},
280 "Device bus status error"},
282 "8157: IOA error requiring IOA reset to recover"},
284 "Message reject received from the device"},
286 "8008: A permanent cache battery pack failure occurred"},
288 "9090: Disk unit has been modified after the last known status"},
290 "9081: IOA detected device error"},
292 "9082: IOA detected device error"},
294 "3110: Device bus error, message or command phase"},
296 "9091: Incorrect hardware configuration change has been detected"},
298 "9073: Invalid multi-adapter configuration"},
300 "FFF4: Command to logical unit failed"},
302 "Illegal request, invalid request type or request packet"},
304 "Illegal request, invalid resource handle"},
306 "Illegal request, commands not allowed to this device"},
308 "Illegal request, command not allowed to a secondary adapter"},
310 "Illegal request, invalid field in parameter list"},
312 "Illegal request, parameter not supported"},
314 "Illegal request, parameter value invalid"},
316 "Illegal request, command sequence error"},
318 "Illegal request, dual adapter support not enabled"},
320 "9031: Array protection temporarily suspended, protection resuming"},
322 "9040: Array protection temporarily suspended, protection resuming"},
324 "FFFB: SCSI bus was reset"},
326 "FFFE: SCSI bus transition to single ended"},
328 "FFFE: SCSI bus transition to LVD"},
330 "FFFB: SCSI bus was reset by another initiator"},
332 "3029: A device replacement has occurred"},
334 "9051: IOA cache data exists for a missing or failed device"},
336 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
338 "9025: Disk unit is not supported at its physical location"},
340 "3020: IOA detected a SCSI bus configuration error"},
342 "3150: SCSI bus configuration error"},
344 "9074: Asymmetric advanced function disk configuration"},
346 "9041: Array protection temporarily suspended"},
348 "9042: Corrupt array parity detected on specified device"},
350 "9030: Array no longer protected due to missing or failed disk unit"},
352 "9071: Link operational transition"},
354 "9072: Link not operational transition"},
356 "9032: Array exposed but still protected"},
358 "Failure due to other device"},
360 "9008: IOA does not support functions expected by devices"},
362 "9010: Cache data associated with attached devices cannot be found"},
364 "9011: Cache data belongs to devices other than those attached"},
366 "9020: Array missing 2 or more devices with only 1 device present"},
368 "9021: Array missing 2 or more devices with 2 or more devices present"},
370 "9022: Exposed array is missing a required device"},
372 "9023: Array member(s) not at required physical locations"},
374 "9024: Array not functional due to present hardware configuration"},
376 "9026: Array not functional due to present hardware configuration"},
378 "9027: Array is missing a device and parity is out of sync"},
380 "9028: Maximum number of arrays already exist"},
382 "9050: Required cache data cannot be located for a disk unit"},
384 "9052: Cache data exists for a device that has been modified"},
386 "9054: IOA resources not available due to previous problems"},
388 "9092: Disk unit requires initialization before use"},
390 "9029: Incorrect hardware configuration change has been detected"},
392 "9060: One or more disk pairs are missing from an array"},
394 "9061: One or more disks are missing from an array"},
396 "9062: One or more disks are missing from an array"},
398 "9063: Maximum number of functional arrays has been exceeded"},
400 "Aborted command, invalid descriptor"},
402 "Command terminated by host"}
405 static const struct ipr_ses_table_entry ipr_ses_table[] = {
406 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
407 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
408 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
409 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
410 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
411 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
412 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
413 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
414 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
415 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
416 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
417 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
418 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
422 * Function Prototypes
424 static int ipr_reset_alert(struct ipr_cmnd *);
425 static void ipr_process_ccn(struct ipr_cmnd *);
426 static void ipr_process_error(struct ipr_cmnd *);
427 static void ipr_reset_ioa_job(struct ipr_cmnd *);
428 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
429 enum ipr_shutdown_type);
431 #ifdef CONFIG_SCSI_IPR_TRACE
433 * ipr_trc_hook - Add a trace entry to the driver trace
434 * @ipr_cmd: ipr command struct
436 * @add_data: additional data
441 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
442 u8 type, u32 add_data)
444 struct ipr_trace_entry *trace_entry;
445 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
447 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
448 trace_entry->time = jiffies;
449 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
450 trace_entry->type = type;
451 trace_entry->cmd_index = ipr_cmd->cmd_index;
452 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
453 trace_entry->u.add_data = add_data;
456 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
460 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
461 * @ipr_cmd: ipr command struct
466 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
468 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
469 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
471 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
472 ioarcb->write_data_transfer_length = 0;
473 ioarcb->read_data_transfer_length = 0;
474 ioarcb->write_ioadl_len = 0;
475 ioarcb->read_ioadl_len = 0;
477 ioasa->residual_data_len = 0;
479 ipr_cmd->scsi_cmd = NULL;
480 ipr_cmd->sense_buffer[0] = 0;
481 ipr_cmd->dma_use_sg = 0;
485 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
486 * @ipr_cmd: ipr command struct
491 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
493 ipr_reinit_ipr_cmnd(ipr_cmd);
494 ipr_cmd->u.scratch = 0;
495 ipr_cmd->sibling = NULL;
496 init_timer(&ipr_cmd->timer);
500 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
501 * @ioa_cfg: ioa config struct
504 * pointer to ipr command struct
507 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
509 struct ipr_cmnd *ipr_cmd;
511 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
512 list_del(&ipr_cmd->queue);
513 ipr_init_ipr_cmnd(ipr_cmd);
519 * ipr_unmap_sglist - Unmap scatterlist if mapped
520 * @ioa_cfg: ioa config struct
521 * @ipr_cmd: ipr command struct
526 static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
527 struct ipr_cmnd *ipr_cmd)
529 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
531 if (ipr_cmd->dma_use_sg) {
532 if (scsi_cmd->use_sg > 0) {
533 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
535 scsi_cmd->use_sg, scsi_cmd->sc_data_direction);
537 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
538 scsi_cmd->request_bufflen,
539 scsi_cmd->sc_data_direction);
545 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
546 * @ioa_cfg: ioa config struct
547 * @clr_ints: interrupts to clear
549 * This function masks all interrupts on the adapter, then clears the
550 * interrupts specified in the mask
555 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
558 volatile u32 int_reg;
560 /* Stop new interrupts */
561 ioa_cfg->allow_interrupts = 0;
563 /* Set interrupt mask to stop all new interrupts */
564 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
566 /* Clear any pending interrupts */
567 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
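/* Read back the interrupt register so the mask and clear writes
   above are flushed to the adapter before we return */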
568 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
572 * ipr_save_pcix_cmd_reg - Save PCI-X command register
573 * @ioa_cfg: ioa config struct
576 * 0 on success / -EIO on failure
578 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
580 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
582 if (pcix_cmd_reg == 0) {
583 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
587 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
588 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
589 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
593 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
598 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
599 * @ioa_cfg: ioa config struct
602 * 0 on success / -EIO on failure
604 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
606 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
609 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
610 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
611 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
615 dev_err(&ioa_cfg->pdev->dev,
616 "Failed to setup PCI-X command register\n");
624 * ipr_scsi_eh_done - mid-layer done function for aborted ops
625 * @ipr_cmd: ipr command struct
627 * This function is invoked by the interrupt handler for
628 * ops generated by the SCSI mid-layer which are being aborted.
633 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
635 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
636 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
638 scsi_cmd->result |= (DID_ERROR << 16);
640 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
641 scsi_cmd->scsi_done(scsi_cmd);
642 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
646 * ipr_fail_all_ops - Fails all outstanding ops.
647 * @ioa_cfg: ioa config struct
649 * This function fails all outstanding ops.
654 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
656 struct ipr_cmnd *ipr_cmd, *temp;
659 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
660 list_del(&ipr_cmd->queue);
662 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
663 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
665 if (ipr_cmd->scsi_cmd)
666 ipr_cmd->done = ipr_scsi_eh_done;
668 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
669 del_timer(&ipr_cmd->timer);
670 ipr_cmd->done(ipr_cmd);
677 * ipr_do_req - Send driver initiated requests.
678 * @ipr_cmd: ipr command struct
679 * @done: done function
680 * @timeout_func: timeout function
681 * @timeout: timeout value
683 * This function sends the specified command to the adapter with the
684 * timeout given. The done function is invoked on command completion.
689 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
690 void (*done) (struct ipr_cmnd *),
691 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
693 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
695 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
697 ipr_cmd->done = done;
699 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
700 ipr_cmd->timer.expires = jiffies + timeout;
701 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
703 add_timer(&ipr_cmd->timer);
705 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
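/* Writing the bus address of the IOARCB to the IOARRIN register
   hands the command to the adapter */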
708 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
709 ioa_cfg->regs.ioarrin_reg);
713 * ipr_internal_cmd_done - Op done function for an internally generated op.
714 * @ipr_cmd: ipr command struct
716 * This function is the op done function for an internally generated,
717 * blocking op. It simply wakes the sleeping thread.
722 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
724 if (ipr_cmd->sibling)
725 ipr_cmd->sibling = NULL;
727 complete(&ipr_cmd->completion);
731 * ipr_send_blocking_cmd - Send command and sleep on its completion.
732 * @ipr_cmd: ipr command struct
733 * @timeout_func: function to invoke if command times out
739 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
740 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
743 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
745 init_completion(&ipr_cmd->completion);
746 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
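/* Drop the host lock while sleeping so the interrupt handler can
   run and complete the command */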
748 spin_unlock_irq(ioa_cfg->host->host_lock);
749 wait_for_completion(&ipr_cmd->completion);
750 spin_lock_irq(ioa_cfg->host->host_lock);
754 * ipr_send_hcam - Send an HCAM to the adapter.
755 * @ioa_cfg: ioa config struct
757 * @hostrcb: hostrcb struct
759 * This function will send a Host Controlled Async command to the adapter.
760 * If HCAMs are currently not allowed to be issued to the adapter, it will
761 * place the hostrcb on the free queue.
766 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
767 struct ipr_hostrcb *hostrcb)
769 struct ipr_cmnd *ipr_cmd;
770 struct ipr_ioarcb *ioarcb;
772 if (ioa_cfg->allow_cmds) {
773 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
774 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
775 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
777 ipr_cmd->u.hostrcb = hostrcb;
778 ioarcb = &ipr_cmd->ioarcb;
780 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
781 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
782 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
783 ioarcb->cmd_pkt.cdb[1] = type;
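/* CDB bytes 7 and 8 carry the size of the hostrcb buffer, MSB first */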
784 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
785 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
787 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
788 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
789 ipr_cmd->ioadl[0].flags_and_data_len =
790 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
791 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
793 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
794 ipr_cmd->done = ipr_process_ccn;
796 ipr_cmd->done = ipr_process_error;
798 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
801 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
802 ioa_cfg->regs.ioarrin_reg);
804 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
809 * ipr_init_res_entry - Initialize a resource entry struct.
810 * @res: resource entry struct
815 static void ipr_init_res_entry(struct ipr_resource_entry *res)
817 res->needs_sync_complete = 0;
820 res->del_from_ml = 0;
821 res->resetting_device = 0;
826 * ipr_handle_config_change - Handle a config change from the adapter
827 * @ioa_cfg: ioa config struct
833 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
834 struct ipr_hostrcb *hostrcb)
836 struct ipr_resource_entry *res = NULL;
837 struct ipr_config_table_entry *cfgte;
840 cfgte = &hostrcb->hcam.u.ccn.cfgte;
842 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
843 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
844 sizeof(cfgte->res_addr))) {
851 if (list_empty(&ioa_cfg->free_res_q)) {
852 ipr_send_hcam(ioa_cfg,
853 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
858 res = list_entry(ioa_cfg->free_res_q.next,
859 struct ipr_resource_entry, queue);
861 list_del(&res->queue);
862 ipr_init_res_entry(res);
863 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
866 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
868 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
870 res->sdev->hostdata = NULL;
871 res->del_from_ml = 1;
872 if (ioa_cfg->allow_ml_add_del)
873 schedule_work(&ioa_cfg->work_q);
875 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
876 } else if (!res->sdev) {
878 if (ioa_cfg->allow_ml_add_del)
879 schedule_work(&ioa_cfg->work_q);
882 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
886 * ipr_process_ccn - Op done function for a CCN.
887 * @ipr_cmd: ipr command struct
889 * This function is the op done function for a configuration
890 * change notification host controlled async from the adapter.
895 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
897 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
898 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
899 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
901 list_del(&hostrcb->queue);
902 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
905 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
906 dev_err(&ioa_cfg->pdev->dev,
907 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
909 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
911 ipr_handle_config_change(ioa_cfg, hostrcb);
916 * ipr_log_vpd - Log the passed VPD to the error log.
917 * @vpd: vendor/product id/sn struct
922 static void ipr_log_vpd(struct ipr_vpd *vpd)
924 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
925 + IPR_SERIAL_NUM_LEN];
927 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
928 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
930 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
931 ipr_err("Vendor/Product ID: %s\n", buffer);
933 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
934 buffer[IPR_SERIAL_NUM_LEN] = '\0';
935 ipr_err(" Serial Number: %s\n", buffer);
939 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
940 * @vpd: vendor/product id/sn/wwn struct
945 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
947 ipr_log_vpd(&vpd->vpd);
948 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
949 be32_to_cpu(vpd->wwid[1]));
953 * ipr_log_enhanced_cache_error - Log a cache error.
954 * @ioa_cfg: ioa config struct
955 * @hostrcb: hostrcb struct
960 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
961 struct ipr_hostrcb *hostrcb)
963 struct ipr_hostrcb_type_12_error *error =
964 &hostrcb->hcam.u.error.u.type_12_error;
966 ipr_err("-----Current Configuration-----\n");
967 ipr_err("Cache Directory Card Information:\n");
968 ipr_log_ext_vpd(&error->ioa_vpd);
969 ipr_err("Adapter Card Information:\n");
970 ipr_log_ext_vpd(&error->cfc_vpd);
972 ipr_err("-----Expected Configuration-----\n");
973 ipr_err("Cache Directory Card Information:\n");
974 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
975 ipr_err("Adapter Card Information:\n");
976 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
978 ipr_err("Additional IOA Data: %08X %08X %08X\n",
979 be32_to_cpu(error->ioa_data[0]),
980 be32_to_cpu(error->ioa_data[1]),
981 be32_to_cpu(error->ioa_data[2]));
985 * ipr_log_cache_error - Log a cache error.
986 * @ioa_cfg: ioa config struct
987 * @hostrcb: hostrcb struct
992 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
993 struct ipr_hostrcb *hostrcb)
995 struct ipr_hostrcb_type_02_error *error =
996 &hostrcb->hcam.u.error.u.type_02_error;
998 ipr_err("-----Current Configuration-----\n");
999 ipr_err("Cache Directory Card Information:\n");
1000 ipr_log_vpd(&error->ioa_vpd);
1001 ipr_err("Adapter Card Information:\n");
1002 ipr_log_vpd(&error->cfc_vpd);
1004 ipr_err("-----Expected Configuration-----\n");
1005 ipr_err("Cache Directory Card Information:\n");
1006 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1007 ipr_err("Adapter Card Information:\n");
1008 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1010 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1011 be32_to_cpu(error->ioa_data[0]),
1012 be32_to_cpu(error->ioa_data[1]),
1013 be32_to_cpu(error->ioa_data[2]));
1017 * ipr_log_enhanced_config_error - Log a configuration error.
1018 * @ioa_cfg: ioa config struct
1019 * @hostrcb: hostrcb struct
1024 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1025 struct ipr_hostrcb *hostrcb)
1027 int errors_logged, i;
1028 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1029 struct ipr_hostrcb_type_13_error *error;
1031 error = &hostrcb->hcam.u.error.u.type_13_error;
1032 errors_logged = be32_to_cpu(error->errors_logged);
1034 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1035 be32_to_cpu(error->errors_detected), errors_logged);
1037 dev_entry = error->dev;
1039 for (i = 0; i < errors_logged; i++, dev_entry++) {
1042 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1043 ipr_log_ext_vpd(&dev_entry->vpd);
1045 ipr_err("-----New Device Information-----\n");
1046 ipr_log_ext_vpd(&dev_entry->new_vpd);
1048 ipr_err("Cache Directory Card Information:\n");
1049 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1051 ipr_err("Adapter Card Information:\n");
1052 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1057 * ipr_log_config_error - Log a configuration error.
1058 * @ioa_cfg: ioa config struct
1059 * @hostrcb: hostrcb struct
1064 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1065 struct ipr_hostrcb *hostrcb)
1067 int errors_logged, i;
1068 struct ipr_hostrcb_device_data_entry *dev_entry;
1069 struct ipr_hostrcb_type_03_error *error;
1071 error = &hostrcb->hcam.u.error.u.type_03_error;
1072 errors_logged = be32_to_cpu(error->errors_logged);
1074 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1075 be32_to_cpu(error->errors_detected), errors_logged);
1077 dev_entry = error->dev;
1079 for (i = 0; i < errors_logged; i++, dev_entry++) {
1082 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1083 ipr_log_vpd(&dev_entry->vpd);
1085 ipr_err("-----New Device Information-----\n");
1086 ipr_log_vpd(&dev_entry->new_vpd);
1088 ipr_err("Cache Directory Card Information:\n");
1089 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1091 ipr_err("Adapter Card Information:\n");
1092 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1094 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1095 be32_to_cpu(dev_entry->ioa_data[0]),
1096 be32_to_cpu(dev_entry->ioa_data[1]),
1097 be32_to_cpu(dev_entry->ioa_data[2]),
1098 be32_to_cpu(dev_entry->ioa_data[3]),
1099 be32_to_cpu(dev_entry->ioa_data[4]));
1104 * ipr_log_enhanced_array_error - Log an array configuration error.
1105 * @ioa_cfg: ioa config struct
1106 * @hostrcb: hostrcb struct
1111 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1112 struct ipr_hostrcb *hostrcb)
1115 struct ipr_hostrcb_type_14_error *error;
1116 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1117 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1119 error = &hostrcb->hcam.u.error.u.type_14_error;
1123 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1124 error->protection_level,
1125 ioa_cfg->host->host_no,
1126 error->last_func_vset_res_addr.bus,
1127 error->last_func_vset_res_addr.target,
1128 error->last_func_vset_res_addr.lun);
1132 array_entry = error->array_member;
1133 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1134 ARRAY_SIZE(error->array_member));
1136 for (i = 0; i < num_entries; i++, array_entry++) {
1137 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1140 if (be32_to_cpu(error->exposed_mode_adn) == i)
1141 ipr_err("Exposed Array Member %d:\n", i);
1143 ipr_err("Array Member %d:\n", i);
1145 ipr_log_ext_vpd(&array_entry->vpd);
1146 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1147 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1148 "Expected Location");
1155 * ipr_log_array_error - Log an array configuration error.
1156 * @ioa_cfg: ioa config struct
1157 * @hostrcb: hostrcb struct
1162 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1163 struct ipr_hostrcb *hostrcb)
1166 struct ipr_hostrcb_type_04_error *error;
1167 struct ipr_hostrcb_array_data_entry *array_entry;
1168 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1170 error = &hostrcb->hcam.u.error.u.type_04_error;
1174 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1175 error->protection_level,
1176 ioa_cfg->host->host_no,
1177 error->last_func_vset_res_addr.bus,
1178 error->last_func_vset_res_addr.target,
1179 error->last_func_vset_res_addr.lun);
1183 array_entry = error->array_member;
1185 for (i = 0; i < 18; i++) {
1186 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1189 if (be32_to_cpu(error->exposed_mode_adn) == i)
1190 ipr_err("Exposed Array Member %d:\n", i);
1192 ipr_err("Array Member %d:\n", i);
1194 ipr_log_vpd(&array_entry->vpd);
1196 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1197 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1198 "Expected Location");
1203 array_entry = error->array_member2;
1210 * ipr_log_hex_data - Log additional hex IOA error data.
1211 * @data: IOA error data
1217 static void ipr_log_hex_data(u32 *data, int len)
1224 for (i = 0; i < len / 4; i += 4) {
1225 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1226 be32_to_cpu(data[i]),
1227 be32_to_cpu(data[i+1]),
1228 be32_to_cpu(data[i+2]),
1229 be32_to_cpu(data[i+3]));
1234 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1235 * @ioa_cfg: ioa config struct
1236 * @hostrcb: hostrcb struct
1241 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1242 struct ipr_hostrcb *hostrcb)
1244 struct ipr_hostrcb_type_17_error *error;
1246 error = &hostrcb->hcam.u.error.u.type_17_error;
1247 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1249 ipr_err("%s\n", error->failure_reason);
1250 ipr_err("Remote Adapter VPD:\n");
1251 ipr_log_ext_vpd(&error->vpd);
1252 ipr_log_hex_data(error->data,
1253 be32_to_cpu(hostrcb->hcam.length) -
1254 (offsetof(struct ipr_hostrcb_error, u) +
1255 offsetof(struct ipr_hostrcb_type_17_error, data)));
1259 * ipr_log_dual_ioa_error - Log a dual adapter error.
1260 * @ioa_cfg: ioa config struct
1261 * @hostrcb: hostrcb struct
1266 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1267 struct ipr_hostrcb *hostrcb)
1269 struct ipr_hostrcb_type_07_error *error;
1271 error = &hostrcb->hcam.u.error.u.type_07_error;
1272 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1274 ipr_err("%s\n", error->failure_reason);
1275 ipr_err("Remote Adapter VPD:\n");
1276 ipr_log_vpd(&error->vpd);
1277 ipr_log_hex_data(error->data,
1278 be32_to_cpu(hostrcb->hcam.length) -
1279 (offsetof(struct ipr_hostrcb_error, u) +
1280 offsetof(struct ipr_hostrcb_type_07_error, data)));
1284 * ipr_log_generic_error - Log an adapter error.
1285 * @ioa_cfg: ioa config struct
1286 * @hostrcb: hostrcb struct
1291 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1292 struct ipr_hostrcb *hostrcb)
1294 ipr_log_hex_data(hostrcb->hcam.u.raw.data,
1295 be32_to_cpu(hostrcb->hcam.length));
1299 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1302 * This function will return the index into the ipr_error_table
1303 * for the specified IOASC. If the IOASC is not in the table,
1304 * 0 will be returned, which points to the entry used for unknown errors.
1307 * index into the ipr_error_table
1309 static u32 ipr_get_error(u32 ioasc)
1313 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1314 if (ipr_error_table[i].ioasc == ioasc)
1321 * ipr_handle_log_data - Log an adapter error.
1322 * @ioa_cfg: ioa config struct
1323 * @hostrcb: hostrcb struct
1325 * This function logs an adapter error to the system.
1330 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1331 struct ipr_hostrcb *hostrcb)
1336 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1339 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1340 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1342 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1344 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1345 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1346 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1347 scsi_report_bus_reset(ioa_cfg->host,
1348 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1351 error_index = ipr_get_error(ioasc);
1353 if (!ipr_error_table[error_index].log_hcam)
1356 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1357 ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1358 "%s\n", ipr_error_table[error_index].error);
1360 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1361 ipr_error_table[error_index].error);
1364 /* Set indication we have logged an error */
1365 ioa_cfg->errors_logged++;
1367 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1369 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1370 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1372 switch (hostrcb->hcam.overlay_id) {
1373 case IPR_HOST_RCB_OVERLAY_ID_2:
1374 ipr_log_cache_error(ioa_cfg, hostrcb);
1376 case IPR_HOST_RCB_OVERLAY_ID_3:
1377 ipr_log_config_error(ioa_cfg, hostrcb);
1379 case IPR_HOST_RCB_OVERLAY_ID_4:
1380 case IPR_HOST_RCB_OVERLAY_ID_6:
1381 ipr_log_array_error(ioa_cfg, hostrcb);
1383 case IPR_HOST_RCB_OVERLAY_ID_7:
1384 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1386 case IPR_HOST_RCB_OVERLAY_ID_12:
1387 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1389 case IPR_HOST_RCB_OVERLAY_ID_13:
1390 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1392 case IPR_HOST_RCB_OVERLAY_ID_14:
1393 case IPR_HOST_RCB_OVERLAY_ID_16:
1394 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1396 case IPR_HOST_RCB_OVERLAY_ID_17:
1397 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1399 case IPR_HOST_RCB_OVERLAY_ID_1:
1400 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1402 ipr_log_generic_error(ioa_cfg, hostrcb);
1408 * ipr_process_error - Op done function for an adapter error log.
1409 * @ipr_cmd: ipr command struct
1411 * This function is the op done function for an error log host
1412 * controlled async from the adapter. It will log the error and
1413 * send the HCAM back to the adapter.
1418 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1420 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1421 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1422 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1424 list_del(&hostrcb->queue);
1425 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1428 ipr_handle_log_data(ioa_cfg, hostrcb);
1429 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1430 dev_err(&ioa_cfg->pdev->dev,
1431 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1434 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1438 * ipr_timeout - An internally generated op has timed out.
1439 * @ipr_cmd: ipr command struct
1441 * This function blocks host requests and initiates an adapter reset.
1447 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1449 unsigned long lock_flags = 0;
1450 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1453 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1455 ioa_cfg->errors_logged++;
1456 dev_err(&ioa_cfg->pdev->dev,
1457 "Adapter being reset due to command timeout.\n");
1459 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1460 ioa_cfg->sdt_state = GET_DUMP;
1462 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1463 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1465 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1470 * ipr_oper_timeout - Adapter timed out transitioning to operational
1471 * @ipr_cmd: ipr command struct
1473 * This function blocks host requests and initiates an adapter reset.
1479 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1481 unsigned long lock_flags = 0;
1482 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1485 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1487 ioa_cfg->errors_logged++;
1488 dev_err(&ioa_cfg->pdev->dev,
1489 "Adapter timed out transitioning to operational.\n");
1491 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1492 ioa_cfg->sdt_state = GET_DUMP;
1494 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1496 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1497 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1500 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1505 * ipr_reset_reload - Reset/Reload the IOA
1506 * @ioa_cfg: ioa config struct
1507 * @shutdown_type: shutdown type
1509 * This function resets the adapter and re-initializes it.
1510 * This function assumes that all new host commands have been stopped.
1514 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1515 enum ipr_shutdown_type shutdown_type)
1517 if (!ioa_cfg->in_reset_reload)
1518 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1520 spin_unlock_irq(ioa_cfg->host->host_lock);
1521 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1522 spin_lock_irq(ioa_cfg->host->host_lock);
1524 /* Handle the case where a host reset arrived while we were already
1525 resetting the adapter for some reason and that reset failed. */
1526 if (ioa_cfg->ioa_is_dead) {
1535 * ipr_find_ses_entry - Find matching SES in SES table
1536 * @res: resource entry struct of SES
1539 * pointer to SES table entry / NULL on failure
1541 static const struct ipr_ses_table_entry *
1542 ipr_find_ses_entry(struct ipr_resource_entry *res)
1545 const struct ipr_ses_table_entry *ste = ipr_ses_table;
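/* An 'X' in compare_product_id_byte means that byte of the product ID
   must match the table entry; any other character (such as '*') is
   treated as a wildcard */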
1547 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1548 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1549 if (ste->compare_product_id_byte[j] == 'X') {
1550 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1558 if (matches == IPR_PROD_ID_LEN)
1566 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1567 * @ioa_cfg: ioa config struct
1569 * @bus_width: bus width
1572 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1573 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1574 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1575 * max 160 MHz = max 320 MB/sec).
1577 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1579 struct ipr_resource_entry *res;
1580 const struct ipr_ses_table_entry *ste;
1581 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1583 /* Loop through each config table entry in the config table buffer */
1584 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1585 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1588 if (bus != res->cfgte.res_addr.bus)
1591 if (!(ste = ipr_find_ses_entry(res)))
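/* max_bus_speed_limit from the SES table is in MB/s; convert it to
   the 100 kHz units used for the bus speed by dividing by the bus
   width in bytes */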
1594 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1597 return max_xfer_rate;
1601 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1602 * @ioa_cfg: ioa config struct
1603 * @max_delay: max delay in micro-seconds to wait
1605 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1608 * 0 on success / other on failure
1610 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1612 volatile u32 pcii_reg;
1615 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1616 while (delay < max_delay) {
1617 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1619 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1622 /* udelay cannot be used if delay is more than a few milliseconds */
1623 if ((delay / 1000) > MAX_UDELAY_MS)
1624 mdelay(delay / 1000);
1634 * ipr_get_ldump_data_section - Dump IOA memory
1635 * @ioa_cfg: ioa config struct
1636 * @start_addr: adapter address to dump
1637 * @dest: destination kernel buffer
1638 * @length_in_words: length to dump in 4 byte words
1641 * 0 on success / -EIO on failure
1643 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1645 __be32 *dest, u32 length_in_words)
1647 volatile u32 temp_pcii_reg;
1650 /* Write IOA interrupt reg starting LDUMP state */
1651 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1652 ioa_cfg->regs.set_uproc_interrupt_reg);
1654 /* Wait for IO debug acknowledge */
1655 if (ipr_wait_iodbg_ack(ioa_cfg,
1656 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1657 dev_err(&ioa_cfg->pdev->dev,
1658 "IOA dump long data transfer timeout\n");
1662 /* Signal LDUMP interlocked - clear IO debug ack */
1663 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1664 ioa_cfg->regs.clr_interrupt_reg);
1666 /* Write Mailbox with starting address */
1667 writel(start_addr, ioa_cfg->ioa_mailbox);
1669 /* Signal address valid - clear IOA Reset alert */
1670 writel(IPR_UPROCI_RESET_ALERT,
1671 ioa_cfg->regs.clr_uproc_interrupt_reg);
1673 for (i = 0; i < length_in_words; i++) {
1674 /* Wait for IO debug acknowledge */
1675 if (ipr_wait_iodbg_ack(ioa_cfg,
1676 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1677 dev_err(&ioa_cfg->pdev->dev,
1678 "IOA dump short data transfer timeout\n");
1682 /* Read data from mailbox and increment destination pointer */
1683 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1686 /* For all but the last word of data, signal data received */
1687 if (i < (length_in_words - 1)) {
1688 /* Signal dump data received - Clear IO debug Ack */
1689 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1690 ioa_cfg->regs.clr_interrupt_reg);
1694 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1695 writel(IPR_UPROCI_RESET_ALERT,
1696 ioa_cfg->regs.set_uproc_interrupt_reg);
1698 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1699 ioa_cfg->regs.clr_uproc_interrupt_reg);
1701 /* Signal dump data received - Clear IO debug Ack */
1702 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1703 ioa_cfg->regs.clr_interrupt_reg);
1705 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1706 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1708 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1710 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1720 #ifdef CONFIG_SCSI_IPR_DUMP
1722 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1723 * @ioa_cfg: ioa config struct
1724 * @pci_address: adapter address
1725 * @length: length of data to copy
1727 * Copy data from PCI adapter to kernel buffer.
1728 * Note: length MUST be a 4 byte multiple
1730 * 0 on success / other on failure
1732 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1733 unsigned long pci_address, u32 length)
1735 int bytes_copied = 0;
1736 int cur_len, rc, rem_len, rem_page_len;
1738 unsigned long lock_flags = 0;
1739 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
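/* The IOA dump is gathered into single pages allocated on demand;
   ioa_data[] tracks the pages and page_offset the fill point within
   the current page */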
1741 while (bytes_copied < length &&
1742 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1743 if (ioa_dump->page_offset >= PAGE_SIZE ||
1744 ioa_dump->page_offset == 0) {
1745 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1749 return bytes_copied;
1752 ioa_dump->page_offset = 0;
1753 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1754 ioa_dump->next_page_index++;
1756 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1758 rem_len = length - bytes_copied;
1759 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1760 cur_len = min(rem_len, rem_page_len);
1762 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1763 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1766 rc = ipr_get_ldump_data_section(ioa_cfg,
1767 pci_address + bytes_copied,
1768 &page[ioa_dump->page_offset / 4],
1769 (cur_len / sizeof(u32)));
1771 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1774 ioa_dump->page_offset += cur_len;
1775 bytes_copied += cur_len;
1783 return bytes_copied;
1787 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1788 * @hdr: dump entry header struct
1793 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1795 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1797 hdr->offset = sizeof(*hdr);
1798 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1802 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1803 * @ioa_cfg: ioa config struct
1804 * @driver_dump: driver dump struct
1809 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1810 struct ipr_driver_dump *driver_dump)
1812 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1814 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1815 driver_dump->ioa_type_entry.hdr.len =
1816 sizeof(struct ipr_dump_ioa_type_entry) -
1817 sizeof(struct ipr_dump_entry_header);
1818 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1819 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1820 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1821 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1822 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1823 ucode_vpd->minor_release[1];
1824 driver_dump->hdr.num_entries++;
1828 * ipr_dump_version_data - Fill in the driver version in the dump.
1829 * @ioa_cfg: ioa config struct
1830 * @driver_dump: driver dump struct
1835 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1836 struct ipr_driver_dump *driver_dump)
1838 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1839 driver_dump->version_entry.hdr.len =
1840 sizeof(struct ipr_dump_version_entry) -
1841 sizeof(struct ipr_dump_entry_header);
1842 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1843 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1844 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1845 driver_dump->hdr.num_entries++;
1849 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1850 * @ioa_cfg: ioa config struct
1851 * @driver_dump: driver dump struct
1856 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1857 struct ipr_driver_dump *driver_dump)
1859 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1860 driver_dump->trace_entry.hdr.len =
1861 sizeof(struct ipr_dump_trace_entry) -
1862 sizeof(struct ipr_dump_entry_header);
1863 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1864 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1865 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1866 driver_dump->hdr.num_entries++;
1870 * ipr_dump_location_data - Fill in the IOA location in the dump.
1871 * @ioa_cfg: ioa config struct
1872 * @driver_dump: driver dump struct
1877 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1878 struct ipr_driver_dump *driver_dump)
1880 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1881 driver_dump->location_entry.hdr.len =
1882 sizeof(struct ipr_dump_location_entry) -
1883 sizeof(struct ipr_dump_entry_header);
1884 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1885 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1886 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1887 driver_dump->hdr.num_entries++;
1891 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1892 * @ioa_cfg: ioa config struct
1893 * @dump: dump struct
1898 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1900 unsigned long start_addr, sdt_word;
1901 unsigned long lock_flags = 0;
1902 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1903 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1904 u32 num_entries, start_off, end_off;
1905 u32 bytes_to_copy, bytes_copied, rc;
1906 struct ipr_sdt *sdt;
1911 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1913 if (ioa_cfg->sdt_state != GET_DUMP) {
1914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1918 start_addr = readl(ioa_cfg->ioa_mailbox);
1920 if (!ipr_sdt_is_fmt2(start_addr)) {
1921 dev_err(&ioa_cfg->pdev->dev,
1922 "Invalid dump table format: %lx\n", start_addr);
1923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1927 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1929 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1931 /* Initialize the overall dump header */
1932 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1933 driver_dump->hdr.num_entries = 1;
1934 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1935 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1936 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1937 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1939 ipr_dump_version_data(ioa_cfg, driver_dump);
1940 ipr_dump_location_data(ioa_cfg, driver_dump);
1941 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1942 ipr_dump_trace_data(ioa_cfg, driver_dump);
1944 /* Update dump_header */
1945 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1947 /* IOA Dump entry */
1948 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1949 ioa_dump->format = IPR_SDT_FMT2;
1950 ioa_dump->hdr.len = 0;
1951 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1952 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1954 /* First entries in sdt are actually a list of dump addresses and
1955 lengths to gather the real dump data. sdt represents the pointer
1956 to the ioa generated dump table. Dump data will be extracted based
1957 on entries in this table */
1958 sdt = &ioa_dump->sdt;
1960 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1961 sizeof(struct ipr_sdt) / sizeof(__be32));
1963 /* Smart Dump table is ready to use and the first entry is valid */
1964 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1965 dev_err(&ioa_cfg->pdev->dev,
1966 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1967 rc, be32_to_cpu(sdt->hdr.state));
1968 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1969 ioa_cfg->sdt_state = DUMP_OBTAINED;
1970 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1974 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1976 if (num_entries > IPR_NUM_SDT_ENTRIES)
1977 num_entries = IPR_NUM_SDT_ENTRIES;
1979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1981 for (i = 0; i < num_entries; i++) {
1982 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1983 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1987 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1988 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1989 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1990 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1992 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1993 bytes_to_copy = end_off - start_off;
1994 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1995 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1999 /* Copy data from adapter to driver buffers */
2000 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2003 ioa_dump->hdr.len += bytes_copied;
2005 if (bytes_copied != bytes_to_copy) {
2006 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2013 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2015 /* Update dump_header */
2016 driver_dump->hdr.len += ioa_dump->hdr.len;
2018 ioa_cfg->sdt_state = DUMP_OBTAINED;
2023 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2027 * ipr_release_dump - Free adapter dump memory
2028 * @kref: kref struct
2033 static void ipr_release_dump(struct kref *kref)
2035 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2036 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2037 unsigned long lock_flags = 0;
2041 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2042 ioa_cfg->dump = NULL;
2043 ioa_cfg->sdt_state = INACTIVE;
2044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2046 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2047 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2054 * ipr_worker_thread - Worker thread
2055 * @data: ioa config struct
2057 * Called at task level from a work thread. This function takes care
2058 * of adding and removing devices from the mid-layer as configuration
2059 * changes are detected by the adapter.
2064 static void ipr_worker_thread(void *data)
2066 unsigned long lock_flags;
2067 struct ipr_resource_entry *res;
2068 struct scsi_device *sdev;
2069 struct ipr_dump *dump;
2070 struct ipr_ioa_cfg *ioa_cfg = data;
2071 u8 bus, target, lun;
2075 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2077 if (ioa_cfg->sdt_state == GET_DUMP) {
2078 dump = ioa_cfg->dump;
2080 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2083 kref_get(&dump->kref);
2084 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2085 ipr_get_ioa_dump(ioa_cfg, dump);
2086 kref_put(&dump->kref, ipr_release_dump);
2088 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2089 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2090 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2098 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2099 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2103 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2104 if (res->del_from_ml && res->sdev) {
2107 if (!scsi_device_get(sdev)) {
2109 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2110 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2111 scsi_remove_device(sdev);
2112 scsi_device_put(sdev);
2113 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2120 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2121 if (res->add_to_ml) {
2122 bus = res->cfgte.res_addr.bus;
2123 target = res->cfgte.res_addr.target;
2124 lun = res->cfgte.res_addr.lun;
2125 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2126 scsi_add_device(ioa_cfg->host, bus, target, lun);
2127 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2133 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
2137 #ifdef CONFIG_SCSI_IPR_TRACE
2139 * ipr_read_trace - Dump the adapter trace
2140 * @kobj: kobject struct
2143 * @count: buffer size
2146 * number of bytes printed to buffer
2148 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2149 loff_t off, size_t count)
2151 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2152 struct Scsi_Host *shost = class_to_shost(cdev);
2153 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2154 unsigned long lock_flags = 0;
2155 int size = IPR_TRACE_SIZE;
2156 char *src = (char *)ioa_cfg->trace;
2160 if (off + count > size) {
2165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2166 memcpy(buf, &src[off], count);
2167 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
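/*
 * Usage note (illustrative): when CONFIG_SCSI_IPR_TRACE is enabled the
 * trace buffer is exported as a binary sysfs attribute on the Scsi_Host
 * class device, so a snapshot can typically be captured with something
 * like
 *
 *   dd if=/sys/class/scsi_host/hostN/trace of=ipr_trace.bin
 *
 * The attribute name and path above are illustrative; the read side simply
 * copies the requested window of the IPR_TRACE_SIZE-byte in-memory buffer
 * while holding the host lock.
 */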
2171 static struct bin_attribute ipr_trace_attr = {
2177 .read = ipr_read_trace,
2181 static const struct {
2182 enum ipr_cache_state state;
2184 } cache_state [] = {
2185 { CACHE_NONE, "none" },
2186 { CACHE_DISABLED, "disabled" },
2187 { CACHE_ENABLED, "enabled" }
2191 * ipr_show_write_caching - Show the write caching attribute
2192 * @class_dev: class device struct
2196 * number of bytes printed to buffer
2198 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2200 struct Scsi_Host *shost = class_to_shost(class_dev);
2201 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2202 unsigned long lock_flags = 0;
2205 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2206 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2207 if (cache_state[i].state == ioa_cfg->cache_state) {
2208 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2212 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2218 * ipr_store_write_caching - Enable/disable adapter write cache
2219 * @class_dev: class_device struct
2221 * @count: buffer size
2223 * This function will enable/disable adapter write cache.
2226 * count on success / other on failure
2228 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2229 const char *buf, size_t count)
2231 struct Scsi_Host *shost = class_to_shost(class_dev);
2232 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2233 unsigned long lock_flags = 0;
2234 enum ipr_cache_state new_state = CACHE_INVALID;
2237 if (!capable(CAP_SYS_ADMIN))
2239 if (ioa_cfg->cache_state == CACHE_NONE)
2242 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2243 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2244 new_state = cache_state[i].state;
2249 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2252 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2253 if (ioa_cfg->cache_state == new_state) {
2254 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2258 ioa_cfg->cache_state = new_state;
2259 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2260 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2261 if (!ioa_cfg->in_reset_reload)
2262 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2263 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2264 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
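/*
 * Usage note (illustrative): with the "write_cache" attribute registered
 * on the Scsi_Host class device, the cache can typically be toggled from
 * user space with something like
 *
 *   echo disabled > /sys/class/scsi_host/hostN/write_cache
 *   echo enabled  > /sys/class/scsi_host/hostN/write_cache
 *
 * The store routine above triggers a normal adapter shutdown/reset so the
 * new setting takes effect and does not return until the reset completes.
 * The exact sysfs path depends on the host number assigned at probe time.
 */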
2269 static struct class_device_attribute ipr_ioa_cache_attr = {
2271 .name = "write_cache",
2272 .mode = S_IRUGO | S_IWUSR,
2274 .show = ipr_show_write_caching,
2275 .store = ipr_store_write_caching
2279 * ipr_show_fw_version - Show the firmware version
2280 * @class_dev: class device struct
2284 * number of bytes printed to buffer
2286 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2288 struct Scsi_Host *shost = class_to_shost(class_dev);
2289 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2290 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2291 unsigned long lock_flags = 0;
2294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2295 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2296 ucode_vpd->major_release, ucode_vpd->card_type,
2297 ucode_vpd->minor_release[0],
2298 ucode_vpd->minor_release[1]);
2299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2303 static struct class_device_attribute ipr_fw_version_attr = {
2305 .name = "fw_version",
2308 .show = ipr_show_fw_version,
2312 * ipr_show_log_level - Show the adapter's error logging level
2313 * @class_dev: class device struct
2317 * number of bytes printed to buffer
2319 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2321 struct Scsi_Host *shost = class_to_shost(class_dev);
2322 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2323 unsigned long lock_flags = 0;
2326 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2327 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2333 * ipr_store_log_level - Change the adapter's error logging level
2334 * @class_dev: class device struct
2338 * number of bytes printed to buffer
2340 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2341 const char *buf, size_t count)
2343 struct Scsi_Host *shost = class_to_shost(class_dev);
2344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2345 unsigned long lock_flags = 0;
2347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2348 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2353 static struct class_device_attribute ipr_log_level_attr = {
2355 .name = "log_level",
2356 .mode = S_IRUGO | S_IWUSR,
2358 .show = ipr_show_log_level,
2359 .store = ipr_store_log_level
2363 * ipr_store_diagnostics - IOA Diagnostics interface
2364 * @class_dev: class_device struct
2366 * @count: buffer size
2368 * This function will reset the adapter and wait a reasonable
2369 * amount of time for any errors that the adapter might log.
2372 * count on success / other on failure
2374 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2375 const char *buf, size_t count)
2377 struct Scsi_Host *shost = class_to_shost(class_dev);
2378 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2379 unsigned long lock_flags = 0;
2382 if (!capable(CAP_SYS_ADMIN))
2385 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2387 ioa_cfg->errors_logged = 0;
2388 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2390 if (ioa_cfg->in_reset_reload) {
2391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2392 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2394 /* Wait for a second for any errors to be logged */
2397 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2402 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
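/*
 * Usage note (illustrative): writing to the "run_diagnostics" attribute,
 * typically via
 *
 *   echo 1 > /sys/class/scsi_host/hostN/run_diagnostics
 *
 * resets the adapter, waits for the reset to finish plus roughly a second
 * for the adapter to log any errors, and then fails the write if a reset
 * is still in progress or if any errors were logged during that window.
 */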
2409 static struct class_device_attribute ipr_diagnostics_attr = {
2411 .name = "run_diagnostics",
2414 .store = ipr_store_diagnostics
2418 * ipr_show_adapter_state - Show the adapter's state
2419 * @class_dev: class device struct
2423 * number of bytes printed to buffer
2425 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2427 struct Scsi_Host *shost = class_to_shost(class_dev);
2428 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2429 unsigned long lock_flags = 0;
2432 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2433 if (ioa_cfg->ioa_is_dead)
2434 len = snprintf(buf, PAGE_SIZE, "offline\n");
2436 len = snprintf(buf, PAGE_SIZE, "online\n");
2437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2442 * ipr_store_adapter_state - Change adapter state
2443 * @class_dev: class_device struct
2445 * @count: buffer size
2447 * This function will change the adapter's state.
2450 * count on success / other on failure
2452 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2453 const char *buf, size_t count)
2455 struct Scsi_Host *shost = class_to_shost(class_dev);
2456 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2457 unsigned long lock_flags;
2460 if (!capable(CAP_SYS_ADMIN))
2463 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2464 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2465 ioa_cfg->ioa_is_dead = 0;
2466 ioa_cfg->reset_retries = 0;
2467 ioa_cfg->in_ioa_bringdown = 0;
2468 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2470 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2471 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2476 static struct class_device_attribute ipr_ioa_state_attr = {
2479 .mode = S_IRUGO | S_IWUSR,
2481 .show = ipr_show_adapter_state,
2482 .store = ipr_store_adapter_state
2486 * ipr_store_reset_adapter - Reset the adapter
2487 * @class_dev: class_device struct
2489 * @count: buffer size
2491 * This function will reset the adapter.
2494 * count on success / other on failure
2496 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2497 const char *buf, size_t count)
2499 struct Scsi_Host *shost = class_to_shost(class_dev);
2500 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2501 unsigned long lock_flags;
2504 if (!capable(CAP_SYS_ADMIN))
2507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2508 if (!ioa_cfg->in_reset_reload)
2509 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2510 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2511 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2516 static struct class_device_attribute ipr_ioa_reset_attr = {
2518 .name = "reset_host",
2521 .store = ipr_store_reset_adapter
2525 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2526 * @buf_len: buffer length
2528 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2529 * list to use for microcode download
2532 * pointer to sglist / NULL on failure
2534 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2536 int sg_size, order, bsize_elem, num_elem, i, j;
2537 struct ipr_sglist *sglist;
2538 struct scatterlist *scatterlist;
2541 /* Get the minimum size per scatter/gather element */
2542 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2544 /* Get the actual size per element */
2545 order = get_order(sg_size);
2547 /* Determine the actual number of bytes per element */
2548 bsize_elem = PAGE_SIZE * (1 << order);
2550 /* Determine the actual number of sg entries needed */
2551 if (buf_len % bsize_elem)
2552 num_elem = (buf_len / bsize_elem) + 1;
2554 num_elem = buf_len / bsize_elem;
2556 /* Allocate a scatter/gather list for the DMA */
2557 sglist = kzalloc(sizeof(struct ipr_sglist) +
2558 (sizeof(struct scatterlist) * (num_elem - 1)),
2561 if (sglist == NULL) {
2566 scatterlist = sglist->scatterlist;
2568 sglist->order = order;
2569 sglist->num_sg = num_elem;
2571 /* Allocate a bunch of sg elements */
2572 for (i = 0; i < num_elem; i++) {
2573 page = alloc_pages(GFP_KERNEL, order);
2577 /* Free up what we already allocated */
2578 for (j = i - 1; j >= 0; j--)
2579 __free_pages(scatterlist[j].page, order);
2584 scatterlist[i].page = page;
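/*
 * Sizing example (illustrative, assuming IPR_MAX_SGLIST is 64 and 4 KB
 * pages): for a 900 KB (921600 byte) microcode image,
 *
 *   sg_size    = 921600 / 63        = 14628 (minimum bytes per element)
 *   order      = get_order(14628)   = 2     (16 KB chunks)
 *   bsize_elem = PAGE_SIZE << order = 16384 bytes per element
 *   num_elem   = 921600 / 16384 + 1 = 57    scatter/gather entries
 *
 * so the image is carried in 57 order-2 page allocations, comfortably
 * below the assumed 64-entry scatter/gather limit.
 */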
2591 * ipr_free_ucode_buffer - Frees a microcode download buffer
2592  * @sglist:		scatter/gather list pointer
2594 * Free a DMA'able ucode download buffer previously allocated with
2595 * ipr_alloc_ucode_buffer
2600 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2604 for (i = 0; i < sglist->num_sg; i++)
2605 __free_pages(sglist->scatterlist[i].page, sglist->order);
2611 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2612 * @sglist: scatter/gather list pointer
2613 * @buffer: buffer pointer
2614 * @len: buffer length
2616 * Copy a microcode image from a user buffer into a buffer allocated by
2617 * ipr_alloc_ucode_buffer
2620 * 0 on success / other on failure
2622 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2623 u8 *buffer, u32 len)
2625 int bsize_elem, i, result = 0;
2626 struct scatterlist *scatterlist;
2629 /* Determine the actual number of bytes per element */
2630 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2632 scatterlist = sglist->scatterlist;
2634 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2635 kaddr = kmap(scatterlist[i].page);
2636 memcpy(kaddr, buffer, bsize_elem);
2637 kunmap(scatterlist[i].page);
2639 scatterlist[i].length = bsize_elem;
2647 if (len % bsize_elem) {
2648 kaddr = kmap(scatterlist[i].page);
2649 memcpy(kaddr, buffer, len % bsize_elem);
2650 kunmap(scatterlist[i].page);
2652 scatterlist[i].length = len % bsize_elem;
2655 sglist->buffer_len = len;
2660 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2661 * @ipr_cmd: ipr command struct
2662 * @sglist: scatter/gather list
2664 * Builds a microcode download IOA data list (IOADL).
2667 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2668 struct ipr_sglist *sglist)
2670 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2671 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2672 struct scatterlist *scatterlist = sglist->scatterlist;
2675 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2676 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2677 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2678 ioarcb->write_ioadl_len =
2679 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2681 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2682 ioadl[i].flags_and_data_len =
2683 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2685 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2688 ioadl[i-1].flags_and_data_len |=
2689 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2693 * ipr_update_ioa_ucode - Update IOA's microcode
2694 * @ioa_cfg: ioa config struct
2695 * @sglist: scatter/gather list
2697 * Initiate an adapter reset to update the IOA's microcode
2700 * 0 on success / -EIO on failure
2702 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2703 struct ipr_sglist *sglist)
2705 unsigned long lock_flags;
2707 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2709 if (ioa_cfg->ucode_sglist) {
2710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2711 dev_err(&ioa_cfg->pdev->dev,
2712 "Microcode download already in progress\n");
2716 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2717 sglist->num_sg, DMA_TO_DEVICE);
2719 if (!sglist->num_dma_sg) {
2720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721 dev_err(&ioa_cfg->pdev->dev,
2722 "Failed to map microcode download buffer!\n");
2726 ioa_cfg->ucode_sglist = sglist;
2727 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2728 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2729 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2731 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2732 ioa_cfg->ucode_sglist = NULL;
2733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
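/*
 * Flow note for ipr_update_ioa_ucode() above: the mapped scatter/gather
 * list is parked in ioa_cfg->ucode_sglist and a normal shutdown/reset is
 * initiated; the reset sequence (not shown here) is expected to use
 * ipr_build_ucode_ioadl() to send the image to the adapter.  The caller
 * sleeps on reset_wait_q until in_reset_reload clears, after which
 * ucode_sglist is cleared under the host lock so a later download can
 * proceed.
 */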
2738 * ipr_store_update_fw - Update the firmware on the adapter
2739 * @class_dev: class_device struct
2741 * @count: buffer size
2743 * This function will update the firmware on the adapter.
2746 * count on success / other on failure
2748 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2749 const char *buf, size_t count)
2751 struct Scsi_Host *shost = class_to_shost(class_dev);
2752 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2753 struct ipr_ucode_image_header *image_hdr;
2754 const struct firmware *fw_entry;
2755 struct ipr_sglist *sglist;
2758 int len, result, dnld_size;
2760 if (!capable(CAP_SYS_ADMIN))
2763 len = snprintf(fname, 99, "%s", buf);
2764 fname[len-1] = '\0';
2766 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2767 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2771 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2773 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2774 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2775 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2776 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2777 release_firmware(fw_entry);
2781 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2782 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2783 sglist = ipr_alloc_ucode_buffer(dnld_size);
2786 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2787 release_firmware(fw_entry);
2791 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2794 dev_err(&ioa_cfg->pdev->dev,
2795 "Microcode buffer copy to DMA buffer failed\n");
2799 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2804 ipr_free_ucode_buffer(sglist);
2805 release_firmware(fw_entry);
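/*
 * Usage note (illustrative): the microcode image is fetched with
 * request_firmware(), so it must be visible to the firmware loader
 * (typically under /lib/firmware or via the hotplug firmware agent).
 * With the image in place, a download is started by writing its file
 * name to the "update_fw" attribute, e.g.
 *
 *   echo ibm-ucode.bin > /sys/class/scsi_host/hostN/update_fw
 *
 * The file name and sysfs path here are placeholders; the store routine
 * chops the trailing character (the newline from echo) before handing the
 * name to request_firmware().
 */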
2809 static struct class_device_attribute ipr_update_fw_attr = {
2811 .name = "update_fw",
2814 .store = ipr_store_update_fw
2817 static struct class_device_attribute *ipr_ioa_attrs[] = {
2818 &ipr_fw_version_attr,
2819 &ipr_log_level_attr,
2820 &ipr_diagnostics_attr,
2821 &ipr_ioa_state_attr,
2822 &ipr_ioa_reset_attr,
2823 &ipr_update_fw_attr,
2824 &ipr_ioa_cache_attr,
2828 #ifdef CONFIG_SCSI_IPR_DUMP
2830 * ipr_read_dump - Dump the adapter
2831 * @kobj: kobject struct
2834 * @count: buffer size
2837 * number of bytes printed to buffer
2839 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2840 loff_t off, size_t count)
2842 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2843 struct Scsi_Host *shost = class_to_shost(cdev);
2844 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2845 struct ipr_dump *dump;
2846 unsigned long lock_flags = 0;
2851 if (!capable(CAP_SYS_ADMIN))
2854 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2855 dump = ioa_cfg->dump;
2857 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2861 kref_get(&dump->kref);
2862 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2864 if (off > dump->driver_dump.hdr.len) {
2865 kref_put(&dump->kref, ipr_release_dump);
2869 if (off + count > dump->driver_dump.hdr.len) {
2870 count = dump->driver_dump.hdr.len - off;
2874 if (count && off < sizeof(dump->driver_dump)) {
2875 if (off + count > sizeof(dump->driver_dump))
2876 len = sizeof(dump->driver_dump) - off;
2879 src = (u8 *)&dump->driver_dump + off;
2880 memcpy(buf, src, len);
2886 off -= sizeof(dump->driver_dump);
2888 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2889 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2890 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2893 src = (u8 *)&dump->ioa_dump + off;
2894 memcpy(buf, src, len);
2900 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2903 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2904 len = PAGE_ALIGN(off) - off;
2907 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2908 src += off & ~PAGE_MASK;
2909 memcpy(buf, src, len);
2915 kref_put(&dump->kref, ipr_release_dump);
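/*
 * Layout note for ipr_read_dump() above: the dump is presented to user
 * space as three consecutive regions -- the driver dump header, the IOA
 * dump header, and then the raw IOA data held in individually allocated
 * pages.  The requested offset is rebased as each region is consumed,
 * and the page array is indexed with (off >> PAGE_SHIFT), so reads may
 * cross page boundaries even though the data is never contiguous in the
 * kernel.
 */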
2920 * ipr_alloc_dump - Prepare for adapter dump
2921 * @ioa_cfg: ioa config struct
2924 * 0 on success / other on failure
2926 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2928 struct ipr_dump *dump;
2929 unsigned long lock_flags = 0;
2932 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2935 ipr_err("Dump memory allocation failed\n");
2939 kref_init(&dump->kref);
2940 dump->ioa_cfg = ioa_cfg;
2942 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2944 if (INACTIVE != ioa_cfg->sdt_state) {
2945 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2950 ioa_cfg->dump = dump;
2951 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2952 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2953 ioa_cfg->dump_taken = 1;
2954 schedule_work(&ioa_cfg->work_q);
2956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2963 * ipr_free_dump - Free adapter dump memory
2964 * @ioa_cfg: ioa config struct
2967 * 0 on success / other on failure
2969 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2971 struct ipr_dump *dump;
2972 unsigned long lock_flags = 0;
2976 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2977 dump = ioa_cfg->dump;
2979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2983 ioa_cfg->dump = NULL;
2984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2986 kref_put(&dump->kref, ipr_release_dump);
2993 * ipr_write_dump - Setup dump state of adapter
2994 * @kobj: kobject struct
2997 * @count: buffer size
3000  * 	count on success / other on failure
3002 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3003 loff_t off, size_t count)
3005 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3006 struct Scsi_Host *shost = class_to_shost(cdev);
3007 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3010 if (!capable(CAP_SYS_ADMIN))
3014 rc = ipr_alloc_dump(ioa_cfg);
3015 else if (buf[0] == '0')
3016 rc = ipr_free_dump(ioa_cfg);
3026 static struct bin_attribute ipr_dump_attr = {
3029 .mode = S_IRUSR | S_IWUSR,
3032 .read = ipr_read_dump,
3033 .write = ipr_write_dump
3036 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3040 * ipr_change_queue_depth - Change the device's queue depth
3041 * @sdev: scsi device struct
3042 * @qdepth: depth to set
3047 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3049 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3050 return sdev->queue_depth;
3054 * ipr_change_queue_type - Change the device's queue type
3055  * @sdev:		scsi device struct
3056 * @tag_type: type of tags to use
3059 * actual queue type set
3061 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3063 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3064 struct ipr_resource_entry *res;
3065 unsigned long lock_flags = 0;
3067 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068 res = (struct ipr_resource_entry *)sdev->hostdata;
3071 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3073 * We don't bother quiescing the device here since the
3074 * adapter firmware does it for us.
3076 scsi_set_tag_type(sdev, tag_type);
3079 scsi_activate_tcq(sdev, sdev->queue_depth);
3081 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3092 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3093 * @dev: device struct
3097 * number of bytes printed to buffer
3099 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3101 struct scsi_device *sdev = to_scsi_device(dev);
3102 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3103 struct ipr_resource_entry *res;
3104 unsigned long lock_flags = 0;
3105 ssize_t len = -ENXIO;
3107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3108 res = (struct ipr_resource_entry *)sdev->hostdata;
3110 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3111 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3115 static struct device_attribute ipr_adapter_handle_attr = {
3117 .name = "adapter_handle",
3120 .show = ipr_show_adapter_handle
3123 static struct device_attribute *ipr_dev_attrs[] = {
3124 &ipr_adapter_handle_attr,
3129 * ipr_biosparam - Return the HSC mapping
3130 * @sdev: scsi device struct
3131 * @block_device: block device pointer
3132 * @capacity: capacity of the device
3133 * @parm: Array containing returned HSC values.
3135 * This function generates the HSC parms that fdisk uses.
3136 * We want to make sure we return something that places partitions
3137 * on 4k boundaries for best performance with the IOA.
3142 static int ipr_biosparam(struct scsi_device *sdev,
3143 struct block_device *block_device,
3144 sector_t capacity, int *parm)
3152 cylinders = capacity;
3153 sector_div(cylinders, (128 * 32));
3158 parm[2] = cylinders;
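/*
 * Geometry example (illustrative): with 128 heads and 32 sectors per
 * track, one cylinder is 128 * 32 = 4096 sectors (2 MB).  An 8 GB device
 * with 16777216 512-byte sectors would therefore report
 *
 *   cylinders = 16777216 / 4096 = 4096
 *
 * and partitioning tools that align to cylinder (or track) boundaries
 * land on multiples of 4 KB, which is what the comment above asks for.
 */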
3164 * ipr_slave_destroy - Unconfigure a SCSI device
3165 * @sdev: scsi device struct
3170 static void ipr_slave_destroy(struct scsi_device *sdev)
3172 struct ipr_resource_entry *res;
3173 struct ipr_ioa_cfg *ioa_cfg;
3174 unsigned long lock_flags = 0;
3176 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3178 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3179 res = (struct ipr_resource_entry *) sdev->hostdata;
3181 sdev->hostdata = NULL;
3184 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3188 * ipr_slave_configure - Configure a SCSI device
3189 * @sdev: scsi device struct
3191 * This function configures the specified scsi device.
3196 static int ipr_slave_configure(struct scsi_device *sdev)
3198 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3199 struct ipr_resource_entry *res;
3200 unsigned long lock_flags = 0;
3202 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3203 res = sdev->hostdata;
3205 if (ipr_is_af_dasd_device(res))
3206 sdev->type = TYPE_RAID;
3207 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3208 sdev->scsi_level = 4;
3209 sdev->no_uld_attach = 1;
3211 if (ipr_is_vset_device(res)) {
3212 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3213 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3215 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
3216 sdev->allow_restart = 1;
3217 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3219 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3224 * ipr_slave_alloc - Prepare for commands to a device.
3225 * @sdev: scsi device struct
3227 * This function saves a pointer to the resource entry
3228 * in the scsi device struct if the device exists. We
3229 * can then use this pointer in ipr_queuecommand when
3230 * handling new commands.
3233 * 0 on success / -ENXIO if device does not exist
3235 static int ipr_slave_alloc(struct scsi_device *sdev)
3237 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3238 struct ipr_resource_entry *res;
3239 unsigned long lock_flags;
3242 sdev->hostdata = NULL;
3244 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3246 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3247 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3248 (res->cfgte.res_addr.target == sdev->id) &&
3249 (res->cfgte.res_addr.lun == sdev->lun)) {
3253 sdev->hostdata = res;
3254 if (!ipr_is_naca_model(res))
3255 res->needs_sync_complete = 1;
3261 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3267 * ipr_eh_host_reset - Reset the host adapter
3268 * @scsi_cmd: scsi command struct
3273 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3275 struct ipr_ioa_cfg *ioa_cfg;
3279 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3281 dev_err(&ioa_cfg->pdev->dev,
3282 "Adapter being reset as a result of error recovery.\n");
3284 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3285 ioa_cfg->sdt_state = GET_DUMP;
3287 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3293 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3297 spin_lock_irq(cmd->device->host->host_lock);
3298 rc = __ipr_eh_host_reset(cmd);
3299 spin_unlock_irq(cmd->device->host->host_lock);
3305 * ipr_eh_dev_reset - Reset the device
3306 * @scsi_cmd: scsi command struct
3308 * This function issues a device reset to the affected device.
3309 * A LUN reset will be sent to the device first. If that does
3310 * not work, a target reset will be sent.
3315 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3317 struct ipr_cmnd *ipr_cmd;
3318 struct ipr_ioa_cfg *ioa_cfg;
3319 struct ipr_resource_entry *res;
3320 struct ipr_cmd_pkt *cmd_pkt;
3324 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3325 res = scsi_cmd->device->hostdata;
3331 * If we are currently going through reset/reload, return failed. This will force the
3332 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the reset to complete
3335 if (ioa_cfg->in_reset_reload)
3337 if (ioa_cfg->ioa_is_dead)
3340 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3341 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3342 if (ipr_cmd->scsi_cmd)
3343 ipr_cmd->done = ipr_scsi_eh_done;
3347 res->resetting_device = 1;
3349 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3351 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3352 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3353 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3354 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3356 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3357 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3359 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3361 res->resetting_device = 0;
3363 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3366 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3369 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3373 spin_lock_irq(cmd->device->host->host_lock);
3374 rc = __ipr_eh_dev_reset(cmd);
3375 spin_unlock_irq(cmd->device->host->host_lock);
3381 * ipr_bus_reset_done - Op done function for bus reset.
3382 * @ipr_cmd: ipr command struct
3384 * This function is the op done function for a bus reset
3389 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3391 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3392 struct ipr_resource_entry *res;
3395 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3396 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3397 sizeof(res->cfgte.res_handle))) {
3398 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3404 * If abort has not completed, indicate the reset has, else call the
3405 * abort's done function to wake the sleeping eh thread
3407 if (ipr_cmd->sibling->sibling)
3408 ipr_cmd->sibling->sibling = NULL;
3410 ipr_cmd->sibling->done(ipr_cmd->sibling);
3412 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3417 * ipr_abort_timeout - An abort task has timed out
3418 * @ipr_cmd: ipr command struct
3420 * This function handles when an abort task times out. If this
3421 * happens we issue a bus reset since we have resources tied
3422 * up that must be freed before returning to the midlayer.
3427 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3429 struct ipr_cmnd *reset_cmd;
3430 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3431 struct ipr_cmd_pkt *cmd_pkt;
3432 unsigned long lock_flags = 0;
3435 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3436 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3441 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3442 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3443 ipr_cmd->sibling = reset_cmd;
3444 reset_cmd->sibling = ipr_cmd;
3445 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3446 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3447 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3448 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3449 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3451 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3452 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3457 * ipr_cancel_op - Cancel specified op
3458 * @scsi_cmd: scsi command struct
3460 * This function cancels specified op.
3465 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3467 struct ipr_cmnd *ipr_cmd;
3468 struct ipr_ioa_cfg *ioa_cfg;
3469 struct ipr_resource_entry *res;
3470 struct ipr_cmd_pkt *cmd_pkt;
3475 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3476 res = scsi_cmd->device->hostdata;
3478 /* If we are currently going through reset/reload, return failed.
3479 * This will force the mid-layer to call ipr_eh_host_reset,
3480 * which will then go to sleep and wait for the reset to complete
3482 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3484 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3487 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3488 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3489 ipr_cmd->done = ipr_scsi_eh_done;
3498 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3499 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3500 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3501 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3502 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3503 ipr_cmd->u.sdev = scsi_cmd->device;
3505 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3506 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3507 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3510 * If the abort task timed out and we sent a bus reset, we will get
3511 	 * one of the following responses to the abort
3513 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3518 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3519 if (!ipr_is_naca_model(res))
3520 res->needs_sync_complete = 1;
3523 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3527 * ipr_eh_abort - Abort a single op
3528 * @scsi_cmd: scsi command struct
3533 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3535 unsigned long flags;
3540 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3541 rc = ipr_cancel_op(scsi_cmd);
3542 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3549 * ipr_handle_other_interrupt - Handle "other" interrupts
3550 * @ioa_cfg: ioa config struct
3551 * @int_reg: interrupt register
3554 * IRQ_NONE / IRQ_HANDLED
3556 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3557 volatile u32 int_reg)
3559 irqreturn_t rc = IRQ_HANDLED;
3561 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3562 /* Mask the interrupt */
3563 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3565 /* Clear the interrupt */
3566 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3567 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3569 list_del(&ioa_cfg->reset_cmd->queue);
3570 del_timer(&ioa_cfg->reset_cmd->timer);
3571 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3573 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3574 ioa_cfg->ioa_unit_checked = 1;
3576 dev_err(&ioa_cfg->pdev->dev,
3577 "Permanent IOA failure. 0x%08X\n", int_reg);
3579 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3580 ioa_cfg->sdt_state = GET_DUMP;
3582 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3583 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3590 * ipr_isr - Interrupt service routine
3592 * @devp: pointer to ioa config struct
3593 * @regs: pt_regs struct
3596 * IRQ_NONE / IRQ_HANDLED
3598 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3600 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3601 unsigned long lock_flags = 0;
3602 volatile u32 int_reg, int_mask_reg;
3605 struct ipr_cmnd *ipr_cmd;
3606 irqreturn_t rc = IRQ_NONE;
3608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3610 /* If interrupts are disabled, ignore the interrupt */
3611 if (!ioa_cfg->allow_interrupts) {
3612 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3616 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3617 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3619 /* If an interrupt on the adapter did not occur, ignore it */
3620 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3628 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3629 ioa_cfg->toggle_bit) {
3631 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3632 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3634 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3635 ioa_cfg->errors_logged++;
3636 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3638 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3639 ioa_cfg->sdt_state = GET_DUMP;
3641 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3642 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3646 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3648 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3650 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3652 list_del(&ipr_cmd->queue);
3653 del_timer(&ipr_cmd->timer);
3654 ipr_cmd->done(ipr_cmd);
3658 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3659 ioa_cfg->hrrq_curr++;
3661 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3662 ioa_cfg->toggle_bit ^= 1u;
3666 if (ipr_cmd != NULL) {
3667 /* Clear the PCI interrupt */
3668 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3669 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3674 if (unlikely(rc == IRQ_NONE))
3675 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3677 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
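/*
 * Note on the host RRQ handling above: each response queue entry carries
 * a toggle bit that the adapter flips every time it wraps the queue.
 * The ISR therefore consumes entries only while the entry's toggle bit
 * matches ioa_cfg->toggle_bit, and flips its own copy when hrrq_curr
 * wraps from hrrq_end back to hrrq_start; this distinguishes new entries
 * from stale ones left over from the previous pass without needing any
 * extra producer/consumer indices.
 */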
3682 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3683 * @ioa_cfg: ioa config struct
3684 * @ipr_cmd: ipr command struct
3687 * 0 on success / -1 on failure
3689 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3690 struct ipr_cmnd *ipr_cmd)
3693 struct scatterlist *sglist;
3695 u32 ioadl_flags = 0;
3696 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3697 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3698 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3700 length = scsi_cmd->request_bufflen;
3705 if (scsi_cmd->use_sg) {
3706 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3707 scsi_cmd->request_buffer,
3709 scsi_cmd->sc_data_direction);
3711 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3712 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3713 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3714 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3715 ioarcb->write_ioadl_len =
3716 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3717 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3718 ioadl_flags = IPR_IOADL_FLAGS_READ;
3719 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3720 ioarcb->read_ioadl_len =
3721 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3724 sglist = scsi_cmd->request_buffer;
3726 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3727 ioadl[i].flags_and_data_len =
3728 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3730 cpu_to_be32(sg_dma_address(&sglist[i]));
3733 if (likely(ipr_cmd->dma_use_sg)) {
3734 ioadl[i-1].flags_and_data_len |=
3735 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3738 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3740 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3741 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3742 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3743 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3744 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3745 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3746 ioadl_flags = IPR_IOADL_FLAGS_READ;
3747 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3748 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3751 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3752 scsi_cmd->request_buffer, length,
3753 scsi_cmd->sc_data_direction);
3755 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3756 ipr_cmd->dma_use_sg = 1;
3757 ioadl[0].flags_and_data_len =
3758 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3759 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3762 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3769 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3770 * @scsi_cmd: scsi command struct
3775 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3778 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3780 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3782 case MSG_SIMPLE_TAG:
3783 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3786 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3788 case MSG_ORDERED_TAG:
3789 rc = IPR_FLAGS_LO_ORDERED_TASK;
3798 * ipr_erp_done - Process completion of ERP for a device
3799 * @ipr_cmd: ipr command struct
3801 * This function copies the sense buffer into the scsi_cmd
3802 * struct and pushes the scsi_done function.
3807 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3809 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3810 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3811 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3812 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3814 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3815 scsi_cmd->result |= (DID_ERROR << 16);
3816 ipr_sdev_err(scsi_cmd->device,
3817 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3819 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3820 SCSI_SENSE_BUFFERSIZE);
3824 if (!ipr_is_naca_model(res))
3825 res->needs_sync_complete = 1;
3828 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3829 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3830 scsi_cmd->scsi_done(scsi_cmd);
3834 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3835 * @ipr_cmd: ipr command struct
3840 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3842 struct ipr_ioarcb *ioarcb;
3843 struct ipr_ioasa *ioasa;
3845 ioarcb = &ipr_cmd->ioarcb;
3846 ioasa = &ipr_cmd->ioasa;
3848 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3849 ioarcb->write_data_transfer_length = 0;
3850 ioarcb->read_data_transfer_length = 0;
3851 ioarcb->write_ioadl_len = 0;
3852 ioarcb->read_ioadl_len = 0;
3854 ioasa->residual_data_len = 0;
3858 * ipr_erp_request_sense - Send request sense to a device
3859 * @ipr_cmd: ipr command struct
3861 * This function sends a request sense to a device as a result
3862 * of a check condition.
3867 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3869 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3870 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3872 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3873 ipr_erp_done(ipr_cmd);
3877 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3879 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3880 cmd_pkt->cdb[0] = REQUEST_SENSE;
3881 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3882 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3883 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3884 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3886 ipr_cmd->ioadl[0].flags_and_data_len =
3887 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3888 ipr_cmd->ioadl[0].address =
3889 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3891 ipr_cmd->ioarcb.read_ioadl_len =
3892 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3893 ipr_cmd->ioarcb.read_data_transfer_length =
3894 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3896 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3897 IPR_REQUEST_SENSE_TIMEOUT * 2);
3901 * ipr_erp_cancel_all - Send cancel all to a device
3902 * @ipr_cmd: ipr command struct
3904 * This function sends a cancel all to a device to clear the
3905 * queue. If we are running TCQ on the device, QERR is set to 1,
3906 * which means all outstanding ops have been dropped on the floor.
3907 * Cancel all will return them to us.
3912 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3914 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3915 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3916 struct ipr_cmd_pkt *cmd_pkt;
3920 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3922 if (!scsi_get_tag_type(scsi_cmd->device)) {
3923 ipr_erp_request_sense(ipr_cmd);
3927 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3928 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3929 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3931 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3932 IPR_CANCEL_ALL_TIMEOUT);
3936 * ipr_dump_ioasa - Dump contents of IOASA
3937 * @ioa_cfg: ioa config struct
3938 * @ipr_cmd: ipr command struct
3940 * This function is invoked by the interrupt handler when ops
3941  * fail. It will log the IOASA if appropriate. Only called for GPDD ops.
3947 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3948 struct ipr_cmnd *ipr_cmd)
3953 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3954 __be32 *ioasa_data = (__be32 *)ioasa;
3957 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3962 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3965 error_index = ipr_get_error(ioasc);
3967 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3968 /* Don't log an error if the IOA already logged one */
3969 if (ioasa->ilid != 0)
3972 if (ipr_error_table[error_index].log_ioasa == 0)
3976 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3977 ipr_error_table[error_index].error);
3979 if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3980 (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3981 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3982 "Device End state: %s Phase: %s\n",
3983 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3984 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3987 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3988 data_len = sizeof(struct ipr_ioasa);
3990 data_len = be16_to_cpu(ioasa->ret_stat_len);
3992 ipr_err("IOASA Dump:\n");
3994 for (i = 0; i < data_len / 4; i += 4) {
3995 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3996 be32_to_cpu(ioasa_data[i]),
3997 be32_to_cpu(ioasa_data[i+1]),
3998 be32_to_cpu(ioasa_data[i+2]),
3999 be32_to_cpu(ioasa_data[i+3]));
4004 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4006  * @ipr_cmd:	ipr command struct
4011 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4014 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4015 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4016 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4017 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4019 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4021 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4024 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4026 if (ipr_is_vset_device(res) &&
4027 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4028 ioasa->u.vset.failing_lba_hi != 0) {
4029 sense_buf[0] = 0x72;
4030 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4031 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4032 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4036 sense_buf[9] = 0x0A;
4037 sense_buf[10] = 0x80;
4039 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4041 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4042 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4043 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4044 sense_buf[15] = failing_lba & 0x000000ff;
4046 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4048 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4049 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4050 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4051 sense_buf[19] = failing_lba & 0x000000ff;
4053 sense_buf[0] = 0x70;
4054 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4055 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4056 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4058 /* Illegal request */
4059 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4060 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4061 sense_buf[7] = 10; /* additional length */
4063 /* IOARCB was in error */
4064 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4065 sense_buf[15] = 0xC0;
4066 else /* Parameter data was invalid */
4067 sense_buf[15] = 0x80;
4070 ((IPR_FIELD_POINTER_MASK &
4071 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4073 (IPR_FIELD_POINTER_MASK &
4074 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4076 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4077 if (ipr_is_vset_device(res))
4078 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4080 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4082 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4083 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4084 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4085 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4086 sense_buf[6] = failing_lba & 0x000000ff;
4089 sense_buf[7] = 6; /* additional length */
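/*
 * Note on the sense data built above: volume-set media errors whose
 * failing LBA does not fit in 32 bits are reported in descriptor format
 * (response code 0x72), with the 64-bit LBA from the hi/lo words placed
 * in an information descriptor.  Everything else uses fixed format
 * (response code 0x70) with key/ASC/ASCQ in bytes 2, 12 and 13, a field
 * pointer for illegal requests, and, for media errors, the 32-bit failing
 * LBA in the information field with the valid bit set.
 */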
4095 * ipr_get_autosense - Copy autosense data to sense buffer
4096 * @ipr_cmd: ipr command struct
4098 * This function copies the autosense buffer to the buffer
4099 * in the scsi_cmd, if there is autosense available.
4102 * 1 if autosense was available / 0 if not
4104 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4106 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4108 if ((be32_to_cpu(ioasa->ioasc_specific) &
4109 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4112 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4113 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4114 SCSI_SENSE_BUFFERSIZE));
4119 * ipr_erp_start - Process an error response for a SCSI op
4120 * @ioa_cfg: ioa config struct
4121 * @ipr_cmd: ipr command struct
4123 * This function determines whether or not to initiate ERP
4124 * on the affected device.
4129 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4130 struct ipr_cmnd *ipr_cmd)
4132 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4133 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4134 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4137 ipr_scsi_eh_done(ipr_cmd);
4141 if (ipr_is_gscsi(res))
4142 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
4144 ipr_gen_sense(ipr_cmd);
4146 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4147 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4148 if (ipr_is_naca_model(res))
4149 scsi_cmd->result |= (DID_ABORT << 16);
4151 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4153 case IPR_IOASC_IR_RESOURCE_HANDLE:
4154 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4155 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4157 case IPR_IOASC_HW_SEL_TIMEOUT:
4158 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4159 if (!ipr_is_naca_model(res))
4160 res->needs_sync_complete = 1;
4162 case IPR_IOASC_SYNC_REQUIRED:
4164 res->needs_sync_complete = 1;
4165 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4167 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4168 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4169 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4171 case IPR_IOASC_BUS_WAS_RESET:
4172 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4174 * Report the bus reset and ask for a retry. The device
4175 * will give CC/UA the next command.
4177 if (!res->resetting_device)
4178 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4179 scsi_cmd->result |= (DID_ERROR << 16);
4180 if (!ipr_is_naca_model(res))
4181 res->needs_sync_complete = 1;
4183 case IPR_IOASC_HW_DEV_BUS_STATUS:
4184 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4185 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4186 if (!ipr_get_autosense(ipr_cmd)) {
4187 if (!ipr_is_naca_model(res)) {
4188 ipr_erp_cancel_all(ipr_cmd);
4193 if (!ipr_is_naca_model(res))
4194 res->needs_sync_complete = 1;
4196 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4199 scsi_cmd->result |= (DID_ERROR << 16);
4200 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4201 res->needs_sync_complete = 1;
4205 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4206 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4207 scsi_cmd->scsi_done(scsi_cmd);
4211 * ipr_scsi_done - mid-layer done function
4212 * @ipr_cmd: ipr command struct
4214 * This function is invoked by the interrupt handler for
4215 * ops generated by the SCSI mid-layer
4220 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4222 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4223 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4224 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4226 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4228 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4229 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4230 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4231 scsi_cmd->scsi_done(scsi_cmd);
4233 ipr_erp_start(ioa_cfg, ipr_cmd);
4237  * ipr_save_ioafp_mode_select - Save adapter's mode select data
4238 * @ioa_cfg: ioa config struct
4239 * @scsi_cmd: scsi command struct
4241 * This function saves mode select data for the adapter to
4242 * use following an adapter reset.
4245 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
4247 static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
4248 struct scsi_cmnd *scsi_cmd)
4250 if (!ioa_cfg->saved_mode_pages) {
4251 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
4253 if (!ioa_cfg->saved_mode_pages) {
4254 dev_err(&ioa_cfg->pdev->dev,
4255 "IOA mode select buffer allocation failed\n");
4256 return SCSI_MLQUEUE_HOST_BUSY;
4260 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
4261 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
4266 * ipr_queuecommand - Queue a mid-layer request
4267 * @scsi_cmd: scsi command struct
4268 * @done: done function
4270 * This function queues a request generated by the mid-layer.
4274 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4275 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4277 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4278 void (*done) (struct scsi_cmnd *))
4280 struct ipr_ioa_cfg *ioa_cfg;
4281 struct ipr_resource_entry *res;
4282 struct ipr_ioarcb *ioarcb;
4283 struct ipr_cmnd *ipr_cmd;
4286 scsi_cmd->scsi_done = done;
4287 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4288 res = scsi_cmd->device->hostdata;
4289 scsi_cmd->result = (DID_OK << 16);
4292 * We are currently blocking all devices due to a host reset
4293 * We have told the host to stop giving us new requests, but
4294 * ERP ops don't count. FIXME
4296 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4297 return SCSI_MLQUEUE_HOST_BUSY;
4300 * FIXME - Create scsi_set_host_offline interface
4301 * and the ioa_is_dead check can be removed
4303 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4304 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4305 scsi_cmd->result = (DID_NO_CONNECT << 16);
4306 scsi_cmd->scsi_done(scsi_cmd);
4310 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4311 ioarcb = &ipr_cmd->ioarcb;
4312 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4314 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4315 ipr_cmd->scsi_cmd = scsi_cmd;
4316 ioarcb->res_handle = res->cfgte.res_handle;
4317 ipr_cmd->done = ipr_scsi_done;
4318 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4320 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4321 if (scsi_cmd->underflow == 0)
4322 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4324 if (res->needs_sync_complete) {
4325 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4326 res->needs_sync_complete = 0;
4329 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4330 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4331 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4332 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4335 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4336 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4337 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4339 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
4340 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
4342 if (likely(rc == 0))
4343 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4345 if (likely(rc == 0)) {
4347 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4348 ioa_cfg->regs.ioarrin_reg);
4350 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4351 return SCSI_MLQUEUE_HOST_BUSY;
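/*
 * Note on the fast path above: once the IOARCB and IOADL have been built,
 * starting the command is a single MMIO write of the IOARCB's bus address
 * to the adapter's IOARRIN register.  If the scatter/gather mapping fails,
 * the command block is returned to the free queue and
 * SCSI_MLQUEUE_HOST_BUSY asks the mid-layer to retry the request later.
 */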
4358  * ipr_ioa_info - Get information about the card/driver
4359  * @host:	scsi host struct
4362 * pointer to buffer with description string
4364 static const char * ipr_ioa_info(struct Scsi_Host *host)
4366 static char buffer[512];
4367 struct ipr_ioa_cfg *ioa_cfg;
4368 unsigned long lock_flags = 0;
4370 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4372 spin_lock_irqsave(host->host_lock, lock_flags);
4373 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4374 spin_unlock_irqrestore(host->host_lock, lock_flags);
4379 static struct scsi_host_template driver_template = {
4380 .module = THIS_MODULE,
4382 .info = ipr_ioa_info,
4383 .queuecommand = ipr_queuecommand,
4384 .eh_abort_handler = ipr_eh_abort,
4385 .eh_device_reset_handler = ipr_eh_dev_reset,
4386 .eh_host_reset_handler = ipr_eh_host_reset,
4387 .slave_alloc = ipr_slave_alloc,
4388 .slave_configure = ipr_slave_configure,
4389 .slave_destroy = ipr_slave_destroy,
4390 .change_queue_depth = ipr_change_queue_depth,
4391 .change_queue_type = ipr_change_queue_type,
4392 .bios_param = ipr_biosparam,
4393 .can_queue = IPR_MAX_COMMANDS,
4395 .sg_tablesize = IPR_MAX_SGLIST,
4396 .max_sectors = IPR_IOA_MAX_SECTORS,
4397 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4398 .use_clustering = ENABLE_CLUSTERING,
4399 .shost_attrs = ipr_ioa_attrs,
4400 .sdev_attrs = ipr_dev_attrs,
4401 .proc_name = IPR_NAME
4404 #ifdef CONFIG_PPC_PSERIES
4405 static const u16 ipr_blocked_processors[] = {
4417 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4418 * @ioa_cfg: ioa cfg struct
4420 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4421 * certain pSeries hardware. This function determines if the given
4422 * adapter is in one of these configurations or not.
4425 * 1 if adapter is not supported / 0 if adapter is supported
4427 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4432 if (ioa_cfg->type == 0x5702) {
4433 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4434 &rev_id) == PCIBIOS_SUCCESSFUL) {
4436 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4437 if (__is_processor(ipr_blocked_processors[i]))
4446 #define ipr_invalid_adapter(ioa_cfg) 0
4450 * ipr_ioa_bringdown_done - IOA bring down completion.
4451 * @ipr_cmd: ipr command struct
4453 * This function processes the completion of an adapter bring down.
4454 * It wakes any reset sleepers.
4459 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4461 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4464 ioa_cfg->in_reset_reload = 0;
4465 ioa_cfg->reset_retries = 0;
4466 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4467 wake_up_all(&ioa_cfg->reset_wait_q);
4469 spin_unlock_irq(ioa_cfg->host->host_lock);
4470 scsi_unblock_requests(ioa_cfg->host);
4471 spin_lock_irq(ioa_cfg->host->host_lock);
4474 return IPR_RC_JOB_RETURN;
4478 * ipr_ioa_reset_done - IOA reset completion.
4479 * @ipr_cmd: ipr command struct
4481 * This function processes the completion of an adapter reset.
4482 * It schedules any necessary mid-layer add/removes and
4483 * wakes any reset sleepers.
4488 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4490 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4491 struct ipr_resource_entry *res;
4492 struct ipr_hostrcb *hostrcb, *temp;
4496 ioa_cfg->in_reset_reload = 0;
4497 ioa_cfg->allow_cmds = 1;
4498 ioa_cfg->reset_cmd = NULL;
4499 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4501 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4502 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4507 schedule_work(&ioa_cfg->work_q);
4509 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4510 list_del(&hostrcb->queue);
4511 if (i++ < IPR_NUM_LOG_HCAMS)
4512 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4514 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4517 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4519 ioa_cfg->reset_retries = 0;
4520 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4521 wake_up_all(&ioa_cfg->reset_wait_q);
4523 spin_unlock_irq(ioa_cfg->host->host_lock);
4524 scsi_unblock_requests(ioa_cfg->host);
4525 spin_lock_irq(ioa_cfg->host->host_lock);
4527 if (!ioa_cfg->allow_cmds)
4528 scsi_block_requests(ioa_cfg->host);
4531 return IPR_RC_JOB_RETURN;
4535 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4536 * @supported_dev: supported device struct
4537 * @vpids: vendor product id struct
4542 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4543 struct ipr_std_inq_vpids *vpids)
4545 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4546 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4547 supported_dev->num_records = 1;
4548 supported_dev->data_length =
4549 cpu_to_be16(sizeof(struct ipr_supported_device));
4550 supported_dev->reserved = 0;
4554 * ipr_set_supported_devs - Send Set Supported Devices for a device
4555 * @ipr_cmd: ipr command struct
4557 * This function sends a Set Supported Devices command to the adapter
4560 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4562 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4564 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4565 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4566 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4567 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4568 struct ipr_resource_entry *res = ipr_cmd->u.res;
4570 ipr_cmd->job_step = ipr_ioa_reset_done;
4572 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4573 if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4576 ipr_cmd->u.res = res;
4577 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4579 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4580 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4581 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4583 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4584 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4585 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4587 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4588 sizeof(struct ipr_supported_device));
4589 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4590 offsetof(struct ipr_misc_cbs, supp_dev));
4591 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4592 ioarcb->write_data_transfer_length =
4593 cpu_to_be32(sizeof(struct ipr_supported_device));
4595 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4596 IPR_SET_SUP_DEVICE_TIMEOUT);
4598 ipr_cmd->job_step = ipr_set_supported_devs;
4599 return IPR_RC_JOB_RETURN;
4602 return IPR_RC_JOB_CONTINUE;
4606 * ipr_setup_write_cache - Disable write cache if needed
4607 * @ipr_cmd: ipr command struct
4609 * This function sets up the adapter's write cache to the desired setting
4612 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4614 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4618 ipr_cmd->job_step = ipr_set_supported_devs;
4619 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4620 struct ipr_resource_entry, queue);
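/* The cache only needs explicit handling when the user disabled it
 * (ipr_enable_cache=0); a prepare-for-normal IOA shutdown is then issued,
 * which leaves the write cache disabled. */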
4622 if (ioa_cfg->cache_state != CACHE_DISABLED)
4623 return IPR_RC_JOB_CONTINUE;
4625 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4626 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4627 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4628 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4630 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4632 return IPR_RC_JOB_RETURN;
4636 * ipr_get_mode_page - Locate specified mode page
4637 * @mode_pages: mode page buffer
4638 * @page_code: page code to find
4639 * @len: minimum required length for mode page
4642 * pointer to mode page / NULL on failure
4644 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4645 u32 page_code, u32 len)
4647 struct ipr_mode_page_hdr *mode_hdr;
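/* The mode data length byte counts everything after itself, so the usable
 * page data length is (length + 1) minus the 4-byte mode parameter header
 * and any block descriptors. */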
4651 if (!mode_pages || (mode_pages->hdr.length == 0))
4654 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4655 mode_hdr = (struct ipr_mode_page_hdr *)
4656 (mode_pages->data + mode_pages->hdr.block_desc_len);
4659 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4660 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4664 page_length = (sizeof(struct ipr_mode_page_hdr) +
4665 mode_hdr->page_length);
4666 length -= page_length;
4667 mode_hdr = (struct ipr_mode_page_hdr *)
4668 ((unsigned long)mode_hdr + page_length);
4675 * ipr_check_term_power - Check for term power errors
4676 * @ioa_cfg: ioa config struct
4677 * @mode_pages: IOAFP mode pages buffer
4679 * Check the IOAFP's mode page 28 for term power errors
4684 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4685 struct ipr_mode_pages *mode_pages)
4689 struct ipr_dev_bus_entry *bus;
4690 struct ipr_mode_page28 *mode_page;
4692 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4693 sizeof(struct ipr_mode_page28));
4695 entry_length = mode_page->entry_length;
4697 bus = mode_page->bus;
4699 for (i = 0; i < mode_page->num_entries; i++) {
4700 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4701 dev_err(&ioa_cfg->pdev->dev,
4702 "Term power is absent on scsi bus %d\n",
4706 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4711 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4712 * @ioa_cfg: ioa config struct
4714 * Looks through the config table for SES devices. If an SES device
4715 * appears in the SES table with a maximum SCSI bus speed, the
4716 * speed of that bus is limited accordingly.
4721 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4726 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4727 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4728 ioa_cfg->bus_attr[i].bus_width);
4730 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4731 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4736 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4737 * @ioa_cfg: ioa config struct
4738 * @mode_pages: mode page 28 buffer
4740 * Updates mode page 28 based on driver configuration
4745 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4746 struct ipr_mode_pages *mode_pages)
4748 int i, entry_length;
4749 struct ipr_dev_bus_entry *bus;
4750 struct ipr_bus_attributes *bus_attr;
4751 struct ipr_mode_page28 *mode_page;
4753 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4754 sizeof(struct ipr_mode_page28));
4756 entry_length = mode_page->entry_length;
4758 /* Loop for each device bus entry */
4759 for (i = 0, bus = mode_page->bus;
4760 i < mode_page->num_entries;
4761 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4762 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4763 dev_err(&ioa_cfg->pdev->dev,
4764 "Invalid resource address reported: 0x%08X\n",
4765 IPR_GET_PHYS_LOC(bus->res_addr));
4769 bus_attr = &ioa_cfg->bus_attr[i];
4770 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4771 bus->bus_width = bus_attr->bus_width;
4772 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4773 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4774 if (bus_attr->qas_enabled)
4775 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4777 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4782 * ipr_build_mode_select - Build a mode select command
4783 * @ipr_cmd: ipr command struct
4784 * @res_handle: resource handle to send command to
4785 * @parm: byte 1 of the Mode Select CDB (PF/SP bits)
4786 * @dma_addr: DMA buffer address
4787 * @xfer_len: data transfer length
4792 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4793 __be32 res_handle, u8 parm, u32 dma_addr,
4796 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4797 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4799 ioarcb->res_handle = res_handle;
4800 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4801 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4802 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4803 ioarcb->cmd_pkt.cdb[1] = parm;
4804 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4806 ioadl->flags_and_data_len =
4807 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4808 ioadl->address = cpu_to_be32(dma_addr);
4809 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4810 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4814 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4815 * @ipr_cmd: ipr command struct
4817 * This function sets up the SCSI bus attributes and sends
4818 * a Mode Select for Page 28 to activate them.
4823 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4825 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4826 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
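/* If a user MODE SELECT to the IOA was previously saved off, replay it
 * verbatim; otherwise build page 28 from the current bus attributes and
 * clear the mode data length byte, which is reserved for MODE SELECT. */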
4830 if (ioa_cfg->saved_mode_pages) {
4831 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4832 ioa_cfg->saved_mode_page_len);
4833 length = ioa_cfg->saved_mode_page_len;
4835 ipr_scsi_bus_speed_limit(ioa_cfg);
4836 ipr_check_term_power(ioa_cfg, mode_pages);
4837 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4838 length = mode_pages->hdr.length + 1;
4839 mode_pages->hdr.length = 0;
4842 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4843 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4846 ipr_cmd->job_step = ipr_setup_write_cache;
4847 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4850 return IPR_RC_JOB_RETURN;
4854 * ipr_build_mode_sense - Builds a mode sense command
4855 * @ipr_cmd: ipr command struct
4856 * @res_handle: resource handle to send command to
4857 * @parm: Byte 2 of mode sense command
4858 * @dma_addr: DMA address of mode sense buffer
4859 * @xfer_len: Size of DMA buffer
4864 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4866 __be32 res_handle, u8 parm, u32 dma_addr, u8 xfer_len)
4868 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4869 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4871 ioarcb->res_handle = res_handle;
4872 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4873 ioarcb->cmd_pkt.cdb[2] = parm;
4874 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4875 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4877 ioadl->flags_and_data_len =
4878 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4879 ioadl->address = cpu_to_be32(dma_addr);
4880 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4881 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4885 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4886 * @ipr_cmd: ipr command struct
4888 * This function sends a Page 28 mode sense to the IOA to
4889 * retrieve SCSI bus attributes.
4894 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4896 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4899 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4900 0x28, ioa_cfg->vpd_cbs_dma +
4901 offsetof(struct ipr_misc_cbs, mode_pages),
4902 sizeof(struct ipr_mode_pages));
4904 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4906 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4909 return IPR_RC_JOB_RETURN;
4913 * ipr_init_res_table - Initialize the resource table
4914 * @ipr_cmd: ipr command struct
4916 * This function looks through the existing resource table, comparing
4917 * it with the config table. It takes care of old and new devices,
4918 * scheduling them to be added to or removed from the mid-layer.
4922 * IPR_RC_JOB_CONTINUE
4924 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4926 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4927 struct ipr_resource_entry *res, *temp;
4928 struct ipr_config_table_entry *cfgte;
4933 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4934 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
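/* Park every known resource on a temporary list, then walk the freshly
 * fetched config table: matching entries move back to used_res_q, new ones
 * are pulled from free_res_q and initialized, and anything left over
 * afterwards has disappeared and is scheduled for removal. */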
4936 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4937 list_move_tail(&res->queue, &old_res);
4939 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4940 cfgte = &ioa_cfg->cfg_table->dev[i];
4943 list_for_each_entry_safe(res, temp, &old_res, queue) {
4944 if (!memcmp(&res->cfgte.res_addr,
4945 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4946 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4953 if (list_empty(&ioa_cfg->free_res_q)) {
4954 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4959 res = list_entry(ioa_cfg->free_res_q.next,
4960 struct ipr_resource_entry, queue);
4961 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4962 ipr_init_res_entry(res);
4967 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4970 list_for_each_entry_safe(res, temp, &old_res, queue) {
4972 res->del_from_ml = 1;
4973 res->sdev->hostdata = NULL;
4974 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4976 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4980 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4983 return IPR_RC_JOB_CONTINUE;
4987 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4988 * @ipr_cmd: ipr command struct
4990 * This function sends a Query IOA Configuration command
4991 * to the adapter to retrieve the IOA configuration table.
4996 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4998 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4999 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5000 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5001 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5004 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5005 ucode_vpd->major_release, ucode_vpd->card_type,
5006 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5007 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5008 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5010 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5011 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5012 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5014 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5015 ioarcb->read_data_transfer_length =
5016 cpu_to_be32(sizeof(struct ipr_config_table));
5018 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5019 ioadl->flags_and_data_len =
5020 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5022 ipr_cmd->job_step = ipr_init_res_table;
5024 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5027 return IPR_RC_JOB_RETURN;
5031 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5032 * @ipr_cmd: ipr command struct
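* @flags: INQUIRY CDB byte 1 (EVPD flag)
* @page: page code to request
* @dma_addr: DMA address of the inquiry buffer
* @xfer_len: size of the inquiry buffer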
5034 * This utility function sends an inquiry to the adapter.
5039 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5040 u32 dma_addr, u8 xfer_len)
5042 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5043 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5046 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5047 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5049 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5050 ioarcb->cmd_pkt.cdb[1] = flags;
5051 ioarcb->cmd_pkt.cdb[2] = page;
5052 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5054 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5055 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5057 ioadl->address = cpu_to_be32(dma_addr);
5058 ioadl->flags_and_data_len =
5059 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5061 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5066 * ipr_inquiry_page_supported - Is the given inquiry page supported
5067 * @page0: inquiry page 0 buffer
5070 * This function determines if the specified inquiry page is supported.
5073 * 1 if page is supported / 0 if not
5075 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5079 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5080 if (page0->page[i] == page)
5087 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5088 * @ipr_cmd: ipr command struct
5090 * This function sends a Page 3 inquiry to the adapter
5091 * to retrieve software VPD information.
5094 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5096 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5098 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5099 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5103 if (!ipr_inquiry_page_supported(page0, 1))
5104 ioa_cfg->cache_state = CACHE_NONE;
5106 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5108 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5109 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5110 sizeof(struct ipr_inquiry_page3));
5113 return IPR_RC_JOB_RETURN;
5117 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5118 * @ipr_cmd: ipr command struct
5120 * This function sends a Page 0 inquiry to the adapter
5121 * to retrieve supported inquiry pages.
5124 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5126 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5128 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5133 /* Grab the type out of the VPD and store it away */
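/* The first four characters of the product ID are the adapter type in
 * ASCII hex (e.g. "5702"); parse them into ioa_cfg->type. */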
5134 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5136 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5138 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5140 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5141 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5142 sizeof(struct ipr_inquiry_page0));
5145 return IPR_RC_JOB_RETURN;
5149 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5150 * @ipr_cmd: ipr command struct
5152 * This function sends a standard inquiry to the adapter.
5157 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5159 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5162 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5164 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5165 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5166 sizeof(struct ipr_ioa_vpd));
5169 return IPR_RC_JOB_RETURN;
5173 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5174 * @ipr_cmd: ipr command struct
5176 * This function sends an Identify Host Request Response Queue
5177 * command to establish the HRRQ with the adapter.
5182 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5184 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5185 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5188 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5190 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5191 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5193 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
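/* The Identify Host RRQ CDB carries the 32-bit bus address of the host
 * RRQ in bytes 2-5 (most significant byte first) and its length in bytes
 * in CDB bytes 7-8. */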
5194 ioarcb->cmd_pkt.cdb[2] =
5195 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5196 ioarcb->cmd_pkt.cdb[3] =
5197 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5198 ioarcb->cmd_pkt.cdb[4] =
5199 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5200 ioarcb->cmd_pkt.cdb[5] =
5201 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
5202 ioarcb->cmd_pkt.cdb[7] =
5203 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5204 ioarcb->cmd_pkt.cdb[8] =
5205 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5207 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5209 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5212 return IPR_RC_JOB_RETURN;
5216 * ipr_reset_timer_done - Adapter reset timer function
5217 * @ipr_cmd: ipr command struct
5219 * Description: This function is used in adapter reset processing
5220 * for timing events. If the reset_cmd pointer in the IOA
5221 * config struct is not this adapter's, we are doing nested
5222 * resets and fail_all_ops will take care of freeing the command block.
5228 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5230 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5231 unsigned long lock_flags = 0;
5233 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5235 if (ioa_cfg->reset_cmd == ipr_cmd) {
5236 list_del(&ipr_cmd->queue);
5237 ipr_cmd->done(ipr_cmd);
5240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5244 * ipr_reset_start_timer - Start a timer for adapter reset job
5245 * @ipr_cmd: ipr command struct
5246 * @timeout: timeout value
5248 * Description: This function is used in adapter reset processing
5249 * for timing events. If the reset_cmd pointer in the IOA
5250 * config struct is not this adapter's, we are doing nested
5251 * resets and fail_all_ops will take care of freeing the command block.
5257 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5258 unsigned long timeout)
5260 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5261 ipr_cmd->done = ipr_reset_ioa_job;
5263 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5264 ipr_cmd->timer.expires = jiffies + timeout;
5265 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5266 add_timer(&ipr_cmd->timer);
5270 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5271 * @ioa_cfg: ioa cfg struct
5276 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5278 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5280 /* Initialize Host RRQ pointers */
5281 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5282 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5283 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
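/* The toggle bit distinguishes fresh RRQ entries from stale ones; the
 * driver starts out expecting 1 and flips its expectation each time the
 * queue wraps. */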
5284 ioa_cfg->toggle_bit = 1;
5286 /* Zero out config table */
5287 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5291 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5292 * @ipr_cmd: ipr command struct
5294 * This function reinitializes some control blocks and
5295 * enables destructive diagnostics on the adapter.
5300 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5302 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5303 volatile u32 int_reg;
5306 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5307 ipr_init_ioa_mem(ioa_cfg);
5309 ioa_cfg->allow_interrupts = 1;
5310 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
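/* If the adapter has already signalled the transition to operational,
 * just unmask the HRRQ/error interrupts and continue straight to the
 * Identify HRRQ step; otherwise enable destructive diagnostics below and
 * arm a timer to wait for the transition. */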
5312 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5313 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5314 ioa_cfg->regs.clr_interrupt_mask_reg);
5315 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5316 return IPR_RC_JOB_CONTINUE;
5319 /* Enable destructive diagnostics on IOA */
5320 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5322 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5323 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5325 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5327 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5328 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5329 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5330 ipr_cmd->done = ipr_reset_ioa_job;
5331 add_timer(&ipr_cmd->timer);
5332 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5335 return IPR_RC_JOB_RETURN;
5339 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5340 * @ipr_cmd: ipr command struct
5342 * This function is invoked when an adapter dump has run out
5343 * of processing time.
5346 * IPR_RC_JOB_CONTINUE
5348 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5350 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5352 if (ioa_cfg->sdt_state == GET_DUMP)
5353 ioa_cfg->sdt_state = ABORT_DUMP;
5355 ipr_cmd->job_step = ipr_reset_alert;
5357 return IPR_RC_JOB_CONTINUE;
5361 * ipr_unit_check_no_data - Log a unit check/no data error log
5362 * @ioa_cfg: ioa config struct
5364 * Logs an error indicating the adapter unit checked, but for some
5365 * reason, we were unable to fetch the unit check buffer.
5370 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5372 ioa_cfg->errors_logged++;
5373 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5377 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5378 * @ioa_cfg: ioa config struct
5380 * Fetches the unit check buffer from the adapter by clocking the data
5381 * through the mailbox register.
5386 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5388 unsigned long mailbox;
5389 struct ipr_hostrcb *hostrcb;
5390 struct ipr_uc_sdt sdt;
5393 mailbox = readl(ioa_cfg->ioa_mailbox);
5395 if (!ipr_sdt_is_fmt2(mailbox)) {
5396 ipr_unit_check_no_data(ioa_cfg);
5400 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5401 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5402 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5404 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5405 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5406 ipr_unit_check_no_data(ioa_cfg);
5410 /* Find length of the first sdt entry (UC buffer) */
5411 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5412 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5414 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5415 struct ipr_hostrcb, queue);
5416 list_del(&hostrcb->queue);
5417 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5419 rc = ipr_get_ldump_data_section(ioa_cfg,
5420 be32_to_cpu(sdt.entry[0].bar_str_offset),
5421 (__be32 *)&hostrcb->hcam,
5422 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5425 ipr_handle_log_data(ioa_cfg, hostrcb);
5427 ipr_unit_check_no_data(ioa_cfg);
5429 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5433 * ipr_reset_restore_cfg_space - Restore PCI config space.
5434 * @ipr_cmd: ipr command struct
5436 * Description: This function restores the saved PCI config space of
5437 * the adapter, fails all outstanding ops back to the callers, and
5438 * fetches the dump/unit check if applicable to this reset.
5441 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5443 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5445 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5449 pci_unblock_user_cfg_access(ioa_cfg->pdev);
5450 rc = pci_restore_state(ioa_cfg->pdev);
5452 if (rc != PCIBIOS_SUCCESSFUL) {
5453 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5454 return IPR_RC_JOB_CONTINUE;
5457 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5458 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5459 return IPR_RC_JOB_CONTINUE;
5462 ipr_fail_all_ops(ioa_cfg);
5464 if (ioa_cfg->ioa_unit_checked) {
5465 ioa_cfg->ioa_unit_checked = 0;
5466 ipr_get_unit_check_buffer(ioa_cfg);
5467 ipr_cmd->job_step = ipr_reset_alert;
5468 ipr_reset_start_timer(ipr_cmd, 0);
5469 return IPR_RC_JOB_RETURN;
5472 if (ioa_cfg->in_ioa_bringdown) {
5473 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5475 ipr_cmd->job_step = ipr_reset_enable_ioa;
5477 if (GET_DUMP == ioa_cfg->sdt_state) {
5478 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5479 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5480 schedule_work(&ioa_cfg->work_q);
5481 return IPR_RC_JOB_RETURN;
5486 return IPR_RC_JOB_CONTINUE;
5490 * ipr_reset_start_bist - Run BIST on the adapter.
5491 * @ipr_cmd: ipr command struct
5493 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5496 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5498 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5500 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5504 pci_block_user_cfg_access(ioa_cfg->pdev);
5505 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5507 if (rc != PCIBIOS_SUCCESSFUL) {
5508 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5509 rc = IPR_RC_JOB_CONTINUE;
5511 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5512 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5513 rc = IPR_RC_JOB_RETURN;
5521 * ipr_reset_allowed - Query whether or not IOA can be reset
5522 * @ioa_cfg: ioa config struct
5525 * 0 if reset not allowed / non-zero if reset is allowed
5527 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5529 volatile u32 temp_reg;
5531 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5532 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5536 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5537 * @ipr_cmd: ipr command struct
5539 * Description: This function waits for adapter permission to run BIST,
5540 * then runs BIST. If the adapter does not give permission after a
5541 * reasonable time, we will reset the adapter anyway. The impact of
5542 * resetting the adapter without warning the adapter is the risk of
5543 * losing the persistent error log on the adapter. If the adapter is
5544 * reset while it is writing to the flash on the adapter, the flash
5545 * segment will have bad ECC and be zeroed.
5548 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5550 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5552 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5553 int rc = IPR_RC_JOB_RETURN;
5555 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5556 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5557 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5559 ipr_cmd->job_step = ipr_reset_start_bist;
5560 rc = IPR_RC_JOB_CONTINUE;
5567 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5568 * @ipr_cmd: ipr command struct
5570 * Description: This function alerts the adapter that it will be reset.
5571 * If memory space is not currently enabled, proceed directly
5572 * to running BIST on the adapter. The timer must always be started
5573 * so we guarantee we do not run BIST from ipr_isr.
5578 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5580 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5585 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5587 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5588 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5589 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5590 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5592 ipr_cmd->job_step = ipr_reset_start_bist;
5595 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5596 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5599 return IPR_RC_JOB_RETURN;
5603 * ipr_reset_ucode_download_done - Microcode download completion
5604 * @ipr_cmd: ipr command struct
5606 * Description: This function unmaps the microcode download buffer.
5609 * IPR_RC_JOB_CONTINUE
5611 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5613 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5614 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5616 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5617 sglist->num_sg, DMA_TO_DEVICE);
5619 ipr_cmd->job_step = ipr_reset_alert;
5620 return IPR_RC_JOB_CONTINUE;
5624 * ipr_reset_ucode_download - Download microcode to the adapter
5625 * @ipr_cmd: ipr command struct
5627 * Description: This function checks to see if there is microcode
5628 * to download to the adapter. If there is, a download is performed.
5631 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5633 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5635 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5636 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5639 ipr_cmd->job_step = ipr_reset_alert;
5642 return IPR_RC_JOB_CONTINUE;
5644 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5645 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5646 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5647 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
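/* WRITE BUFFER: the parameter list length occupies CDB bytes 6-8, most
 * significant byte first. */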
5648 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5649 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5650 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5652 ipr_build_ucode_ioadl(ipr_cmd, sglist);
5653 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5655 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5656 IPR_WRITE_BUFFER_TIMEOUT);
5659 return IPR_RC_JOB_RETURN;
5663 * ipr_reset_shutdown_ioa - Shutdown the adapter
5664 * @ipr_cmd: ipr command struct
5666 * Description: This function issues an adapter shutdown of the
5667 * specified type to the specified adapter as part of the
5668 * adapter reset job.
5671 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5673 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5675 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5676 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5677 unsigned long timeout;
5678 int rc = IPR_RC_JOB_CONTINUE;
5681 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5682 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5683 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5684 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5685 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5687 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5688 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5689 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5690 timeout = IPR_INTERNAL_TIMEOUT;
5692 timeout = IPR_SHUTDOWN_TIMEOUT;
5694 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5696 rc = IPR_RC_JOB_RETURN;
5697 ipr_cmd->job_step = ipr_reset_ucode_download;
5699 ipr_cmd->job_step = ipr_reset_alert;
5706 * ipr_reset_ioa_job - Adapter reset job
5707 * @ipr_cmd: ipr command struct
5709 * Description: This function is the job router for the adapter reset job.
5714 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5717 unsigned long scratch = ipr_cmd->u.scratch;
5718 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5721 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5723 if (ioa_cfg->reset_cmd != ipr_cmd) {
5725 * We are doing nested adapter resets and this is
5726 * not the current reset job.
5728 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5732 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5733 dev_err(&ioa_cfg->pdev->dev,
5734 "0x%02X failed with IOASC: 0x%08X\n",
5735 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5737 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5738 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5742 ipr_reinit_ipr_cmnd(ipr_cmd);
5743 ipr_cmd->u.scratch = scratch;
5744 rc = ipr_cmd->job_step(ipr_cmd);
5745 } while(rc == IPR_RC_JOB_CONTINUE);
5749 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5750 * @ioa_cfg: ioa config struct
5751 * @job_step: first job step of reset job
5752 * @shutdown_type: shutdown type
5754 * Description: This function will initiate the reset of the given adapter
5755 * starting at the selected job step.
5756 * If the caller needs to wait on the completion of the reset,
5757 * the caller must sleep on the reset_wait_q.
5762 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5763 int (*job_step) (struct ipr_cmnd *),
5764 enum ipr_shutdown_type shutdown_type)
5766 struct ipr_cmnd *ipr_cmd;
5768 ioa_cfg->in_reset_reload = 1;
5769 ioa_cfg->allow_cmds = 0;
5770 scsi_block_requests(ioa_cfg->host);
5772 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5773 ioa_cfg->reset_cmd = ipr_cmd;
5774 ipr_cmd->job_step = job_step;
5775 ipr_cmd->u.shutdown_type = shutdown_type;
5777 ipr_reset_ioa_job(ipr_cmd);
5781 * ipr_initiate_ioa_reset - Initiate an adapter reset
5782 * @ioa_cfg: ioa config struct
5783 * @shutdown_type: shutdown type
5785 * Description: This function will initiate the reset of the given adapter.
5786 * If the caller needs to wait on the completion of the reset,
5787 * the caller must sleep on the reset_wait_q.
5792 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5793 enum ipr_shutdown_type shutdown_type)
5795 if (ioa_cfg->ioa_is_dead)
5798 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5799 ioa_cfg->sdt_state = ABORT_DUMP;
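/* After IPR_NUM_RESET_RELOAD_RETRIES failed attempts, give up: mark the
 * IOA dead and, unless a bringdown is already in progress, convert this
 * reset into a bringdown with no further shutdown command. */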
5801 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5802 dev_err(&ioa_cfg->pdev->dev,
5803 "IOA taken offline - error recovery failed\n");
5805 ioa_cfg->reset_retries = 0;
5806 ioa_cfg->ioa_is_dead = 1;
5808 if (ioa_cfg->in_ioa_bringdown) {
5809 ioa_cfg->reset_cmd = NULL;
5810 ioa_cfg->in_reset_reload = 0;
5811 ipr_fail_all_ops(ioa_cfg);
5812 wake_up_all(&ioa_cfg->reset_wait_q);
5814 spin_unlock_irq(ioa_cfg->host->host_lock);
5815 scsi_unblock_requests(ioa_cfg->host);
5816 spin_lock_irq(ioa_cfg->host->host_lock);
5819 ioa_cfg->in_ioa_bringdown = 1;
5820 shutdown_type = IPR_SHUTDOWN_NONE;
5824 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5829 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5830 * @ioa_cfg: ioa cfg struct
5832 * Description: This is the second phase of adapter initialization.
5833 * This function takes care of initializing the adapter to the point
5834 * where it can accept new commands.
5837 * 0 on success / -EIO on failure
5839 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5842 unsigned long host_lock_flags = 0;
5845 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5846 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5847 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5849 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5850 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5851 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5853 if (ioa_cfg->ioa_is_dead) {
5855 } else if (ipr_invalid_adapter(ioa_cfg)) {
5859 dev_err(&ioa_cfg->pdev->dev,
5860 "Adapter not supported in this hardware configuration.\n");
5863 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5870 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5871 * @ioa_cfg: ioa config struct
5876 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5880 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5881 if (ioa_cfg->ipr_cmnd_list[i])
5882 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5883 ioa_cfg->ipr_cmnd_list[i],
5884 ioa_cfg->ipr_cmnd_list_dma[i]);
5886 ioa_cfg->ipr_cmnd_list[i] = NULL;
5889 if (ioa_cfg->ipr_cmd_pool)
5890 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5892 ioa_cfg->ipr_cmd_pool = NULL;
5896 * ipr_free_mem - Frees memory allocated for an adapter
5897 * @ioa_cfg: ioa cfg struct
5902 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5906 kfree(ioa_cfg->res_entries);
5907 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5908 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5909 ipr_free_cmd_blks(ioa_cfg);
5910 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5911 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5912 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5914 ioa_cfg->cfg_table_dma);
5916 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5917 pci_free_consistent(ioa_cfg->pdev,
5918 sizeof(struct ipr_hostrcb),
5919 ioa_cfg->hostrcb[i],
5920 ioa_cfg->hostrcb_dma[i]);
5923 ipr_free_dump(ioa_cfg);
5924 kfree(ioa_cfg->saved_mode_pages);
5925 kfree(ioa_cfg->trace);
5929 * ipr_free_all_resources - Free all allocated resources for an adapter.
5930 * @ioa_cfg: ioa config struct
5932 * This function frees all allocated resources for the
5933 * specified adapter.
5938 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5940 struct pci_dev *pdev = ioa_cfg->pdev;
5943 free_irq(pdev->irq, ioa_cfg);
5944 iounmap(ioa_cfg->hdw_dma_regs);
5945 pci_release_regions(pdev);
5946 ipr_free_mem(ioa_cfg);
5947 scsi_host_put(ioa_cfg->host);
5948 pci_disable_device(pdev);
5953 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5954 * @ioa_cfg: ioa config struct
5957 * 0 on success / -ENOMEM on allocation failure
5959 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5961 struct ipr_cmnd *ipr_cmd;
5962 struct ipr_ioarcb *ioarcb;
5963 dma_addr_t dma_addr;
5966 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
5967 sizeof(struct ipr_cmnd), 8, 0);
5969 if (!ioa_cfg->ipr_cmd_pool)
5972 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5973 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
5976 ipr_free_cmd_blks(ioa_cfg);
5980 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5981 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5982 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
5984 ioarcb = &ipr_cmd->ioarcb;
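/* Each command block records its own bus address in the IOARCB, along
 * with the offsets of its embedded IOADL and IOASA, so the adapter can DMA
 * the descriptor list and status area without any further mapping. */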
5985 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5986 ioarcb->host_response_handle = cpu_to_be32(i << 2);
5987 ioarcb->write_ioadl_addr =
5988 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5989 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5990 ioarcb->ioasa_host_pci_addr =
5991 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5992 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5993 ipr_cmd->cmd_index = i;
5994 ipr_cmd->ioa_cfg = ioa_cfg;
5995 ipr_cmd->sense_buffer_dma = dma_addr +
5996 offsetof(struct ipr_cmnd, sense_buffer);
5998 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6005 * ipr_alloc_mem - Allocate memory for an adapter
6006 * @ioa_cfg: ioa config struct
6009 * 0 on success / non-zero for error
6011 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6013 struct pci_dev *pdev = ioa_cfg->pdev;
6014 int i, rc = -ENOMEM;
6017 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
6018 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6020 if (!ioa_cfg->res_entries)
6023 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6024 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6026 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6027 sizeof(struct ipr_misc_cbs),
6028 &ioa_cfg->vpd_cbs_dma);
6030 if (!ioa_cfg->vpd_cbs)
6031 goto out_free_res_entries;
6033 if (ipr_alloc_cmd_blks(ioa_cfg))
6034 goto out_free_vpd_cbs;
6036 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6037 sizeof(u32) * IPR_NUM_CMD_BLKS,
6038 &ioa_cfg->host_rrq_dma);
6040 if (!ioa_cfg->host_rrq)
6041 goto out_ipr_free_cmd_blocks;
6043 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6044 sizeof(struct ipr_config_table),
6045 &ioa_cfg->cfg_table_dma);
6047 if (!ioa_cfg->cfg_table)
6048 goto out_free_host_rrq;
6050 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6051 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6052 sizeof(struct ipr_hostrcb),
6053 &ioa_cfg->hostrcb_dma[i]);
6055 if (!ioa_cfg->hostrcb[i])
6056 goto out_free_hostrcb_dma;
6058 ioa_cfg->hostrcb[i]->hostrcb_dma =
6059 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6060 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6063 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
6064 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6066 if (!ioa_cfg->trace)
6067 goto out_free_hostrcb_dma;
6074 out_free_hostrcb_dma:
6076 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6077 ioa_cfg->hostrcb[i],
6078 ioa_cfg->hostrcb_dma[i]);
6080 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6081 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6083 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6084 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6085 out_ipr_free_cmd_blocks:
6086 ipr_free_cmd_blks(ioa_cfg);
6088 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6089 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6090 out_free_res_entries:
6091 kfree(ioa_cfg->res_entries);
6096 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6097 * @ioa_cfg: ioa config struct
6102 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6106 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6107 ioa_cfg->bus_attr[i].bus = i;
6108 ioa_cfg->bus_attr[i].qas_enabled = 0;
6109 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6110 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6111 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6113 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6118 * ipr_init_ioa_cfg - Initialize IOA config struct
6119 * @ioa_cfg: ioa config struct
6120 * @host: scsi host struct
6121 * @pdev: PCI dev struct
6126 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6127 struct Scsi_Host *host, struct pci_dev *pdev)
6129 const struct ipr_interrupt_offsets *p;
6130 struct ipr_interrupts *t;
6133 ioa_cfg->host = host;
6134 ioa_cfg->pdev = pdev;
6135 ioa_cfg->log_level = ipr_log_level;
6136 ioa_cfg->doorbell = IPR_DOORBELL;
6137 if (!ipr_auto_create)
6138 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6139 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6140 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6141 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6142 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6143 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6144 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6145 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6146 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6148 INIT_LIST_HEAD(&ioa_cfg->free_q);
6149 INIT_LIST_HEAD(&ioa_cfg->pending_q);
6150 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6151 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6152 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6153 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6154 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6155 init_waitqueue_head(&ioa_cfg->reset_wait_q);
6156 ioa_cfg->sdt_state = INACTIVE;
6157 if (ipr_enable_cache)
6158 ioa_cfg->cache_state = CACHE_ENABLED;
6160 ioa_cfg->cache_state = CACHE_DISABLED;
6162 ipr_initialize_bus_attr(ioa_cfg);
6164 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6165 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6166 host->max_channel = IPR_MAX_BUS_TO_SCAN;
6167 host->unique_id = host->host_no;
6168 host->max_cmd_len = IPR_MAX_CDB_LEN;
6169 pci_set_drvdata(pdev, ioa_cfg);
6171 p = &ioa_cfg->chip_cfg->regs;
6173 base = ioa_cfg->hdw_dma_regs;
6175 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6176 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6177 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6178 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6179 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6180 t->ioarrin_reg = base + p->ioarrin_reg;
6181 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6182 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6183 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6187 * ipr_get_chip_cfg - Find adapter chip configuration
6188 * @dev_id: PCI device id struct
6191 * ptr to chip config on success / NULL on failure
6193 static const struct ipr_chip_cfg_t * __devinit
6194 ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6198 if (dev_id->driver_data)
6199 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6201 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6202 if (ipr_chip[i].vendor == dev_id->vendor &&
6203 ipr_chip[i].device == dev_id->device)
6204 return ipr_chip[i].cfg;
6209 * ipr_probe_ioa - Allocates memory and does first stage of initialization
6210 * @pdev: PCI device struct
6211 * @dev_id: PCI device id struct
6214 * 0 on success / non-zero on failure
6216 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6217 const struct pci_device_id *dev_id)
6219 struct ipr_ioa_cfg *ioa_cfg;
6220 struct Scsi_Host *host;
6221 unsigned long ipr_regs_pci;
6222 void __iomem *ipr_regs;
6223 u32 rc = PCIBIOS_SUCCESSFUL;
6227 if ((rc = pci_enable_device(pdev))) {
6228 dev_err(&pdev->dev, "Cannot enable adapter\n");
6232 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6234 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6237 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6242 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6243 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6245 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6247 if (!ioa_cfg->chip_cfg) {
6248 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6249 dev_id->vendor, dev_id->device);
6250 goto out_scsi_host_put;
6253 ipr_regs_pci = pci_resource_start(pdev, 0);
6255 rc = pci_request_regions(pdev, IPR_NAME);
6258 "Couldn't register memory range of registers\n");
6259 goto out_scsi_host_put;
6262 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6266 "Couldn't map memory range of registers\n");
6268 goto out_release_regions;
6271 ioa_cfg->hdw_dma_regs = ipr_regs;
6272 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6273 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6275 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6277 pci_set_master(pdev);
6279 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6281 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6285 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6286 ioa_cfg->chip_cfg->cache_line_size);
6288 if (rc != PCIBIOS_SUCCESSFUL) {
6289 dev_err(&pdev->dev, "Write of cache line size failed\n");
6294 /* Save away PCI config space for use following IOA reset */
6295 rc = pci_save_state(pdev);
6297 if (rc != PCIBIOS_SUCCESSFUL) {
6298 dev_err(&pdev->dev, "Failed to save PCI config space\n");
6303 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6306 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6309 rc = ipr_alloc_mem(ioa_cfg);
6312 "Couldn't allocate enough memory for device driver!\n");
6316 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6317 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
6320 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6325 spin_lock(&ipr_driver_lock);
6326 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6327 spin_unlock(&ipr_driver_lock);
6334 ipr_free_mem(ioa_cfg);
6337 out_release_regions:
6338 pci_release_regions(pdev);
6340 scsi_host_put(host);
6342 pci_disable_device(pdev);
6347 * ipr_scan_vsets - Scans for VSET devices
6348 * @ioa_cfg: ioa config struct
6350 * Description: Since the VSET resources do not follow SAM (we can have
6351 * sparse LUNs with no LUN 0), we have to scan for these ourselves.
6356 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6360 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6361 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6362 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6366 * ipr_initiate_ioa_bringdown - Bring down an adapter
6367 * @ioa_cfg: ioa config struct
6368 * @shutdown_type: shutdown type
6370 * Description: This function will initiate bringing down the adapter.
6371 * This consists of issuing an IOA shutdown to the adapter
6372 * to flush the cache, and running BIST.
6373 * If the caller needs to wait on the completion of the reset,
6374 * the caller must sleep on the reset_wait_q.
6379 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6380 enum ipr_shutdown_type shutdown_type)
6383 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6384 ioa_cfg->sdt_state = ABORT_DUMP;
6385 ioa_cfg->reset_retries = 0;
6386 ioa_cfg->in_ioa_bringdown = 1;
6387 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6392 * __ipr_remove - Remove a single adapter
6393 * @pdev: pci device struct
6395 * Adapter hot plug remove entry point.
6400 static void __ipr_remove(struct pci_dev *pdev)
6402 unsigned long host_lock_flags = 0;
6403 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6406 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6407 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6410 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6411 flush_scheduled_work();
6412 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6414 spin_lock(&ipr_driver_lock);
6415 list_del(&ioa_cfg->queue);
6416 spin_unlock(&ipr_driver_lock);
6418 if (ioa_cfg->sdt_state == ABORT_DUMP)
6419 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6420 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6422 ipr_free_all_resources(ioa_cfg);
6428 * ipr_remove - IOA hot plug remove entry point
6429 * @pdev: pci device struct
6431 * Adapter hot plug remove entry point.
6436 static void ipr_remove(struct pci_dev *pdev)
6438 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6442 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6444 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6446 scsi_remove_host(ioa_cfg->host);
6454 * ipr_probe - Adapter hot plug add entry point
6457 * 0 on success / non-zero on failure
6459 static int __devinit ipr_probe(struct pci_dev *pdev,
6460 const struct pci_device_id *dev_id)
6462 struct ipr_ioa_cfg *ioa_cfg;
6465 rc = ipr_probe_ioa(pdev, dev_id);
6470 ioa_cfg = pci_get_drvdata(pdev);
6471 rc = ipr_probe_ioa_part2(ioa_cfg);
6478 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6485 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6489 scsi_remove_host(ioa_cfg->host);
6494 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6498 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6500 scsi_remove_host(ioa_cfg->host);
6505 scsi_scan_host(ioa_cfg->host);
6506 ipr_scan_vsets(ioa_cfg);
6507 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6508 ioa_cfg->allow_ml_add_del = 1;
6509 ioa_cfg->host->max_channel = IPR_VSET_BUS;
6510 schedule_work(&ioa_cfg->work_q);
6515 * ipr_shutdown - Shutdown handler.
6516 * @pdev: pci device struct
6518 * This function is invoked upon system shutdown/reboot. It will issue
6519 * an adapter shutdown to the adapter to flush the write cache.
6524 static void ipr_shutdown(struct pci_dev *pdev)
6526 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6527 unsigned long lock_flags = 0;
6529 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6530 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6531 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6532 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6535 static struct pci_device_id ipr_pci_table[] __devinitdata = {
6536 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6537 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6538 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6539 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6540 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6541 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6542 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6543 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6544 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6545 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6546 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6547 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6548 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6549 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6550 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6551 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6552 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6553 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6554 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6555 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6556 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6557 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6558 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6559 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6560 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6561 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6562 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6565 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6567 static struct pci_driver ipr_driver = {
6569 .id_table = ipr_pci_table,
6571 .remove = ipr_remove,
6572 .shutdown = ipr_shutdown,
6576 * ipr_init - Module entry point
6579 * 0 on success / negative value on failure
6581 static int __init ipr_init(void)
6583 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6584 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6586 return pci_module_init(&ipr_driver);
6590 * ipr_exit - Module unload
6592 * Module unload entry point.
6597 static void __exit ipr_exit(void)
6599 pci_unregister_driver(&ipr_driver);
6602 module_init(ipr_init);
6603 module_exit(ipr_exit);