[SCSI] ibmvfc: Handle port login required response
[linux-2.6] drivers/scsi/ibmvscsi/ibmvfc.c
1 /*
2  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
3  *
4  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) IBM Corporation, 2008
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/dmapool.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/kthread.h>
31 #include <linux/of.h>
32 #include <linux/stringify.h>
33 #include <asm/firmware.h>
34 #include <asm/irq.h>
35 #include <asm/vio.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_cmnd.h>
38 #include <scsi/scsi_host.h>
39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_tcq.h>
41 #include <scsi/scsi_transport_fc.h>
42 #include "ibmvfc.h"
43
44 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
45 static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
46 static unsigned int max_lun = IBMVFC_MAX_LUN;
47 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
48 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
49 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
50 static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
51 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
52 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
53 static LIST_HEAD(ibmvfc_head);
54 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
55 static struct scsi_transport_template *ibmvfc_transport_template;
56
57 MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
58 MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
59 MODULE_LICENSE("GPL");
60 MODULE_VERSION(IBMVFC_DRIVER_VERSION);
61
62 module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
63 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
64                  "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
65 module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
66 MODULE_PARM_DESC(default_timeout,
67                  "Default timeout in seconds for initialization and EH commands. "
68                  "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
69 module_param_named(max_requests, max_requests, uint, S_IRUGO);
70 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
71                  "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
72 module_param_named(max_lun, max_lun, uint, S_IRUGO);
73 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
74                  "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
75 module_param_named(max_targets, max_targets, uint, S_IRUGO);
76 MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
77                  "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
78 module_param_named(disc_threads, disc_threads, uint, S_IRUGO | S_IWUSR);
79 MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
80                  "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
81 module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
82 MODULE_PARM_DESC(debug, "Enable driver debug information. "
83                  "[Default=" __stringify(IBMVFC_DEBUG) "]");
84 module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
86                  "transport should insulate the loss of a remote port. Once this "
87                  "value is exceeded, the scsi target is removed. "
88                  "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
89 module_param_named(log_level, log_level, uint, 0);
90 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
91                  "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
92
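/*
 * The cmd_status table below maps a (status, error) pair returned by the
 * server to a SCSI host byte.  ibmvfc_get_err_index() searches it, treating
 * the status field as a bit mask that must be fully set in the returned
 * status while the error code must match exactly.  "result" supplies the
 * host byte for the command, "retry" whether the failure may be retried
 * (ibmvfc_retry_cmd()), and "log" whether it is worth printing
 * (ibmvfc_log_error()).
 */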
93 static const struct {
94         u16 status;
95         u16 error;
96         u8 result;
97         u8 retry;
98         int log;
99         char *name;
100 } cmd_status [] = {
101         { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
102         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
103         { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
104         { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" },
105         { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
106         { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
107         { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
108         { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
109         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
110         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
111         { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
112         { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
113         { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
114         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
115
116         { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
117         { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
118         { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" },
119         { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" },
120         { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
121         { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
122         { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
123         { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
124         { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
125         { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
126
127         { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
128         { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
129         { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
130         { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
131         { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
132         { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
133         { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
134         { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
135         { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
136         { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
137         { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
138
139         { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
140 };
141
142 static void ibmvfc_npiv_login(struct ibmvfc_host *);
143 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
144 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
145 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
146
147 static const char *unknown_error = "unknown error";
148
149 #ifdef CONFIG_SCSI_IBMVFC_TRACE
150 /**
151  * ibmvfc_trc_start - Log a start trace entry
152  * @evt:                ibmvfc event struct
153  *
154  **/
155 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
156 {
157         struct ibmvfc_host *vhost = evt->vhost;
158         struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
159         struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
160         struct ibmvfc_trace_entry *entry;
161
162         entry = &vhost->trace[vhost->trace_index++];
163         entry->evt = evt;
164         entry->time = jiffies;
165         entry->fmt = evt->crq.format;
166         entry->type = IBMVFC_TRC_START;
167
168         switch (entry->fmt) {
169         case IBMVFC_CMD_FORMAT:
170                 entry->op_code = vfc_cmd->iu.cdb[0];
171                 entry->scsi_id = vfc_cmd->tgt_scsi_id;
172                 entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
173                 entry->tmf_flags = vfc_cmd->iu.tmf_flags;
174                 entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
175                 break;
176         case IBMVFC_MAD_FORMAT:
177                 entry->op_code = mad->opcode;
178                 break;
179         default:
180                 break;
181         }
182 }
183
184 /**
185  * ibmvfc_trc_end - Log an end trace entry
186  * @evt:                ibmvfc event struct
187  *
188  **/
189 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
190 {
191         struct ibmvfc_host *vhost = evt->vhost;
192         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
193         struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
194         struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
195
196         entry->evt = evt;
197         entry->time = jiffies;
198         entry->fmt = evt->crq.format;
199         entry->type = IBMVFC_TRC_END;
200
201         switch (entry->fmt) {
202         case IBMVFC_CMD_FORMAT:
203                 entry->op_code = vfc_cmd->iu.cdb[0];
204                 entry->scsi_id = vfc_cmd->tgt_scsi_id;
205                 entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
206                 entry->tmf_flags = vfc_cmd->iu.tmf_flags;
207                 entry->u.end.status = vfc_cmd->status;
208                 entry->u.end.error = vfc_cmd->error;
209                 entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
210                 entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
211                 entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
212                 break;
213         case IBMVFC_MAD_FORMAT:
214                 entry->op_code = mad->opcode;
215                 entry->u.end.status = mad->status;
216                 break;
217         default:
218                 break;
219
220         }
221 }
222
223 #else
224 #define ibmvfc_trc_start(evt) do { } while (0)
225 #define ibmvfc_trc_end(evt) do { } while (0)
226 #endif
227
228 /**
229  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
230  * @status:             status / error class
231  * @error:              error
232  *
233  * Return value:
234  *      index into cmd_status / -EINVAL on failure
235  **/
236 static int ibmvfc_get_err_index(u16 status, u16 error)
237 {
238         int i;
239
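        /*
         * An entry matches when every status bit set in the table entry is
         * also set in @status and the error code matches exactly.
         */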
240         for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
241                 if ((cmd_status[i].status & status) == cmd_status[i].status &&
242                     cmd_status[i].error == error)
243                         return i;
244
245         return -EINVAL;
246 }
247
248 /**
249  * ibmvfc_get_cmd_error - Find the error description for the fcp response
250  * @status:             status / error class
251  * @error:              error
252  *
253  * Return value:
254  *      error description string
255  **/
256 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
257 {
258         int rc = ibmvfc_get_err_index(status, error);
259         if (rc >= 0)
260                 return cmd_status[rc].name;
261         return unknown_error;
262 }
263
264 /**
265  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
266  * @vfc_cmd:    ibmvfc command struct
267  *
268  * Return value:
269  *      SCSI result value to return for completed command
270  **/
271 static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
272 {
273         int err;
274         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
275         int fc_rsp_len = rsp->fcp_rsp_len;
276
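        /*
         * If FCP_RSP_LEN_VALID is set, the response-info field must be 0, 4,
         * or 8 bytes long and its RSP_CODE must be zero; anything else is
         * treated as a failed command.
         */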
277         if ((rsp->flags & FCP_RSP_LEN_VALID) &&
278             ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
279              rsp->data.info.rsp_code))
280                 return DID_ERROR << 16;
281
282         err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
283         if (err >= 0)
284                 return rsp->scsi_status | (cmd_status[err].result << 16);
285         return rsp->scsi_status | (DID_ERROR << 16);
286 }
287
288 /**
289  * ibmvfc_retry_cmd - Determine if error status is retryable
290  * @status:             status / error class
291  * @error:              error
292  *
293  * Return value:
294  *      1 if error should be retried / 0 if it should not
295  **/
296 static int ibmvfc_retry_cmd(u16 status, u16 error)
297 {
298         int rc = ibmvfc_get_err_index(status, error);
299
300         if (rc >= 0)
301                 return cmd_status[rc].retry;
302         return 1;
303 }
304
305 static const char *unknown_fc_explain = "unknown fc explain";
306
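/*
 * The ls_explain and gs_explain tables below decode the FC "reason code
 * explanation" values carried in extended link service rejects (LS_RJT) and
 * in generic/name-server (CT) rejects respectively.  They back the
 * ibmvfc_get_ls_explain() and ibmvfc_get_gs_explain() helpers used when
 * logging login and query failures.
 */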
307 static const struct {
308         u16 fc_explain;
309         char *name;
310 } ls_explain [] = {
311         { 0x00, "no additional explanation" },
312         { 0x01, "service parameter error - options" },
313         { 0x03, "service parameter error - initiator control" },
314         { 0x05, "service parameter error - recipient control" },
315         { 0x07, "service parameter error - received data field size" },
316         { 0x09, "service parameter error - concurrent seq" },
317         { 0x0B, "service parameter error - credit" },
318         { 0x0D, "invalid N_Port/F_Port_Name" },
319         { 0x0E, "invalid node/Fabric Name" },
320         { 0x0F, "invalid common service parameters" },
321         { 0x11, "invalid association header" },
322         { 0x13, "association header required" },
323         { 0x15, "invalid originator S_ID" },
324         { 0x17, "invalid OX_ID-RX-ID combination" },
325         { 0x19, "command (request) already in progress" },
326         { 0x1E, "N_Port Login requested" },
327         { 0x1F, "Invalid N_Port_ID" },
328 };
329
330 static const struct {
331         u16 fc_explain;
332         char *name;
333 } gs_explain [] = {
334         { 0x00, "no additional explanation" },
335         { 0x01, "port identifier not registered" },
336         { 0x02, "port name not registered" },
337         { 0x03, "node name not registered" },
338         { 0x04, "class of service not registered" },
339         { 0x06, "initial process associator not registered" },
340         { 0x07, "FC-4 TYPEs not registered" },
341         { 0x08, "symbolic port name not registered" },
342         { 0x09, "symbolic node name not registered" },
343         { 0x0A, "port type not registered" },
344         { 0xF0, "authorization exception" },
345         { 0xF1, "authentication exception" },
346         { 0xF2, "data base full" },
347         { 0xF3, "data base empty" },
348         { 0xF4, "processing request" },
349         { 0xF5, "unable to verify connection" },
350         { 0xF6, "devices not in a common zone" },
351 };
352
353 /**
354  * ibmvfc_get_ls_explain - Return the FC Explain description text
355  * @status:     FC Explain status
356  *
357  * Returns:
358  *      error string
359  **/
360 static const char *ibmvfc_get_ls_explain(u16 status)
361 {
362         int i;
363
364         for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
365                 if (ls_explain[i].fc_explain == status)
366                         return ls_explain[i].name;
367
368         return unknown_fc_explain;
369 }
370
371 /**
372  * ibmvfc_get_gs_explain - Return the FC Explain description text
373  * @status:     FC Explain status
374  *
375  * Returns:
376  *      error string
377  **/
378 static const char *ibmvfc_get_gs_explain(u16 status)
379 {
380         int i;
381
382         for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
383                 if (gs_explain[i].fc_explain == status)
384                         return gs_explain[i].name;
385
386         return unknown_fc_explain;
387 }
388
389 static const struct {
390         enum ibmvfc_fc_type fc_type;
391         char *name;
392 } fc_type [] = {
393         { IBMVFC_FABRIC_REJECT, "fabric reject" },
394         { IBMVFC_PORT_REJECT, "port reject" },
395         { IBMVFC_LS_REJECT, "ELS reject" },
396         { IBMVFC_FABRIC_BUSY, "fabric busy" },
397         { IBMVFC_PORT_BUSY, "port busy" },
398         { IBMVFC_BASIC_REJECT, "basic reject" },
399 };
400
401 static const char *unknown_fc_type = "unknown fc type";
402
403 /**
404  * ibmvfc_get_fc_type - Return the FC Type description text
405  * @status:     FC Type error status
406  *
407  * Returns:
408  *      error string
409  **/
410 static const char *ibmvfc_get_fc_type(u16 status)
411 {
412         int i;
413
414         for (i = 0; i < ARRAY_SIZE(fc_type); i++)
415                 if (fc_type[i].fc_type == status)
416                         return fc_type[i].name;
417
418         return unknown_fc_type;
419 }
420
421 /**
422  * ibmvfc_set_tgt_action - Set the next init action for the target
423  * @tgt:                ibmvfc target struct
424  * @action:             action to perform
425  *
426  **/
427 static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
428                                   enum ibmvfc_target_action action)
429 {
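        /*
         * Once a target has been marked for rport deletion the action is
         * sticky; no other action may override it until the deletion is done.
         */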
430         switch (tgt->action) {
431         case IBMVFC_TGT_ACTION_DEL_RPORT:
432                 break;
433         default:
434                 tgt->action = action;
435                 break;
436         }
437 }
438
439 /**
440  * ibmvfc_set_host_state - Set the state for the host
441  * @vhost:              ibmvfc host struct
442  * @state:              state to set host to
443  *
444  * Returns:
445  *      0 if state changed / non-zero if not changed
446  **/
447 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
448                                   enum ibmvfc_host_state state)
449 {
450         int rc = 0;
451
452         switch (vhost->state) {
453         case IBMVFC_HOST_OFFLINE:
454                 rc = -EINVAL;
455                 break;
456         default:
457                 vhost->state = state;
458                 break;
459         }
460
461         return rc;
462 }
463
464 /**
465  * ibmvfc_set_host_action - Set the next init action for the host
466  * @vhost:              ibmvfc host struct
467  * @action:             action to perform
468  *
469  **/
470 static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
471                                    enum ibmvfc_host_action action)
472 {
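        /*
         * Only certain transitions are honoured: ALLOC_TGTS is accepted only
         * from INIT_WAIT, INIT_WAIT only from INIT, TGT_INIT only from
         * ALLOC_TGTS, and QUERY only from INIT_WAIT, NONE, or TGT_ADD.  The
         * actions grouped with the default case are always accepted.
         */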
473         switch (action) {
474         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
475                 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
476                         vhost->action = action;
477                 break;
478         case IBMVFC_HOST_ACTION_INIT_WAIT:
479                 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
480                         vhost->action = action;
481                 break;
482         case IBMVFC_HOST_ACTION_QUERY:
483                 switch (vhost->action) {
484                 case IBMVFC_HOST_ACTION_INIT_WAIT:
485                 case IBMVFC_HOST_ACTION_NONE:
486                 case IBMVFC_HOST_ACTION_TGT_ADD:
487                         vhost->action = action;
488                         break;
489                 default:
490                         break;
491                 }
492                 break;
493         case IBMVFC_HOST_ACTION_TGT_INIT:
494                 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
495                         vhost->action = action;
496                 break;
497         case IBMVFC_HOST_ACTION_INIT:
498         case IBMVFC_HOST_ACTION_TGT_DEL:
499         case IBMVFC_HOST_ACTION_QUERY_TGTS:
500         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
501         case IBMVFC_HOST_ACTION_TGT_ADD:
502         case IBMVFC_HOST_ACTION_NONE:
503         default:
504                 vhost->action = action;
505                 break;
506         }
507 }
508
509 /**
510  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
511  * @vhost:              ibmvfc host struct
512  *
513  * Return value:
514  *      nothing
515  **/
516 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
517 {
518         if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
519                 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
520                         scsi_block_requests(vhost->host);
521                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
522                 }
523         } else
524                 vhost->reinit = 1;
525
526         wake_up(&vhost->work_wait_q);
527 }
528
529 /**
530  * ibmvfc_link_down - Handle a link down event from the adapter
531  * @vhost:      ibmvfc host struct
532  * @state:      ibmvfc host state to enter
533  *
534  **/
535 static void ibmvfc_link_down(struct ibmvfc_host *vhost,
536                              enum ibmvfc_host_state state)
537 {
538         struct ibmvfc_target *tgt;
539
540         ENTER;
541         scsi_block_requests(vhost->host);
542         list_for_each_entry(tgt, &vhost->targets, queue)
543                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
544         ibmvfc_set_host_state(vhost, state);
545         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
546         vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
547         wake_up(&vhost->work_wait_q);
548         LEAVE;
549 }
550
551 /**
552  * ibmvfc_init_host - Start host initialization
553  * @vhost:              ibmvfc host struct
554  * @relogin:    is this a re-login?
555  *
556  * Return value:
557  *      nothing
558  **/
559 static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
560 {
561         struct ibmvfc_target *tgt;
562
563         if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
564                 if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
565                         dev_err(vhost->dev,
566                                 "Host initialization retries exceeded. Taking adapter offline\n");
567                         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
568                         return;
569                 }
570         }
571
572         if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
573                 if (!relogin) {
574                         memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
575                         vhost->async_crq.cur = 0;
576                 }
577
578                 list_for_each_entry(tgt, &vhost->targets, queue)
579                         tgt->need_login = 1;
580                 scsi_block_requests(vhost->host);
581                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
582                 vhost->job_step = ibmvfc_npiv_login;
583                 wake_up(&vhost->work_wait_q);
584         }
585 }
586
587 /**
588  * ibmvfc_send_crq - Send a CRQ
589  * @vhost:      ibmvfc host struct
590  * @word1:      the first 64 bits of the data
591  * @word2:      the second 64 bits of the data
592  *
593  * Return value:
594  *      0 on success / other on failure
595  **/
596 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
597 {
598         struct vio_dev *vdev = to_vio_dev(vhost->dev);
599         return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
600 }
601
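/*
 * CRQ initialization messages follow the same convention the ibmvscsi driver
 * uses: the leading byte 0xC0 marks an initialization message and the second
 * byte selects "init" (0x01) or "init complete" (0x02), which gives the
 * 0xC001... and 0xC002... constants sent below.
 */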
602 /**
603  * ibmvfc_send_crq_init - Send a CRQ init message
604  * @vhost:      ibmvfc host struct
605  *
606  * Return value:
607  *      0 on success / other on failure
608  **/
609 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
610 {
611         ibmvfc_dbg(vhost, "Sending CRQ init\n");
612         return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
613 }
614
615 /**
616  * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
617  * @vhost:      ibmvfc host struct
618  *
619  * Return value:
620  *      0 on success / other on failure
621  **/
622 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
623 {
624         ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
625         return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
626 }
627
628 /**
629  * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
630  * @vhost:      ibmvfc host struct
631  *
632  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
633  * the crq with the hypervisor.
634  **/
635 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
636 {
637         long rc;
638         struct vio_dev *vdev = to_vio_dev(vhost->dev);
639         struct ibmvfc_crq_queue *crq = &vhost->crq;
640
641         ibmvfc_dbg(vhost, "Releasing CRQ\n");
642         free_irq(vdev->irq, vhost);
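        /*
         * H_FREE_CRQ may report busy while the hypervisor tears the queue
         * down; keep retrying until the close completes.
         */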
643         do {
644                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
645         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
646
647         vhost->state = IBMVFC_NO_CRQ;
648         dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
649         free_page((unsigned long)crq->msgs);
650 }
651
652 /**
653  * ibmvfc_reenable_crq_queue - reenables the CRQ
654  * @vhost:      ibmvfc host struct
655  *
656  * Return value:
657  *      0 on success / other on failure
658  **/
659 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
660 {
661         int rc;
662         struct vio_dev *vdev = to_vio_dev(vhost->dev);
663
664         /* Re-enable the CRQ */
665         do {
666                 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
667         } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
668
669         if (rc)
670                 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
671
672         return rc;
673 }
674
675 /**
676  * ibmvfc_reset_crq - resets a crq after a failure
677  * @vhost:      ibmvfc host struct
678  *
679  * Return value:
680  *      0 on success / other on failure
681  **/
682 static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
683 {
684         int rc;
685         struct vio_dev *vdev = to_vio_dev(vhost->dev);
686         struct ibmvfc_crq_queue *crq = &vhost->crq;
687
688         /* Close the CRQ */
689         do {
690                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
691         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
692
693         vhost->state = IBMVFC_NO_CRQ;
694         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
695
696         /* Clean out the queue */
697         memset(crq->msgs, 0, PAGE_SIZE);
698         crq->cur = 0;
699
700         /* And re-open it again */
701         rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
702                                 crq->msg_token, PAGE_SIZE);
703
704         if (rc == H_CLOSED)
705                 /* Adapter is good, but other end is not ready */
706                 dev_warn(vhost->dev, "Partner adapter not ready\n");
707         else if (rc != 0)
708                 dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
709
710         return rc;
711 }
712
713 /**
714  * ibmvfc_valid_event - Determines if event is valid.
715  * @pool:       event_pool that contains the event
716  * @evt:        ibmvfc event to be checked for validity
717  *
718  * Return value:
719  *      1 if event is valid / 0 if event is not valid
720  **/
721 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
722                               struct ibmvfc_event *evt)
723 {
724         int index = evt - pool->events;
725         if (index < 0 || index >= pool->size)   /* outside of bounds */
726                 return 0;
727         if (evt != pool->events + index)        /* unaligned */
728                 return 0;
729         return 1;
730 }
731
732 /**
733  * ibmvfc_free_event - Free the specified event
734  * @evt:        ibmvfc_event to be freed
735  *
736  **/
737 static void ibmvfc_free_event(struct ibmvfc_event *evt)
738 {
739         struct ibmvfc_host *vhost = evt->vhost;
740         struct ibmvfc_event_pool *pool = &vhost->pool;
741
742         BUG_ON(!ibmvfc_valid_event(pool, evt));
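        /*
         * atomic_inc_return() must take free from 0 to 1; any other value
         * means the event was already on the free list (double free).
         */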
743         BUG_ON(atomic_inc_return(&evt->free) != 1);
744         list_add_tail(&evt->queue, &vhost->free);
745 }
746
747 /**
748  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
749  * @evt:        ibmvfc event struct
750  *
751  * This function does not setup any error status, that must be done
752  * before this function gets called.
753  **/
754 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
755 {
756         struct scsi_cmnd *cmnd = evt->cmnd;
757
758         if (cmnd) {
759                 scsi_dma_unmap(cmnd);
760                 cmnd->scsi_done(cmnd);
761         }
762
763         if (evt->eh_comp)
764                 complete(evt->eh_comp);
765
766         ibmvfc_free_event(evt);
767 }
768
769 /**
770  * ibmvfc_fail_request - Fail request with specified error code
771  * @evt:                ibmvfc event struct
772  * @error_code: error code to fail request with
773  *
774  * Return value:
775  *      none
776  **/
777 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
778 {
779         if (evt->cmnd) {
780                 evt->cmnd->result = (error_code << 16);
781                 evt->done = ibmvfc_scsi_eh_done;
782         } else
783                 evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;
784
785         list_del(&evt->queue);
786         del_timer(&evt->timer);
787         ibmvfc_trc_end(evt);
788         evt->done(evt);
789 }
790
791 /**
792  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
793  * @vhost:              ibmvfc host struct
794  * @error_code: error code to fail requests with
795  *
796  * Return value:
797  *      none
798  **/
799 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
800 {
801         struct ibmvfc_event *evt, *pos;
802
803         ibmvfc_dbg(vhost, "Purging all requests\n");
804         list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
805                 ibmvfc_fail_request(evt, error_code);
806 }
807
808 /**
809  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
810  * @vhost:      struct ibmvfc host to reset
811  **/
812 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
813 {
814         int rc;
815
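        /*
         * Fail any outstanding requests, then close and re-register the CRQ
         * and send a new init message.  If any step fails the link is marked
         * dead; otherwise it is marked down until the init exchange completes.
         */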
816         scsi_block_requests(vhost->host);
817         ibmvfc_purge_requests(vhost, DID_ERROR);
818         if ((rc = ibmvfc_reset_crq(vhost)) ||
819             (rc = ibmvfc_send_crq_init(vhost)) ||
820             (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
821                 dev_err(vhost->dev, "Error after reset rc=%d\n", rc);
822                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
823         } else
824                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
825 }
826
827 /**
828  * ibmvfc_reset_host - Reset the connection to the server
829  * @vhost:      struct ibmvfc host to reset
830  **/
831 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
832 {
833         unsigned long flags;
834
835         spin_lock_irqsave(vhost->host->host_lock, flags);
836         __ibmvfc_reset_host(vhost);
837         spin_unlock_irqrestore(vhost->host->host_lock, flags);
838 }
839
840 /**
841  * ibmvfc_retry_host_init - Retry host initialization if allowed
842  * @vhost:      ibmvfc host struct
843  *
844  **/
845 static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
846 {
847         if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
848                 if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
849                         dev_err(vhost->dev,
850                                 "Host initialization retries exceeded. Taking adapter offline\n");
851                         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
852                 } else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
853                         __ibmvfc_reset_host(vhost);
854                 else
855                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
856         }
857
858         wake_up(&vhost->work_wait_q);
859 }
860
861 /**
862  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
863  * @starget:    scsi target struct
864  *
865  * Return value:
866  *      ibmvfc_target struct / NULL if not found
867  **/
868 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
869 {
870         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
871         struct ibmvfc_host *vhost = shost_priv(shost);
872         struct ibmvfc_target *tgt;
873
874         list_for_each_entry(tgt, &vhost->targets, queue)
875                 if (tgt->target_id == starget->id) {
876                         kref_get(&tgt->kref);
877                         return tgt;
878                 }
879         return NULL;
880 }
881
882 /**
883  * ibmvfc_get_target - Find the specified scsi_target
884  * @starget:    scsi target struct
885  *
886  * Return value:
887  *      ibmvfc_target struct / NULL if not found
888  **/
889 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
890 {
891         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
892         struct ibmvfc_target *tgt;
893         unsigned long flags;
894
895         spin_lock_irqsave(shost->host_lock, flags);
896         tgt = __ibmvfc_get_target(starget);
897         spin_unlock_irqrestore(shost->host_lock, flags);
898         return tgt;
899 }
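/*
 * Note: ibmvfc_get_target() returns with a reference held on the target.
 * Callers drop it with kref_put(&tgt->kref, ibmvfc_release_tgt), as the
 * fc transport attribute getters below do.
 */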
900
901 /**
902  * ibmvfc_get_host_speed - Get host port speed
903  * @shost:              scsi host struct
904  *
905  * Return value:
906  *      none
907  **/
908 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
909 {
910         struct ibmvfc_host *vhost = shost_priv(shost);
911         unsigned long flags;
912
913         spin_lock_irqsave(shost->host_lock, flags);
914         if (vhost->state == IBMVFC_ACTIVE) {
915                 switch (vhost->login_buf->resp.link_speed / 100) {
916                 case 1:
917                         fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
918                         break;
919                 case 2:
920                         fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
921                         break;
922                 case 4:
923                         fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
924                         break;
925                 case 8:
926                         fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
927                         break;
928                 case 10:
929                         fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
930                         break;
931                 case 16:
932                         fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
933                         break;
934                 default:
935                         ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n",
936                                    vhost->login_buf->resp.link_speed / 100);
937                         fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
938                         break;
939                 }
940         } else
941                 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
942         spin_unlock_irqrestore(shost->host_lock, flags);
943 }
944
945 /**
946  * ibmvfc_get_host_port_state - Get host port state
947  * @shost:              scsi host struct
948  *
949  * Return value:
950  *      none
951  **/
952 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
953 {
954         struct ibmvfc_host *vhost = shost_priv(shost);
955         unsigned long flags;
956
957         spin_lock_irqsave(shost->host_lock, flags);
958         switch (vhost->state) {
959         case IBMVFC_INITIALIZING:
960         case IBMVFC_ACTIVE:
961                 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
962                 break;
963         case IBMVFC_LINK_DOWN:
964                 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
965                 break;
966         case IBMVFC_LINK_DEAD:
967         case IBMVFC_HOST_OFFLINE:
968                 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
969                 break;
970         case IBMVFC_HALTED:
971                 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
972                 break;
973         case IBMVFC_NO_CRQ:
974                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
975                 break;
976         default:
977                 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
978                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
979                 break;
980         }
981         spin_unlock_irqrestore(shost->host_lock, flags);
982 }
983
984 /**
985  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
986  * @rport:              rport struct
987  * @timeout:    timeout value
988  *
989  * Return value:
990  *      none
991  **/
992 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
993 {
994         if (timeout)
995                 rport->dev_loss_tmo = timeout;
996         else
997                 rport->dev_loss_tmo = 1;
998 }
999
1000 /**
1001  * ibmvfc_release_tgt - Free memory allocated for a target
1002  * @kref:               kref struct
1003  *
1004  **/
1005 static void ibmvfc_release_tgt(struct kref *kref)
1006 {
1007         struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1008         kfree(tgt);
1009 }
1010
1011 /**
1012  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1013  * @starget:    scsi target struct
1014  *
1015  * Return value:
1016  *      none
1017  **/
1018 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1019 {
1020         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1021         fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
1022         if (tgt)
1023                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1024 }
1025
1026 /**
1027  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1028  * @starget:    scsi target struct
1029  *
1030  * Return value:
1031  *      none
1032  **/
1033 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1034 {
1035         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1036         fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1037         if (tgt)
1038                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1039 }
1040
1041 /**
1042  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1043  * @starget:    scsi target struct
1044  *
1045  * Return value:
1046  *      none
1047  **/
1048 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1049 {
1050         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1051         fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1052         if (tgt)
1053                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1054 }
1055
1056 /**
1057  * ibmvfc_wait_while_resetting - Wait while the host resets
1058  * @vhost:              ibmvfc host struct
1059  *
1060  * Return value:
1061  *      0 on success / other on failure
1062  **/
1063 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1064 {
1065         long timeout = wait_event_timeout(vhost->init_wait_q,
1066                                           ((vhost->state == IBMVFC_ACTIVE ||
1067                                             vhost->state == IBMVFC_HOST_OFFLINE ||
1068                                             vhost->state == IBMVFC_LINK_DEAD) &&
1069                                            vhost->action == IBMVFC_HOST_ACTION_NONE),
1070                                           (init_timeout * HZ));
1071
1072         return timeout ? 0 : -EIO;
1073 }
1074
1075 /**
1076  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1077  * @shost:              scsi host struct
1078  *
1079  * Return value:
1080  *      0 on success / other on failure
1081  **/
1082 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1083 {
1084         struct ibmvfc_host *vhost = shost_priv(shost);
1085
1086         dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1087         ibmvfc_reset_host(vhost);
1088         return ibmvfc_wait_while_resetting(vhost);
1089 }
1090
1091 /**
1092  * ibmvfc_gather_partition_info - Gather info about the LPAR
1093  *
1094  * @vhost:      ibmvfc host struct
1095  *      none
1096  **/
1097 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1098 {
1099         struct device_node *rootdn;
1100         const char *name;
1101         const unsigned int *num;
1102
1103         rootdn = of_find_node_by_path("/");
1104         if (!rootdn)
1105                 return;
1106
1107         name = of_get_property(rootdn, "ibm,partition-name", NULL);
1108         if (name)
1109                 strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1110         num = of_get_property(rootdn, "ibm,partition-no", NULL);
1111         if (num)
1112                 vhost->partition_number = *num;
1113         of_node_put(rootdn);
1114 }
1115
1116 /**
1117  * ibmvfc_set_login_info - Setup info for NPIV login
1118  * @vhost:      ibmvfc host struct
1119  *
1120  * Return value:
1121  *      none
1122  **/
1123 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1124 {
1125         struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1126         struct device_node *of_node = vhost->dev->archdata.of_node;
1127         const char *location;
1128
1129         memset(login_info, 0, sizeof(*login_info));
1130
1131         login_info->ostype = IBMVFC_OS_LINUX;
1132         login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
1133         login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
1134         login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
1135         login_info->partition_num = vhost->partition_number;
1136         login_info->vfc_frame_version = 1;
1137         login_info->fcp_version = 3;
1138         if (vhost->client_migrated)
1139                 login_info->flags = IBMVFC_CLIENT_MIGRATED;
1140
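        /*
         * Ask for enough commands to cover both normal I/O (max_requests)
         * and driver-internal requests (MADs, task management).
         */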
1141         login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1142         login_info->capabilities = IBMVFC_CAN_MIGRATE;
1143         login_info->async.va = vhost->async_crq.msg_token;
1144         login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
1145         strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1146         strncpy(login_info->device_name,
1147                 vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
1148
1149         location = of_get_property(of_node, "ibm,loc-code", NULL);
1150         location = location ? location : vhost->dev->bus_id;
1151         strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1152 }
1153
1154 /**
1155  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
1156  * @vhost:      ibmvfc host who owns the event pool
1157  *
1158  * Returns zero on success.
1159  **/
1160 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
1161 {
1162         int i;
1163         struct ibmvfc_event_pool *pool = &vhost->pool;
1164
1165         ENTER;
1166         pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1167         pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
1168         if (!pool->events)
1169                 return -ENOMEM;
1170
1171         pool->iu_storage = dma_alloc_coherent(vhost->dev,
1172                                               pool->size * sizeof(*pool->iu_storage),
1173                                               &pool->iu_token, 0);
1174
1175         if (!pool->iu_storage) {
1176                 kfree(pool->events);
1177                 return -ENOMEM;
1178         }
1179
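        /*
         * Each event owns one IU slot in the coherent iu_storage buffer;
         * crq.ioba holds the DMA address of that slot, which is what gets
         * handed to the server in the CRQ when the event is sent.
         */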
1180         for (i = 0; i < pool->size; ++i) {
1181                 struct ibmvfc_event *evt = &pool->events[i];
1182                 atomic_set(&evt->free, 1);
1183                 evt->crq.valid = 0x80;
1184                 evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
1185                 evt->xfer_iu = pool->iu_storage + i;
1186                 evt->vhost = vhost;
1187                 evt->ext_list = NULL;
1188                 list_add_tail(&evt->queue, &vhost->free);
1189         }
1190
1191         LEAVE;
1192         return 0;
1193 }
1194
1195 /**
1196  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
1197  * @vhost:      ibmvfc host who owns the event pool
1198  *
1199  **/
1200 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
1201 {
1202         int i;
1203         struct ibmvfc_event_pool *pool = &vhost->pool;
1204
1205         ENTER;
1206         for (i = 0; i < pool->size; ++i) {
1207                 list_del(&pool->events[i].queue);
1208                 BUG_ON(atomic_read(&pool->events[i].free) != 1);
1209                 if (pool->events[i].ext_list)
1210                         dma_pool_free(vhost->sg_pool,
1211                                       pool->events[i].ext_list,
1212                                       pool->events[i].ext_list_token);
1213         }
1214
1215         kfree(pool->events);
1216         dma_free_coherent(vhost->dev,
1217                           pool->size * sizeof(*pool->iu_storage),
1218                           pool->iu_storage, pool->iu_token);
1219         LEAVE;
1220 }
1221
1222 /**
1223  * ibmvfc_get_event - Gets the next free event in pool
1224  * @vhost:      ibmvfc host struct
1225  *
1226  * Returns a free event from the pool.
1227  **/
1228 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
1229 {
1230         struct ibmvfc_event *evt;
1231
1232         BUG_ON(list_empty(&vhost->free));
1233         evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
1234         atomic_set(&evt->free, 0);
1235         list_del(&evt->queue);
1236         return evt;
1237 }
1238
1239 /**
1240  * ibmvfc_init_event - Initialize fields in an event struct that are always
1241  *                              required.
1242  * @evt:        The event
1243  * @done:       Routine to call when the event is responded to
1244  * @format:     SRP or MAD format
1245  **/
1246 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1247                               void (*done) (struct ibmvfc_event *), u8 format)
1248 {
1249         evt->cmnd = NULL;
1250         evt->sync_iu = NULL;
1251         evt->crq.format = format;
1252         evt->done = done;
1253         evt->eh_comp = NULL;
1254 }
1255
1256 /**
1257  * ibmvfc_map_sg_list - Initialize scatterlist
1258  * @scmd:       scsi command struct
1259  * @nseg:       number of scatterlist segments
1260  * @md: memory descriptor list to initialize
1261  **/
1262 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1263                                struct srp_direct_buf *md)
1264 {
1265         int i;
1266         struct scatterlist *sg;
1267
1268         scsi_for_each_sg(scmd, sg, nseg, i) {
1269                 md[i].va = sg_dma_address(sg);
1270                 md[i].len = sg_dma_len(sg);
1271                 md[i].key = 0;
1272         }
1273 }
1274
1275 /**
1276  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes decriptor fields
1277  * @scmd:               Scsi_Cmnd with the scatterlist
1278  * @evt:                ibmvfc event struct
1279  * @vfc_cmd:    vfc_cmd that contains the memory descriptor
1280  * @dev:                device for which to map dma memory
1281  *
1282  * Returns:
1283  *      0 on success / non-zero on failure
1284  **/
1285 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1286                               struct ibmvfc_event *evt,
1287                               struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1288 {
1289
1290         int sg_mapped;
1291         struct srp_direct_buf *data = &vfc_cmd->ioba;
1292         struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1293
1294         sg_mapped = scsi_dma_map(scmd);
1295         if (!sg_mapped) {
1296                 vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
1297                 return 0;
1298         } else if (unlikely(sg_mapped < 0)) {
1299                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1300                         scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1301                 return sg_mapped;
1302         }
1303
1304         if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1305                 vfc_cmd->flags |= IBMVFC_WRITE;
1306                 vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
1307         } else {
1308                 vfc_cmd->flags |= IBMVFC_READ;
1309                 vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
1310         }
1311
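        /*
         * A single mapped segment is described directly in the command.
         * Larger scatterlists use an indirect descriptor list, allocated
         * per event from sg_pool on first use.
         */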
1312         if (sg_mapped == 1) {
1313                 ibmvfc_map_sg_list(scmd, sg_mapped, data);
1314                 return 0;
1315         }
1316
1317         vfc_cmd->flags |= IBMVFC_SCATTERLIST;
1318
1319         if (!evt->ext_list) {
1320                 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1321                                                &evt->ext_list_token);
1322
1323                 if (!evt->ext_list) {
1324                         scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1325                         return -ENOMEM;
1326                 }
1327         }
1328
1329         ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1330
1331         data->va = evt->ext_list_token;
1332         data->len = sg_mapped * sizeof(struct srp_direct_buf);
1333         data->key = 0;
1334         return 0;
1335 }
1336
1337 /**
1338  * ibmvfc_timeout - Internal command timeout handler
1339  * @evt:        struct ibmvfc_event that timed out
1340  *
1341  * Called when an internally generated command times out
1342  **/
1343 static void ibmvfc_timeout(struct ibmvfc_event *evt)
1344 {
1345         struct ibmvfc_host *vhost = evt->vhost;
1346         dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1347         ibmvfc_reset_host(vhost);
1348 }
1349
1350 /**
1351  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1352  * @evt:                event to be sent
1353  * @vhost:              ibmvfc host struct
1354  * @timeout:    timeout in seconds - 0 means do not time command
1355  *
1356  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1357  **/
1358 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1359                              struct ibmvfc_host *vhost, unsigned long timeout)
1360 {
1361         u64 *crq_as_u64 = (u64 *) &evt->crq;
1362         int rc;
1363
1364         /* Copy the IU into the transfer area */
1365         *evt->xfer_iu = evt->iu;
1366         if (evt->crq.format == IBMVFC_CMD_FORMAT)
1367                 evt->xfer_iu->cmd.tag = (u64)evt;
1368         else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1369                 evt->xfer_iu->mad_common.tag = (u64)evt;
1370         else
1371                 BUG();
1372
1373         list_add_tail(&evt->queue, &vhost->sent);
1374         init_timer(&evt->timer);
1375
1376         if (timeout) {
1377                 evt->timer.data = (unsigned long) evt;
1378                 evt->timer.expires = jiffies + (timeout * HZ);
1379                 evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
1380                 add_timer(&evt->timer);
1381         }
1382
1383         if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
1384                 list_del(&evt->queue);
1385                 del_timer(&evt->timer);
1386
1387                 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1388                  * Firmware will send a CRQ with a transport event (0xFF) to
1389                  * tell this client what has happened to the transport. This
1390                  * will be handled in ibmvfc_handle_crq()
1391                  */
1392                 if (rc == H_CLOSED) {
1393                         if (printk_ratelimit())
1394                                 dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1395                         if (evt->cmnd)
1396                                 scsi_dma_unmap(evt->cmnd);
1397                         ibmvfc_free_event(evt);
1398                         return SCSI_MLQUEUE_HOST_BUSY;
1399                 }
1400
1401                 dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1402                 if (evt->cmnd) {
1403                         evt->cmnd->result = DID_ERROR << 16;
1404                         evt->done = ibmvfc_scsi_eh_done;
1405                 } else
1406                         evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;
1407
1408                 evt->done(evt);
1409         } else
1410                 ibmvfc_trc_start(evt);
1411
1412         return 0;
1413 }
1414
1415 /**
1416  * ibmvfc_log_error - Log an error for the failed command if appropriate
1417  * @evt:        ibmvfc event to log
1418  *
1419  **/
1420 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1421 {
1422         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1423         struct ibmvfc_host *vhost = evt->vhost;
1424         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1425         struct scsi_cmnd *cmnd = evt->cmnd;
1426         const char *err = unknown_error;
1427         int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
1428         int logerr = 0;
1429         int rsp_code = 0;
1430
1431         if (index >= 0) {
1432                 logerr = cmd_status[index].log;
1433                 err = cmd_status[index].name;
1434         }
1435
1436         if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1437                 return;
1438
1439         if (rsp->flags & FCP_RSP_LEN_VALID)
1440                 rsp_code = rsp->data.info.rsp_code;
1441
1442         scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
1443                     "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1444                     cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
1445                     rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1446 }
1447
1448 /**
1449  * ibmvfc_scsi_done - Handle responses from commands
1450  * @evt:        ibmvfc event to be handled
1451  *
1452  * Used as a callback when sending scsi cmds.
1453  **/
1454 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1455 {
1456         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1457         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1458         struct scsi_cmnd *cmnd = evt->cmnd;
1459         u32 rsp_len = 0;
1460         u32 sense_len = rsp->fcp_sense_len;
1461
1462         if (cmnd) {
1463                 if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
1464                         scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
1465                 else if (rsp->flags & FCP_RESID_UNDER)
1466                         scsi_set_resid(cmnd, rsp->fcp_resid);
1467                 else
1468                         scsi_set_resid(cmnd, 0);
1469
1470                 if (vfc_cmd->status) {
1471                         cmnd->result = ibmvfc_get_err_result(vfc_cmd);
1472
1473                         if (rsp->flags & FCP_RSP_LEN_VALID)
1474                                 rsp_len = rsp->fcp_rsp_len;
1475                         if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1476                                 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1477                         if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1478                                 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
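                        /* A port login required response from the VIOS means
                         * the target has implicitly logged us out; reinit the
                         * host so targets are rediscovered and logged back in. */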
1479                         if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
1480                                 ibmvfc_reinit_host(evt->vhost);
1481
1482                         if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1483                                 cmnd->result = (DID_ERROR << 16);
1484
1485                         ibmvfc_log_error(evt);
1486                 }
1487
1488                 if (!cmnd->result &&
1489                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1490                         cmnd->result = (DID_ERROR << 16);
1491
1492                 scsi_dma_unmap(cmnd);
1493                 cmnd->scsi_done(cmnd);
1494         }
1495
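        /* If error recovery is waiting on this command (see
         * ibmvfc_wait_for_ops), signal its completion. */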
1496         if (evt->eh_comp)
1497                 complete(evt->eh_comp);
1498
1499         ibmvfc_free_event(evt);
1500 }
1501
1502 /**
1503  * ibmvfc_host_chkready - Check if the host can accept commands
1504  * @vhost:       struct ibmvfc host
1505  *
1506  * Returns:
1507  *      0 if the host can accept commands / SCSI result (DID_* << 16) if not
1508  **/
1509 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1510 {
1511         int result = 0;
1512
1513         switch (vhost->state) {
1514         case IBMVFC_LINK_DEAD:
1515         case IBMVFC_HOST_OFFLINE:
1516                 result = DID_NO_CONNECT << 16;
1517                 break;
1518         case IBMVFC_NO_CRQ:
1519         case IBMVFC_INITIALIZING:
1520         case IBMVFC_HALTED:
1521         case IBMVFC_LINK_DOWN:
1522                 result = DID_REQUEUE << 16;
1523                 break;
1524         case IBMVFC_ACTIVE:
1525                 result = 0;
1526                 break;
1527         };
1528
1529         return result;
1530 }
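
/*
 * A caller typically folds this result straight into the command, as
 * ibmvfc_queuecommand() does below; a rough sketch:
 *
 *	if ((rc = fc_remote_port_chkready(rport)) ||
 *	    (rc = ibmvfc_host_chkready(vhost))) {
 *		cmnd->result = rc;
 *		done(cmnd);
 *		return 0;
 *	}
 */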
1531
1532 /**
1533  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1534  * @cmnd:       struct scsi_cmnd to be executed
1535  * @done:       Callback function to be called when cmnd is completed
1536  *
1537  * Returns:
1538  *      0 on success / other on failure
1539  **/
1540 static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
1541                                void (*done) (struct scsi_cmnd *))
1542 {
1543         struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
1544         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1545         struct ibmvfc_cmd *vfc_cmd;
1546         struct ibmvfc_event *evt;
1547         u8 tag[2];
1548         int rc;
1549
1550         if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1551             unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1552                 cmnd->result = rc;
1553                 done(cmnd);
1554                 return 0;
1555         }
1556
1557         cmnd->result = (DID_OK << 16);
1558         evt = ibmvfc_get_event(vhost);
1559         ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1560         evt->cmnd = cmnd;
1561         cmnd->scsi_done = done;
1562         vfc_cmd = &evt->iu.cmd;
1563         memset(vfc_cmd, 0, sizeof(*vfc_cmd));
1564         vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1565         vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
1566         vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
1567         vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
1568         vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
1569         vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
1570         vfc_cmd->tgt_scsi_id = rport->port_id;
1571         if ((rport->supported_classes & FC_COS_CLASS3) &&
1572             (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3))
1573                 vfc_cmd->flags = IBMVFC_CLASS_3_ERR;
1574         vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
1575         int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
1576         memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
1577
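        /* Map the SCSI-2 queue tag message, if any, onto the equivalent FCP
         * task attribute in the command IU. */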
1578         if (scsi_populate_tag_msg(cmnd, tag)) {
1579                 vfc_cmd->task_tag = tag[1];
1580                 switch (tag[0]) {
1581                 case MSG_SIMPLE_TAG:
1582                         vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
1583                         break;
1584                 case MSG_HEAD_TAG:
1585                         vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
1586                         break;
1587                 case MSG_ORDERED_TAG:
1588                         vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
1589                         break;
1590                 };
1591         }
1592
1593         if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1594                 return ibmvfc_send_event(evt, vhost, 0);
1595
1596         ibmvfc_free_event(evt);
1597         if (rc == -ENOMEM)
1598                 return SCSI_MLQUEUE_HOST_BUSY;
1599
1600         if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1601                 scmd_printk(KERN_ERR, cmnd,
1602                             "Failed to map DMA buffer for command. rc=%d\n", rc);
1603
1604         cmnd->result = DID_ERROR << 16;
1605         done(cmnd);
1606         return 0;
1607 }
1608
1609 /**
1610  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1611  * @evt:        ibmvfc event struct
1612  *
1613  **/
1614 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1615 {
1616         /* copy the response back */
1617         if (evt->sync_iu)
1618                 *evt->sync_iu = *evt->xfer_iu;
1619
1620         complete(&evt->comp);
1621 }
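
/*
 * Callers needing synchronous behaviour (ibmvfc_reset_device,
 * ibmvfc_abort_task_set and ibmvfc_cancel_all below) follow the same pattern:
 * point evt->sync_iu at a local response buffer, init_completion(&evt->comp),
 * send the event, drop the host lock, then wait_for_completion(&evt->comp)
 * before inspecting the response.
 */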
1622
1623 /**
1624  * ibmvfc_reset_device - Reset the device with the specified reset type
1625  * @sdev:       scsi device to reset
1626  * @type:       reset type
1627  * @desc:       reset type description for log messages
1628  *
1629  * Returns:
1630  *      0 on success / other on failure
1631  **/
1632 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
1633 {
1634         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1635         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1636         struct ibmvfc_cmd *tmf;
1637         struct ibmvfc_event *evt = NULL;
1638         union ibmvfc_iu rsp_iu;
1639         struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
1640         int rsp_rc = -EBUSY;
1641         unsigned long flags;
1642         int rsp_code = 0;
1643
1644         spin_lock_irqsave(vhost->host->host_lock, flags);
1645         if (vhost->state == IBMVFC_ACTIVE) {
1646                 evt = ibmvfc_get_event(vhost);
1647                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
1648
1649                 tmf = &evt->iu.cmd;
1650                 memset(tmf, 0, sizeof(*tmf));
1651                 tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1652                 tmf->resp.len = sizeof(tmf->rsp);
1653                 tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
1654                 tmf->payload_len = sizeof(tmf->iu);
1655                 tmf->resp_len = sizeof(tmf->rsp);
1656                 tmf->cancel_key = (unsigned long)sdev->hostdata;
1657                 tmf->tgt_scsi_id = rport->port_id;
1658                 int_to_scsilun(sdev->lun, &tmf->iu.lun);
1659                 tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
1660                 tmf->iu.tmf_flags = type;
1661                 evt->sync_iu = &rsp_iu;
1662
1663                 init_completion(&evt->comp);
1664                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
1665         }
1666         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1667
1668         if (rsp_rc != 0) {
1669                 sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
1670                             desc, rsp_rc);
1671                 return -EIO;
1672         }
1673
1674         sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
1675         wait_for_completion(&evt->comp);
1676
1677         if (rsp_iu.cmd.status) {
1678                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
1679                         rsp_code = fc_rsp->data.info.rsp_code;
1680
1681                 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
1682                             "flags: %x fcp_rsp: %x, scsi_status: %x\n",
1683                             desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
1684                             rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
1685                             fc_rsp->scsi_status);
1686                 rsp_rc = -EIO;
1687         } else
1688                 sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
1689
1690         spin_lock_irqsave(vhost->host->host_lock, flags);
1691         ibmvfc_free_event(evt);
1692         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1693         return rsp_rc;
1694 }
1695
1696 /**
1697  * ibmvfc_abort_task_set - Abort outstanding commands to the device
1698  * @sdev:       scsi device to abort commands
1699  *
1700  * This sends an Abort Task Set to the device via the VIOS. It does NOT send
1701  * a cancel MAD to the VIOS; that must be done separately (see ibmvfc_cancel_all).
1702  *
1703  * Returns:
1704  *      0 on success / other on failure
1705  **/
1706 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
1707 {
1708         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1709         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1710         struct ibmvfc_cmd *tmf;
1711         struct ibmvfc_event *evt, *found_evt;
1712         union ibmvfc_iu rsp_iu;
1713         struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
1714         int rsp_rc = -EBUSY;
1715         unsigned long flags;
1716         int rsp_code = 0;
1717
1718         spin_lock_irqsave(vhost->host->host_lock, flags);
1719         found_evt = NULL;
1720         list_for_each_entry(evt, &vhost->sent, queue) {
1721                 if (evt->cmnd && evt->cmnd->device == sdev) {
1722                         found_evt = evt;
1723                         break;
1724                 }
1725         }
1726
1727         if (!found_evt) {
1728                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1729                         sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
1730                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1731                 return 0;
1732         }
1733
1734         if (vhost->state == IBMVFC_ACTIVE) {
1735                 evt = ibmvfc_get_event(vhost);
1736                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
1737
1738                 tmf = &evt->iu.cmd;
1739                 memset(tmf, 0, sizeof(*tmf));
1740                 tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1741                 tmf->resp.len = sizeof(tmf->rsp);
1742                 tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
1743                 tmf->payload_len = sizeof(tmf->iu);
1744                 tmf->resp_len = sizeof(tmf->rsp);
1745                 tmf->cancel_key = (unsigned long)sdev->hostdata;
1746                 tmf->tgt_scsi_id = rport->port_id;
1747                 int_to_scsilun(sdev->lun, &tmf->iu.lun);
1748                 tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
1749                 tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
1750                 evt->sync_iu = &rsp_iu;
1751
1752                 init_completion(&evt->comp);
1753                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
1754         }
1755
1756         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1757
1758         if (rsp_rc != 0) {
1759                 sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
1760                 return -EIO;
1761         }
1762
1763         sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
1764         wait_for_completion(&evt->comp);
1765
1766         if (rsp_iu.cmd.status) {
1767                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
1768                         rsp_code = fc_rsp->data.info.rsp_code;
1769
1770                 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
1771                             "flags: %x fcp_rsp: %x, scsi_status: %x\n",
1772                             ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
1773                             rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
1774                             fc_rsp->scsi_status);
1775                 rsp_rc = -EIO;
1776         } else
1777                 sdev_printk(KERN_INFO, sdev, "Abort successful\n");
1778
1779         spin_lock_irqsave(vhost->host->host_lock, flags);
1780         ibmvfc_free_event(evt);
1781         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1782         return rsp_rc;
1783 }
1784
1785 /**
1786  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
1787  * @sdev:       scsi device to cancel commands
1788  * @type:       type of error recovery being performed
1789  *
1790  * This sends a cancel to the VIOS for the specified device. This does
1791  * NOT send any abort to the actual device. That must be done separately.
1792  *
1793  * Returns:
1794  *      0 on success / other on failure
1795  **/
1796 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
1797 {
1798         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1799         struct scsi_target *starget = scsi_target(sdev);
1800         struct fc_rport *rport = starget_to_rport(starget);
1801         struct ibmvfc_tmf *tmf;
1802         struct ibmvfc_event *evt, *found_evt;
1803         union ibmvfc_iu rsp;
1804         int rsp_rc = -EBUSY;
1805         unsigned long flags;
1806         u16 status;
1807
1808         ENTER;
1809         spin_lock_irqsave(vhost->host->host_lock, flags);
1810         found_evt = NULL;
1811         list_for_each_entry(evt, &vhost->sent, queue) {
1812                 if (evt->cmnd && evt->cmnd->device == sdev) {
1813                         found_evt = evt;
1814                         break;
1815                 }
1816         }
1817
1818         if (!found_evt) {
1819                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1820                         sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
1821                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1822                 return 0;
1823         }
1824
1825         if (vhost->state == IBMVFC_ACTIVE) {
1826                 evt = ibmvfc_get_event(vhost);
1827                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1828
1829                 tmf = &evt->iu.tmf;
1830                 memset(tmf, 0, sizeof(*tmf));
1831                 tmf->common.version = 1;
1832                 tmf->common.opcode = IBMVFC_TMF_MAD;
1833                 tmf->common.length = sizeof(*tmf);
1834                 tmf->scsi_id = rport->port_id;
1835                 int_to_scsilun(sdev->lun, &tmf->lun);
1836                 tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
1837                 tmf->cancel_key = (unsigned long)sdev->hostdata;
1838                 tmf->my_cancel_key = (unsigned long)starget->hostdata;
1839
1840                 evt->sync_iu = &rsp;
1841                 init_completion(&evt->comp);
1842                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
1843         }
1844
1845         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1846
1847         if (rsp_rc != 0) {
1848                 sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
1849                 return -EIO;
1850         }
1851
1852         sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
1853
1854         wait_for_completion(&evt->comp);
1855         status = rsp.mad_common.status;
1856         spin_lock_irqsave(vhost->host->host_lock, flags);
1857         ibmvfc_free_event(evt);
1858         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1859
1860         if (status != IBMVFC_MAD_SUCCESS) {
1861                 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
1862                 return -EIO;
1863         }
1864
1865         sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
1866         return 0;
1867 }
1868
1869 /**
1870  * ibmvfc_match_target - Match function for specified target
1871  * @evt:        ibmvfc event struct
1872  * @device:     device to match (starget)
1873  *
1874  * Returns:
1875  *      1 if event matches starget / 0 if event does not match starget
1876  **/
1877 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
1878 {
1879         if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
1880                 return 1;
1881         return 0;
1882 }
1883
1884 /**
1885  * ibmvfc_match_lun - Match function for specified LUN
1886  * @evt:        ibmvfc event struct
1887  * @device:     device to match (sdev)
1888  *
1889  * Returns:
1890  *      1 if event matches sdev / 0 if event does not match sdev
1891  **/
1892 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
1893 {
1894         if (evt->cmnd && evt->cmnd->device == device)
1895                 return 1;
1896         return 0;
1897 }
1898
1899 /**
1900  * ibmvfc_wait_for_ops - Wait for ops to complete
1901  * @vhost:      ibmvfc host struct
1902  * @device:     device to match (starget or sdev)
1903  * @match:      match function
1904  *
1905  * Returns:
1906  *      SUCCESS / FAILED
1907  **/
1908 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
1909                                int (*match) (struct ibmvfc_event *, void *))
1910 {
1911         struct ibmvfc_event *evt;
1912         DECLARE_COMPLETION_ONSTACK(comp);
1913         int wait;
1914         unsigned long flags;
1915         signed long timeout = init_timeout * HZ;
1916
1917         ENTER;
1918         do {
1919                 wait = 0;
1920                 spin_lock_irqsave(vhost->host->host_lock, flags);
1921                 list_for_each_entry(evt, &vhost->sent, queue) {
1922                         if (match(evt, device)) {
1923                                 evt->eh_comp = &comp;
1924                                 wait++;
1925                         }
1926                 }
1927                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1928
1929                 if (wait) {
1930                         timeout = wait_for_completion_timeout(&comp, timeout);
1931
1932                         if (!timeout) {
1933                                 wait = 0;
1934                                 spin_lock_irqsave(vhost->host->host_lock, flags);
1935                                 list_for_each_entry(evt, &vhost->sent, queue) {
1936                                         if (match(evt, device)) {
1937                                                 evt->eh_comp = NULL;
1938                                                 wait++;
1939                                         }
1940                                 }
1941                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1942                                 if (wait)
1943                                         dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
1944                                 LEAVE;
1945                                 return wait ? FAILED : SUCCESS;
1946                         }
1947                 }
1948         } while (wait);
1949
1950         LEAVE;
1951         return SUCCESS;
1952 }
1953
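/*
 * The error handlers below share a two-step strategy: issue a cancel to the
 * VIOS and/or an abort/reset TMF to the device, then use
 * ibmvfc_wait_for_ops() to wait for the affected commands to be completed
 * back through evt->eh_comp.
 */
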
1954 /**
1955  * ibmvfc_eh_abort_handler - Abort a command
1956  * @cmd:        scsi command to abort
1957  *
1958  * Returns:
1959  *      SUCCESS / FAILED
1960  **/
1961 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
1962 {
1963         struct scsi_device *sdev = cmd->device;
1964         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1965         int cancel_rc, abort_rc;
1966         int rc = FAILED;
1967
1968         ENTER;
1969         ibmvfc_wait_while_resetting(vhost);
1970         cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
1971         abort_rc = ibmvfc_abort_task_set(sdev);
1972
1973         if (!cancel_rc && !abort_rc)
1974                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
1975
1976         LEAVE;
1977         return rc;
1978 }
1979
1980 /**
1981  * ibmvfc_eh_device_reset_handler - Reset a single LUN
1982  * @cmd:        scsi command struct
1983  *
1984  * Returns:
1985  *      SUCCESS / FAILED
1986  **/
1987 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
1988 {
1989         struct scsi_device *sdev = cmd->device;
1990         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1991         int cancel_rc, reset_rc;
1992         int rc = FAILED;
1993
1994         ENTER;
1995         ibmvfc_wait_while_resetting(vhost);
1996         cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
1997         reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
1998
1999         if (!cancel_rc && !reset_rc)
2000                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2001
2002         LEAVE;
2003         return rc;
2004 }
2005
2006 /**
2007  * ibmvfc_dev_cancel_all - Per-device callback to cancel all outstanding commands
2008  * @sdev:       scsi device struct
2009  * @data:       pointer to return code (ORed with the cancel result)
2010  *
2011  **/
2012 static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data)
2013 {
2014         unsigned long *rc = data;
2015         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2016 }
2017
2018 /**
2019  * ibmvfc_dev_abort_all - Per-device callback to abort the outstanding task set
2020  * @sdev:       scsi device struct
2021  * @data:       pointer to return code (ORed with the abort result)
2022  *
2023  **/
2024 static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
2025 {
2026         unsigned long *rc = data;
2027         *rc |= ibmvfc_abort_task_set(sdev);
2028 }
2029
2030 /**
2031  * ibmvfc_eh_target_reset_handler - Reset the target
2032  * @cmd:        scsi command struct
2033  *
2034  * Returns:
2035  *      SUCCESS / FAILED
2036  **/
2037 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2038 {
2039         struct scsi_device *sdev = cmd->device;
2040         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2041         struct scsi_target *starget = scsi_target(sdev);
2042         int reset_rc;
2043         int rc = FAILED;
2044         unsigned long cancel_rc = 0;
2045
2046         ENTER;
2047         ibmvfc_wait_while_resetting(vhost);
2048         starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
2049         reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2050
2051         if (!cancel_rc && !reset_rc)
2052                 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2053
2054         LEAVE;
2055         return rc;
2056 }
2057
2058 /**
2059  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2060  * @cmd:        struct scsi_cmnd having problems
2061  *
2062  **/
2063 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2064 {
2065         int rc;
2066         struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2067
2068         dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2069         rc = ibmvfc_issue_fc_host_lip(vhost->host);
2070         return rc ? FAILED : SUCCESS;
2071 }
2072
2073 /**
2074  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
2075  * @rport:              rport struct
2076  *
2077  * Return value:
2078  *      none
2079  **/
2080 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2081 {
2082         struct scsi_target *starget = to_scsi_target(&rport->dev);
2083         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2084         struct ibmvfc_host *vhost = shost_priv(shost);
2085         unsigned long cancel_rc = 0;
2086         unsigned long abort_rc = 0;
2087         int rc = FAILED;
2088
2089         ENTER;
2090         starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
2091         starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
2092
2093         if (!cancel_rc && !abort_rc)
2094                 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2095
2096         if (rc == FAILED)
2097                 ibmvfc_issue_fc_host_lip(shost);
2098         LEAVE;
2099 }
2100
2101 static const struct {
2102         enum ibmvfc_async_event ae;
2103         const char *desc;
2104 } ae_desc[] = {
2105         { IBMVFC_AE_ELS_PLOGI,          "PLOGI" },
2106         { IBMVFC_AE_ELS_LOGO,           "LOGO" },
2107         { IBMVFC_AE_ELS_PRLO,           "PRLO" },
2108         { IBMVFC_AE_SCN_NPORT,          "N-Port SCN" },
2109         { IBMVFC_AE_SCN_GROUP,          "Group SCN" },
2110         { IBMVFC_AE_SCN_DOMAIN,         "Domain SCN" },
2111         { IBMVFC_AE_SCN_FABRIC,         "Fabric SCN" },
2112         { IBMVFC_AE_LINK_UP,            "Link Up" },
2113         { IBMVFC_AE_LINK_DOWN,          "Link Down" },
2114         { IBMVFC_AE_LINK_DEAD,          "Link Dead" },
2115         { IBMVFC_AE_HALT,                       "Halt" },
2116         { IBMVFC_AE_RESUME,             "Resume" },
2117         { IBMVFC_AE_ADAPTER_FAILED,     "Adapter Failed" },
2118 };
2119
2120 static const char *unknown_ae = "Unknown async";
2121
2122 /**
2123  * ibmvfc_get_ae_desc - Get text description for async event
2124  * @ae: async event
2125  *
2126  **/
2127 static const char *ibmvfc_get_ae_desc(u64 ae)
2128 {
2129         int i;
2130
2131         for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2132                 if (ae_desc[i].ae == ae)
2133                         return ae_desc[i].desc;
2134
2135         return unknown_ae;
2136 }
2137
2138 /**
2139  * ibmvfc_handle_async - Handle an async event from the adapter
2140  * @crq:        crq to process
2141  * @vhost:      ibmvfc host struct
2142  *
2143  **/
2144 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2145                                 struct ibmvfc_host *vhost)
2146 {
2147         const char *desc = ibmvfc_get_ae_desc(crq->event);
2148
2149         ibmvfc_log(vhost, 3, "%s event received. scsi_id: %lx, wwpn: %lx,"
2150                    " node_name: %lx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
2151
2152         switch (crq->event) {
2153         case IBMVFC_AE_LINK_UP:
2154         case IBMVFC_AE_RESUME:
2155                 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2156                 ibmvfc_init_host(vhost, 1);
2157                 break;
2158         case IBMVFC_AE_SCN_FABRIC:
2159                 vhost->events_to_log |= IBMVFC_AE_RSCN;
2160                 ibmvfc_init_host(vhost, 1);
2161                 break;
2162         case IBMVFC_AE_SCN_NPORT:
2163         case IBMVFC_AE_SCN_GROUP:
2164         case IBMVFC_AE_SCN_DOMAIN:
2165                 vhost->events_to_log |= IBMVFC_AE_RSCN;
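                /* fall through - SCNs, like the ELS events below, require a host reinit */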
2166         case IBMVFC_AE_ELS_LOGO:
2167         case IBMVFC_AE_ELS_PRLO:
2168         case IBMVFC_AE_ELS_PLOGI:
2169                 ibmvfc_reinit_host(vhost);
2170                 break;
2171         case IBMVFC_AE_LINK_DOWN:
2172         case IBMVFC_AE_ADAPTER_FAILED:
2173                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2174                 break;
2175         case IBMVFC_AE_LINK_DEAD:
2176                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2177                 break;
2178         case IBMVFC_AE_HALT:
2179                 ibmvfc_link_down(vhost, IBMVFC_HALTED);
2180                 break;
2181         default:
2182                 dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event);
2183                 break;
2184         };
2185 }
2186
2187 /**
2188  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
2189  * @crq:        Command/Response queue
2190  * @vhost:      ibmvfc host struct
2191  *
2192  **/
2193 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2194 {
2195         long rc;
2196         struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;
2197
2198         switch (crq->valid) {
2199         case IBMVFC_CRQ_INIT_RSP:
2200                 switch (crq->format) {
2201                 case IBMVFC_CRQ_INIT:
2202                         dev_info(vhost->dev, "Partner initialized\n");
2203                         /* Send back a response */
2204                         rc = ibmvfc_send_crq_init_complete(vhost);
2205                         if (rc == 0)
2206                                 ibmvfc_init_host(vhost, 0);
2207                         else
2208                                 dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
2209                         break;
2210                 case IBMVFC_CRQ_INIT_COMPLETE:
2211                         dev_info(vhost->dev, "Partner initialization complete\n");
2212                         ibmvfc_init_host(vhost, 0);
2213                         break;
2214                 default:
2215                         dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
2216                 }
2217                 return;
2218         case IBMVFC_CRQ_XPORT_EVENT:
2219                 vhost->state = IBMVFC_NO_CRQ;
2220                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2221                 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2222                         /* We need to re-setup the interpartition connection */
2223                         dev_info(vhost->dev, "Re-enabling adapter\n");
2224                         vhost->client_migrated = 1;
2225                         ibmvfc_purge_requests(vhost, DID_REQUEUE);
2226                         if ((rc = ibmvfc_reenable_crq_queue(vhost)) ||
2227                             (rc = ibmvfc_send_crq_init(vhost))) {
2228                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2229                                 dev_err(vhost->dev, "Error after enable (rc=%ld)\n", rc);
2230                         } else
2231                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2232                 } else {
2233                         dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
2234
2235                         ibmvfc_purge_requests(vhost, DID_ERROR);
2236                         if ((rc = ibmvfc_reset_crq(vhost)) ||
2237                             (rc = ibmvfc_send_crq_init(vhost))) {
2238                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2239                                 dev_err(vhost->dev, "Error after reset (rc=%ld)\n", rc);
2240                         } else
2241                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2242                 }
2243                 return;
2244         case IBMVFC_CRQ_CMD_RSP:
2245                 break;
2246         default:
2247                 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
2248                 return;
2249         }
2250
2251         if (crq->format == IBMVFC_ASYNC_EVENT)
2252                 return;
2253
2254         /* The only kind of payload CRQs we should get are responses to
2255          * things we send. Make sure this response is to something we
2256          * actually sent
2257          */
2258         if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
2259                 dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n",
2260                         crq->ioba);
2261                 return;
2262         }
2263
2264         if (unlikely(atomic_read(&evt->free))) {
2265                 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n",
2266                         crq->ioba);
2267                 return;
2268         }
2269
2270         del_timer(&evt->timer);
2271         list_del(&evt->queue);
2272         ibmvfc_trc_end(evt);
2273         evt->done(evt);
2274 }
2275
2276 /**
2277  * ibmvfc_scan_finished - Check if the device scan is done.
2278  * @shost:      scsi host struct
2279  * @time:       current elapsed time
2280  *
2281  * Returns:
2282  *      0 if scan is not done / 1 if scan is done
2283  **/
2284 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2285 {
2286         unsigned long flags;
2287         struct ibmvfc_host *vhost = shost_priv(shost);
2288         int done = 0;
2289
2290         spin_lock_irqsave(shost->host_lock, flags);
2291         if (time >= (init_timeout * HZ)) {
2292                 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
2293                          "continuing initialization\n", init_timeout);
2294                 done = 1;
2295         }
2296
2297         if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
2298                 done = 1;
2299         spin_unlock_irqrestore(shost->host_lock, flags);
2300         return done;
2301 }
2302
2303 /**
2304  * ibmvfc_slave_alloc - Setup the device's task set value
2305  * @sdev:       struct scsi_device device to configure
2306  *
2307  * Assign the device a unique task set value (later used as the command
2308  * cancel key) so that error handling works as expected.
2309  *
2310  * Returns:
2311  *      0 on success / -ENXIO if device does not exist
2312  **/
2313 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
2314 {
2315         struct Scsi_Host *shost = sdev->host;
2316         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2317         struct ibmvfc_host *vhost = shost_priv(shost);
2318         unsigned long flags = 0;
2319
2320         if (!rport || fc_remote_port_chkready(rport))
2321                 return -ENXIO;
2322
2323         spin_lock_irqsave(shost->host_lock, flags);
2324         sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
2325         spin_unlock_irqrestore(shost->host_lock, flags);
2326         return 0;
2327 }
2328
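/*
 * The task_set values handed out here and in ibmvfc_target_alloc() below are
 * what later feed vfc_cmd->cancel_key in ibmvfc_queuecommand() and the
 * cancel_key/my_cancel_key fields in ibmvfc_cancel_all(), so error recovery
 * can be scoped to a single device or target.
 */
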
2329 /**
2330  * ibmvfc_target_alloc - Setup the target's task set value
2331  * @starget:    struct scsi_target
2332  *
2333  * Assign the target a unique task set value (used as my_cancel_key when
2334  * cancelling commands) so that error handling works as expected.
2335  *
2336  * Returns:
2337  *      0 on success / -ENXIO if device does not exist
2338  **/
2339 static int ibmvfc_target_alloc(struct scsi_target *starget)
2340 {
2341         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2342         struct ibmvfc_host *vhost = shost_priv(shost);
2343         unsigned long flags = 0;
2344
2345         spin_lock_irqsave(shost->host_lock, flags);
2346         starget->hostdata = (void *)(unsigned long)vhost->task_set++;
2347         spin_unlock_irqrestore(shost->host_lock, flags);
2348         return 0;
2349 }
2350
2351 /**
2352  * ibmvfc_slave_configure - Configure the device
2353  * @sdev:       struct scsi_device device to configure
2354  *
2355  * Enable allow_restart for a device if it is a disk, set up tagged command
2356  * queuing, and set the rport dev_loss timeout.
2357  *
2358  * Returns:
2359  *      0
2360  **/
2361 static int ibmvfc_slave_configure(struct scsi_device *sdev)
2362 {
2363         struct Scsi_Host *shost = sdev->host;
2364         struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2365         unsigned long flags = 0;
2366
2367         spin_lock_irqsave(shost->host_lock, flags);
2368         if (sdev->type == TYPE_DISK)
2369                 sdev->allow_restart = 1;
2370
2371         if (sdev->tagged_supported) {
2372                 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
2373                 scsi_activate_tcq(sdev, sdev->queue_depth);
2374         } else
2375                 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2376
2377         rport->dev_loss_tmo = dev_loss_tmo;
2378         spin_unlock_irqrestore(shost->host_lock, flags);
2379         return 0;
2380 }
2381
2382 /**
2383  * ibmvfc_change_queue_depth - Change the device's queue depth
2384  * @sdev:       scsi device struct
2385  * @qdepth:     depth to set
2386  *
2387  * Return value:
2388  *      actual depth set
2389  **/
2390 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2391 {
2392         if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
2393                 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
2394
2395         scsi_adjust_queue_depth(sdev, 0, qdepth);
2396         return sdev->queue_depth;
2397 }
2398
2399 /**
2400  * ibmvfc_change_queue_type - Change the device's queue type
2401  * @sdev:               scsi device struct
2402  * @tag_type:   type of tags to use
2403  *
2404  * Return value:
2405  *      actual queue type set
2406  **/
2407 static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
2408 {
2409         if (sdev->tagged_supported) {
2410                 scsi_set_tag_type(sdev, tag_type);
2411
2412                 if (tag_type)
2413                         scsi_activate_tcq(sdev, sdev->queue_depth);
2414                 else
2415                         scsi_deactivate_tcq(sdev, sdev->queue_depth);
2416         } else
2417                 tag_type = 0;
2418
2419         return tag_type;
2420 }
2421
2422 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2423                                                  struct device_attribute *attr, char *buf)
2424 {
2425         struct Scsi_Host *shost = class_to_shost(dev);
2426         struct ibmvfc_host *vhost = shost_priv(shost);
2427
2428         return snprintf(buf, PAGE_SIZE, "%s\n",
2429                         vhost->login_buf->resp.partition_name);
2430 }
2431
2432 static struct device_attribute ibmvfc_host_partition_name = {
2433         .attr = {
2434                 .name = "partition_name",
2435                 .mode = S_IRUGO,
2436         },
2437         .show = ibmvfc_show_host_partition_name,
2438 };
2439
2440 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2441                                             struct device_attribute *attr, char *buf)
2442 {
2443         struct Scsi_Host *shost = class_to_shost(dev);
2444         struct ibmvfc_host *vhost = shost_priv(shost);
2445
2446         return snprintf(buf, PAGE_SIZE, "%s\n",
2447                         vhost->login_buf->resp.device_name);
2448 }
2449
2450 static struct device_attribute ibmvfc_host_device_name = {
2451         .attr = {
2452                 .name = "device_name",
2453                 .mode = S_IRUGO,
2454         },
2455         .show = ibmvfc_show_host_device_name,
2456 };
2457
2458 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2459                                          struct device_attribute *attr, char *buf)
2460 {
2461         struct Scsi_Host *shost = class_to_shost(dev);
2462         struct ibmvfc_host *vhost = shost_priv(shost);
2463
2464         return snprintf(buf, PAGE_SIZE, "%s\n",
2465                         vhost->login_buf->resp.port_loc_code);
2466 }
2467
2468 static struct device_attribute ibmvfc_host_loc_code = {
2469         .attr = {
2470                 .name = "port_loc_code",
2471                 .mode = S_IRUGO,
2472         },
2473         .show = ibmvfc_show_host_loc_code,
2474 };
2475
2476 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2477                                          struct device_attribute *attr, char *buf)
2478 {
2479         struct Scsi_Host *shost = class_to_shost(dev);
2480         struct ibmvfc_host *vhost = shost_priv(shost);
2481
2482         return snprintf(buf, PAGE_SIZE, "%s\n",
2483                         vhost->login_buf->resp.drc_name);
2484 }
2485
2486 static struct device_attribute ibmvfc_host_drc_name = {
2487         .attr = {
2488                 .name = "drc_name",
2489                 .mode = S_IRUGO,
2490         },
2491         .show = ibmvfc_show_host_drc_name,
2492 };
2493
2494 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2495                                              struct device_attribute *attr, char *buf)
2496 {
2497         struct Scsi_Host *shost = class_to_shost(dev);
2498         struct ibmvfc_host *vhost = shost_priv(shost);
2499         return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2500 }
2501
2502 static struct device_attribute ibmvfc_host_npiv_version = {
2503         .attr = {
2504                 .name = "npiv_version",
2505                 .mode = S_IRUGO,
2506         },
2507         .show = ibmvfc_show_host_npiv_version,
2508 };
2509
2510 /**
2511  * ibmvfc_show_log_level - Show the adapter's error logging level
2512  * @dev:        class device struct
2513  * @buf:        buffer
2514  *
2515  * Return value:
2516  *      number of bytes printed to buffer
2517  **/
2518 static ssize_t ibmvfc_show_log_level(struct device *dev,
2519                                      struct device_attribute *attr, char *buf)
2520 {
2521         struct Scsi_Host *shost = class_to_shost(dev);
2522         struct ibmvfc_host *vhost = shost_priv(shost);
2523         unsigned long flags = 0;
2524         int len;
2525
2526         spin_lock_irqsave(shost->host_lock, flags);
2527         len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
2528         spin_unlock_irqrestore(shost->host_lock, flags);
2529         return len;
2530 }
2531
2532 /**
2533  * ibmvfc_store_log_level - Change the adapter's error logging level
2534  * @dev:        class device struct
2535  * @buf:        buffer
2536  *
2537  * Return value:
2538  *      number of bytes consumed from buffer
2539  **/
2540 static ssize_t ibmvfc_store_log_level(struct device *dev,
2541                                       struct device_attribute *attr,
2542                                       const char *buf, size_t count)
2543 {
2544         struct Scsi_Host *shost = class_to_shost(dev);
2545         struct ibmvfc_host *vhost = shost_priv(shost);
2546         unsigned long flags = 0;
2547
2548         spin_lock_irqsave(shost->host_lock, flags);
2549         vhost->log_level = simple_strtoul(buf, NULL, 10);
2550         spin_unlock_irqrestore(shost->host_lock, flags);
2551         return strlen(buf);
2552 }
2553
2554 static struct device_attribute ibmvfc_log_level_attr = {
2555         .attr = {
2556                 .name =         "log_level",
2557                 .mode =         S_IRUGO | S_IWUSR,
2558         },
2559         .show = ibmvfc_show_log_level,
2560         .store = ibmvfc_store_log_level
2561 };
2562
2563 #ifdef CONFIG_SCSI_IBMVFC_TRACE
2564 /**
2565  * ibmvfc_read_trace - Dump the adapter trace
2566  * @kobj:               kobject struct
2567  * @bin_attr:   bin_attribute struct
2568  * @buf:                buffer
2569  * @off:                offset
2570  * @count:              buffer size
2571  *
2572  * Return value:
2573  *      number of bytes printed to buffer
2574  **/
2575 static ssize_t ibmvfc_read_trace(struct kobject *kobj,
2576                                  struct bin_attribute *bin_attr,
2577                                  char *buf, loff_t off, size_t count)
2578 {
2579         struct device *dev = container_of(kobj, struct device, kobj);
2580         struct Scsi_Host *shost = class_to_shost(dev);
2581         struct ibmvfc_host *vhost = shost_priv(shost);
2582         unsigned long flags = 0;
2583         int size = IBMVFC_TRACE_SIZE;
2584         char *src = (char *)vhost->trace;
2585
2586         if (off > size)
2587                 return 0;
2588         if (off + count > size) {
2589                 size -= off;
2590                 count = size;
2591         }
2592
2593         spin_lock_irqsave(shost->host_lock, flags);
2594         memcpy(buf, &src[off], count);
2595         spin_unlock_irqrestore(shost->host_lock, flags);
2596         return count;
2597 }
2598
2599 static struct bin_attribute ibmvfc_trace_attr = {
2600         .attr = {
2601                 .name = "trace",
2602                 .mode = S_IRUGO,
2603         },
2604         .size = 0,
2605         .read = ibmvfc_read_trace,
2606 };
2607 #endif
2608
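/*
 * These attributes are registered via shost_attrs in the host template below;
 * on a typical sysfs layout they appear under /sys/class/scsi_host/host<N>/
 * (e.g. .../partition_name, .../log_level).
 */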
2609 static struct device_attribute *ibmvfc_attrs[] = {
2610         &ibmvfc_host_partition_name,
2611         &ibmvfc_host_device_name,
2612         &ibmvfc_host_loc_code,
2613         &ibmvfc_host_drc_name,
2614         &ibmvfc_host_npiv_version,
2615         &ibmvfc_log_level_attr,
2616         NULL
2617 };
2618
2619 static struct scsi_host_template driver_template = {
2620         .module = THIS_MODULE,
2621         .name = "IBM POWER Virtual FC Adapter",
2622         .proc_name = IBMVFC_NAME,
2623         .queuecommand = ibmvfc_queuecommand,
2624         .eh_abort_handler = ibmvfc_eh_abort_handler,
2625         .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
2626         .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
2627         .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
2628         .slave_alloc = ibmvfc_slave_alloc,
2629         .slave_configure = ibmvfc_slave_configure,
2630         .target_alloc = ibmvfc_target_alloc,
2631         .scan_finished = ibmvfc_scan_finished,
2632         .change_queue_depth = ibmvfc_change_queue_depth,
2633         .change_queue_type = ibmvfc_change_queue_type,
2634         .cmd_per_lun = 16,
2635         .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
2636         .this_id = -1,
2637         .sg_tablesize = SG_ALL,
2638         .max_sectors = IBMVFC_MAX_SECTORS,
2639         .use_clustering = ENABLE_CLUSTERING,
2640         .shost_attrs = ibmvfc_attrs,
2641 };
2642
2643 /**
2644  * ibmvfc_next_async_crq - Returns the next entry in async queue
2645  * @vhost:      ibmvfc host struct
2646  *
2647  * Returns:
2648  *      Pointer to next entry in queue / NULL if empty
2649  **/
2650 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
2651 {
2652         struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
2653         struct ibmvfc_async_crq *crq;
2654
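        /* A set high bit in the valid byte marks a posted entry; both the CRQ
         * and async CRQ are treated as circular buffers that wrap at size. */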
2655         crq = &async_crq->msgs[async_crq->cur];
2656         if (crq->valid & 0x80) {
2657                 if (++async_crq->cur == async_crq->size)
2658                         async_crq->cur = 0;
2659         } else
2660                 crq = NULL;
2661
2662         return crq;
2663 }
2664
2665 /**
2666  * ibmvfc_next_crq - Returns the next entry in message queue
2667  * @vhost:      ibmvfc host struct
2668  *
2669  * Returns:
2670  *      Pointer to next entry in queue / NULL if empty
2671  **/
2672 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
2673 {
2674         struct ibmvfc_crq_queue *queue = &vhost->crq;
2675         struct ibmvfc_crq *crq;
2676
2677         crq = &queue->msgs[queue->cur];
2678         if (crq->valid & 0x80) {
2679                 if (++queue->cur == queue->size)
2680                         queue->cur = 0;
2681         } else
2682                 crq = NULL;
2683
2684         return crq;
2685 }
2686
2687 /**
2688  * ibmvfc_interrupt - Interrupt handler
2689  * @irq:                number of irq to handle, not used
2690  * @dev_instance: ibmvfc_host that received interrupt
2691  *
2692  * Returns:
2693  *      IRQ_HANDLED
2694  **/
2695 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
2696 {
2697         struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
2698         struct vio_dev *vdev = to_vio_dev(vhost->dev);
2699         struct ibmvfc_crq *crq;
2700         struct ibmvfc_async_crq *async;
2701         unsigned long flags;
2702         int done = 0;
2703
2704         spin_lock_irqsave(vhost->host->host_lock, flags);
2705         vio_disable_interrupts(to_vio_dev(vhost->dev));
2706         while (!done) {
2707                 /* Pull all the valid messages off the CRQ */
2708                 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2709                         ibmvfc_handle_crq(crq, vhost);
2710                         crq->valid = 0;
2711                 }
2712
2713                 /* Pull all the valid messages off the async CRQ */
2714                 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2715                         ibmvfc_handle_async(async, vhost);
2716                         async->valid = 0;
2717                 }
2718
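                /* Re-enable interrupts, then check both queues once more: an
                 * entry that arrived after the polls above but before
                 * interrupts were re-enabled would otherwise sit unprocessed
                 * until the next interrupt. */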
2719                 vio_enable_interrupts(vdev);
2720                 if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2721                         vio_disable_interrupts(vdev);
2722                         ibmvfc_handle_crq(crq, vhost);
2723                         crq->valid = 0;
2724                 } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2725                         vio_disable_interrupts(vdev);
2726                         ibmvfc_handle_async(async, vhost);
2727                         async->valid = 0;
2728                 } else
2729                         done = 1;
2730         }
2731
2732         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2733         return IRQ_HANDLED;
2734 }
2735
2736 /**
2737  * ibmvfc_init_tgt - Set the next init job step for the target
2738  * @tgt:                ibmvfc target struct
2739  * @job_step:   job step to perform
2740  *
2741  **/
2742 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
2743                             void (*job_step) (struct ibmvfc_target *))
2744 {
2745         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
2746         tgt->job_step = job_step;
2747         wake_up(&tgt->vhost->work_wait_q);
2748 }
2749
2750 /**
2751  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
2752  * @tgt:                ibmvfc target struct
2753  * @job_step:   initialization job step
2754  *
2755  **/
2756 static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2757                                   void (*job_step) (struct ibmvfc_target *))
2758 {
2759         if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
2760                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2761                 wake_up(&tgt->vhost->work_wait_q);
2762         } else
2763                 ibmvfc_init_tgt(tgt, job_step);
2764 }
2765
2766 /**
2767  * ibmvfc_tgt_prli_done - Completion handler for Process Login
2768  * @evt:        ibmvfc event struct
2769  *
2770  **/
2771 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2772 {
2773         struct ibmvfc_target *tgt = evt->tgt;
2774         struct ibmvfc_host *vhost = evt->vhost;
2775         struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
2776         u32 status = rsp->common.status;
2777
2778         vhost->discovery_threads--;
2779         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2780         switch (status) {
2781         case IBMVFC_MAD_SUCCESS:
2782                 tgt_dbg(tgt, "Process Login succeeded\n");
2783                 tgt->need_login = 0;
2784                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
2785                 break;
2786         case IBMVFC_MAD_DRIVER_FAILED:
2787                 break;
2788         case IBMVFC_MAD_CRQ_ERROR:
2789                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2790                 break;
2791         case IBMVFC_MAD_FAILED:
2792         default:
2793                 tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2794                         ibmvfc_get_cmd_error(rsp->status, rsp->error),
2795                         rsp->status, rsp->error, status);
2796                 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2797                         ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2798                 else
2799                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2800                 break;
2801         };
2802
2803         kref_put(&tgt->kref, ibmvfc_release_tgt);
2804         ibmvfc_free_event(evt);
2805         wake_up(&vhost->work_wait_q);
2806 }
2807
2808 /**
2809  * ibmvfc_tgt_send_prli - Send a process login
2810  * @tgt:        ibmvfc target struct
2811  *
2812  **/
2813 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
2814 {
2815         struct ibmvfc_process_login *prli;
2816         struct ibmvfc_host *vhost = tgt->vhost;
2817         struct ibmvfc_event *evt;
2818
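        /* Throttle concurrent discovery commands to the disc_threads module parameter */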
2819         if (vhost->discovery_threads >= disc_threads)
2820                 return;
2821
2822         kref_get(&tgt->kref);
2823         evt = ibmvfc_get_event(vhost);
2824         vhost->discovery_threads++;
2825         ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
2826         evt->tgt = tgt;
2827         prli = &evt->iu.prli;
2828         memset(prli, 0, sizeof(*prli));
2829         prli->common.version = 1;
2830         prli->common.opcode = IBMVFC_PROCESS_LOGIN;
2831         prli->common.length = sizeof(*prli);
2832         prli->scsi_id = tgt->scsi_id;
2833
2834         prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
2835         prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
2836         prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;
2837
2838         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2839         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2840                 vhost->discovery_threads--;
2841                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2842                 kref_put(&tgt->kref, ibmvfc_release_tgt);
2843         } else
2844                 tgt_dbg(tgt, "Sent process login\n");
2845 }
2846
2847 /**
2848  * ibmvfc_tgt_plogi_done - Completion handler for Port Login
2849  * @evt:        ibmvfc event struct
2850  *
2851  **/
2852 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2853 {
2854         struct ibmvfc_target *tgt = evt->tgt;
2855         struct ibmvfc_host *vhost = evt->vhost;
2856         struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
2857         u32 status = rsp->common.status;
2858
2859         vhost->discovery_threads--;
2860         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2861         switch (status) {
2862         case IBMVFC_MAD_SUCCESS:
2863                 tgt_dbg(tgt, "Port Login succeeded\n");
2864                 if (tgt->ids.port_name &&
2865                     tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
2866                         vhost->reinit = 1;
2867                         tgt_dbg(tgt, "Port re-init required\n");
2868                         break;
2869                 }
2870                 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
2871                 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
2872                 tgt->ids.port_id = tgt->scsi_id;
2873                 tgt->ids.roles = FC_PORT_ROLE_FCP_TARGET;
2874                 memcpy(&tgt->service_parms, &rsp->service_parms,
2875                        sizeof(tgt->service_parms));
2876                 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
2877                        sizeof(tgt->service_parms_change));
2878                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
2879                 break;
2880         case IBMVFC_MAD_DRIVER_FAILED:
2881                 break;
2882         case IBMVFC_MAD_CRQ_ERROR:
2883                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2884                 break;
2885         case IBMVFC_MAD_FAILED:
2886         default:
2887                 tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2888                         ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
2889                         ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
2890                         ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2891
2892                 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2893                         ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2894                 else
2895                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2896                 break;
2897         };
2898
2899         kref_put(&tgt->kref, ibmvfc_release_tgt);
2900         ibmvfc_free_event(evt);
2901         wake_up(&vhost->work_wait_q);
2902 }
2903
2904 /**
2905  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
2906  * @tgt:        ibmvfc target struct
2907  *
2908  **/
2909 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
2910 {
2911         struct ibmvfc_port_login *plogi;
2912         struct ibmvfc_host *vhost = tgt->vhost;
2913         struct ibmvfc_event *evt;
2914
2915         if (vhost->discovery_threads >= disc_threads)
2916                 return;
2917
2918         kref_get(&tgt->kref);
2919         evt = ibmvfc_get_event(vhost);
2920         vhost->discovery_threads++;
2921         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2922         ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
2923         evt->tgt = tgt;
2924         plogi = &evt->iu.plogi;
2925         memset(plogi, 0, sizeof(*plogi));
2926         plogi->common.version = 1;
2927         plogi->common.opcode = IBMVFC_PORT_LOGIN;
2928         plogi->common.length = sizeof(*plogi);
2929         plogi->scsi_id = tgt->scsi_id;
2930
2931         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2932                 vhost->discovery_threads--;
2933                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2934                 kref_put(&tgt->kref, ibmvfc_release_tgt);
2935         } else
2936                 tgt_dbg(tgt, "Sent port login\n");
2937 }
2938
2939 /**
2940  * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
2941  * @evt:        ibmvfc event struct
2942  *
2943  **/
2944 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
2945 {
2946         struct ibmvfc_target *tgt = evt->tgt;
2947         struct ibmvfc_host *vhost = evt->vhost;
2948         struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
2949         u32 status = rsp->common.status;
2950
2951         vhost->discovery_threads--;
2952         ibmvfc_free_event(evt);
2953         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2954
2955         switch (status) {
2956         case IBMVFC_MAD_SUCCESS:
2957                 tgt_dbg(tgt, "Implicit Logout succeeded\n");
2958                 break;
2959         case IBMVFC_MAD_DRIVER_FAILED:
2960                 kref_put(&tgt->kref, ibmvfc_release_tgt);
2961                 wake_up(&vhost->work_wait_q);
2962                 return;
2963         case IBMVFC_MAD_FAILED:
2964         default:
2965                 tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
2966                 break;
2967         }
2968
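        /*
         * During initial target setup, follow the implicit logout with a
         * PLOGI.  During a target re-query, a SCSI ID that no longer matches
         * the one Query Target reported (new_scsi_id) means the old rport is
         * stale, so mark it for deletion instead.
         */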
2969         if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
2970                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
2971         else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
2972                  tgt->scsi_id != tgt->new_scsi_id)
2973                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2974         kref_put(&tgt->kref, ibmvfc_release_tgt);
2975         wake_up(&vhost->work_wait_q);
2976 }
2977
2978 /**
2979  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for the specified target
2980  * @tgt:                ibmvfc target struct
2981  *
2982  **/
2983 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
2984 {
2985         struct ibmvfc_implicit_logout *mad;
2986         struct ibmvfc_host *vhost = tgt->vhost;
2987         struct ibmvfc_event *evt;
2988
2989         if (vhost->discovery_threads >= disc_threads)
2990                 return;
2991
2992         kref_get(&tgt->kref);
2993         evt = ibmvfc_get_event(vhost);
2994         vhost->discovery_threads++;
2995         ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
2996         evt->tgt = tgt;
2997         mad = &evt->iu.implicit_logout;
2998         memset(mad, 0, sizeof(*mad));
2999         mad->common.version = 1;
3000         mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
3001         mad->common.length = sizeof(*mad);
3002         mad->old_scsi_id = tgt->scsi_id;
3003
3004         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3005         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3006                 vhost->discovery_threads--;
3007                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3008                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3009         } else
3010                 tgt_dbg(tgt, "Sent Implicit Logout\n");
3011 }
3012
3013 /**
3014  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
3015  * @mad:        ibmvfc passthru mad struct
3016  * @tgt:        ibmvfc target struct
3017  *
3018  * Returns:
3019  *      1 if PLOGI needed / 0 if PLOGI not needed
3020  **/
3021 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
3022                                     struct ibmvfc_target *tgt)
3023 {
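        /*
         * Compare the ADISC response against what is recorded for this
         * target: words 2-3 hold the port name, words 4-5 the node name,
         * and word 6 the N_Port ID.  Any mismatch means we need to log in
         * to the port again.
         */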
3024         if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
3025                    sizeof(tgt->ids.port_name)))
3026                 return 1;
3027         if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
3028                    sizeof(tgt->ids.node_name)))
3029                 return 1;
3030         if (mad->fc_iu.response[6] != tgt->scsi_id)
3031                 return 1;
3032         return 0;
3033 }
3034
3035 /**
3036  * ibmvfc_tgt_adisc_done - Completion handler for ADISC
3037  * @evt:        ibmvfc event struct
3038  *
3039  **/
3040 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3041 {
3042         struct ibmvfc_target *tgt = evt->tgt;
3043         struct ibmvfc_host *vhost = evt->vhost;
3044         struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
3045         u32 status = mad->common.status;
3046         u8 fc_reason, fc_explain;
3047
3048         vhost->discovery_threads--;
3049         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3050
3051         switch (status) {
3052         case IBMVFC_MAD_SUCCESS:
3053                 tgt_dbg(tgt, "ADISC succeeded\n");
3054                 if (ibmvfc_adisc_needs_plogi(mad, tgt))
3055                         tgt->need_login = 1;
3056                 break;
3057         case IBMVFC_MAD_DRIVER_FAILED:
3058                 break;
3059         case IBMVFC_MAD_FAILED:
3060         default:
3061                 tgt->need_login = 1;
3062                 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
3063                 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
3064                 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3065                          ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
3066                          mad->iu.status, mad->iu.error,
3067                          ibmvfc_get_fc_type(fc_reason), fc_reason,
3068                          ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
3069                 break;
3070         }
3071
3072         kref_put(&tgt->kref, ibmvfc_release_tgt);
3073         ibmvfc_free_event(evt);
3074         wake_up(&vhost->work_wait_q);
3075 }
3076
3077 /**
3078  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
3079  * @evt:                ibmvfc event struct
3080  *
3081  **/
3082 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
3083 {
3084         struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
3085
3086         memset(mad, 0, sizeof(*mad));
3087         mad->common.version = 1;
3088         mad->common.opcode = IBMVFC_PASSTHRU;
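        /*
         * The MAD length excludes the FC IU and command IU; those are
         * described by the cmd/rsp descriptors below, each pointing back
         * into this event's mapped buffer at the offset of the
         * corresponding field.
         */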
3089         mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
3090         mad->cmd_ioba.va = (u64)evt->crq.ioba +
3091                 offsetof(struct ibmvfc_passthru_mad, iu);
3092         mad->cmd_ioba.len = sizeof(mad->iu);
3093         mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
3094         mad->iu.rsp_len = sizeof(mad->fc_iu.response);
3095         mad->iu.cmd.va = (u64)evt->crq.ioba +
3096                 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3097                 offsetof(struct ibmvfc_passthru_fc_iu, payload);
3098         mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
3099         mad->iu.rsp.va = (u64)evt->crq.ioba +
3100                 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3101                 offsetof(struct ibmvfc_passthru_fc_iu, response);
3102         mad->iu.rsp.len = sizeof(mad->fc_iu.response);
3103 }
3104
3105 /**
3106  * ibmvfc_tgt_adisc - Initiate an ADISC for the specified target
3107  * @tgt:                ibmvfc target struct
3108  *
3109  **/
3110 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
3111 {
3112         struct ibmvfc_passthru_mad *mad;
3113         struct ibmvfc_host *vhost = tgt->vhost;
3114         struct ibmvfc_event *evt;
3115
3116         if (vhost->discovery_threads >= disc_threads)
3117                 return;
3118
3119         kref_get(&tgt->kref);
3120         evt = ibmvfc_get_event(vhost);
3121         vhost->discovery_threads++;
3122         ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
3123         evt->tgt = tgt;
3124
3125         ibmvfc_init_passthru(evt);
3126         mad = &evt->iu.passthru;
3127         mad->iu.flags = IBMVFC_FC_ELS;
3128         mad->iu.scsi_id = tgt->scsi_id;
3129
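        /*
         * Build the ADISC payload with our own identifiers: port name in
         * words 2-3, node name in words 4-5, and our 24-bit N_Port ID in
         * word 6, the same layout ibmvfc_adisc_needs_plogi() checks in
         * the response.
         */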
3130         mad->fc_iu.payload[0] = IBMVFC_ADISC;
3131         memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
3132                sizeof(vhost->login_buf->resp.port_name));
3133         memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
3134                sizeof(vhost->login_buf->resp.node_name));
3135         mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;
3136
3137         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3138         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3139                 vhost->discovery_threads--;
3140                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3141                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3142         } else
3143                 tgt_dbg(tgt, "Sent ADISC\n");
3144 }
3145
3146 /**
3147  * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
3148  * @evt:        ibmvfc event struct
3149  *
3150  **/
3151 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3152 {
3153         struct ibmvfc_target *tgt = evt->tgt;
3154         struct ibmvfc_host *vhost = evt->vhost;
3155         struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
3156         u32 status = rsp->common.status;
3157
3158         vhost->discovery_threads--;
3159         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3160         switch (status) {
3161         case IBMVFC_MAD_SUCCESS:
3162                 tgt_dbg(tgt, "Query Target succeeded\n");
3163                 tgt->new_scsi_id = rsp->scsi_id;
3164                 if (rsp->scsi_id != tgt->scsi_id)
3165                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3166                 else
3167                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
3168                 break;
3169         case IBMVFC_MAD_DRIVER_FAILED:
3170                 break;
3171         case IBMVFC_MAD_CRQ_ERROR:
3172                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
3173                 break;
3174         case IBMVFC_MAD_FAILED:
3175         default:
3176                 tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3177                         ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3178                         ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3179                         ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3180
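                /*
                 * A fabric-mapped "unable to perform request" with the
                 * "port name not registered" explanation means the fabric
                 * no longer knows this WWPN, so delete the rport rather
                 * than retrying.
                 */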
3181                 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
3182                     rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
3183                     rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
3184                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3185                 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3186                         ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
3187                 else
3188                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3189                 break;
3190         }
3191
3192         kref_put(&tgt->kref, ibmvfc_release_tgt);
3193         ibmvfc_free_event(evt);
3194         wake_up(&vhost->work_wait_q);
3195 }
3196
3197 /**
3198  * ibmvfc_tgt_query_target - Initiate a Query Target for the specified target
3199  * @tgt:        ibmvfc target struct
3200  *
3201  **/
3202 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
3203 {
3204         struct ibmvfc_query_tgt *query_tgt;
3205         struct ibmvfc_host *vhost = tgt->vhost;
3206         struct ibmvfc_event *evt;
3207
3208         if (vhost->discovery_threads >= disc_threads)
3209                 return;
3210
3211         kref_get(&tgt->kref);
3212         evt = ibmvfc_get_event(vhost);
3213         vhost->discovery_threads++;
3214         evt->tgt = tgt;
3215         ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
3216         query_tgt = &evt->iu.query_tgt;
3217         memset(query_tgt, 0, sizeof(*query_tgt));
3218         query_tgt->common.version = 1;
3219         query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
3220         query_tgt->common.length = sizeof(*query_tgt);
3221         query_tgt->wwpn = tgt->ids.port_name;
3222
3223         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3224         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3225                 vhost->discovery_threads--;
3226                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3227                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3228         } else
3229                 tgt_dbg(tgt, "Sent Query Target\n");
3230 }
3231
3232 /**
3233  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
3234  * @vhost:              ibmvfc host struct
3235  * @scsi_id:    SCSI ID to allocate target for
3236  *
3237  * Returns:
3238  *      0 on success / other on failure
3239  **/
3240 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
3241 {
3242         struct ibmvfc_target *tgt;
3243         unsigned long flags;
3244
3245         spin_lock_irqsave(vhost->host->host_lock, flags);
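        /*
         * If a target with this SCSI ID already exists, reuse it and just
         * restart its login if needed rather than allocating a duplicate.
         */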
3246         list_for_each_entry(tgt, &vhost->targets, queue) {
3247                 if (tgt->scsi_id == scsi_id) {
3248                         if (tgt->need_login)
3249                                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3250                         goto unlock_out;
3251                 }
3252         }
3253         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3254
3255         tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
3256         if (!tgt) {
3257                 dev_err(vhost->dev, "Target allocation failure for scsi id %08lx\n",
3258                         scsi_id);
3259                 return -ENOMEM;
3260         }
3261
3262         tgt->scsi_id = scsi_id;
3263         tgt->new_scsi_id = scsi_id;
3264         tgt->vhost = vhost;
3265         tgt->need_login = 1;
3266         kref_init(&tgt->kref);
3267         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3268         spin_lock_irqsave(vhost->host->host_lock, flags);
3269         list_add_tail(&tgt->queue, &vhost->targets);
3270
3271 unlock_out:
3272         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3273         return 0;
3274 }
3275
3276 /**
3277  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
3278  * @vhost:              ibmvfc host struct
3279  *
3280  * Returns:
3281  *      0 on success / other on failure
3282  **/
3283 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
3284 {
3285         int i, rc;
3286
3287         for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
3288                 rc = ibmvfc_alloc_target(vhost,
3289                                          vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
3290
3291         return rc;
3292 }
3293
3294 /**
3295  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
3296  * @evt:        ibmvfc event struct
3297  *
3298  **/
3299 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3300 {
3301         struct ibmvfc_host *vhost = evt->vhost;
3302         struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
3303         u32 mad_status = rsp->common.status;
3304
3305         switch (mad_status) {
3306         case IBMVFC_MAD_SUCCESS:
3307                 ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
3308                 vhost->num_targets = rsp->num_written;
3309                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
3310                 break;
3311         case IBMVFC_MAD_FAILED:
3312                 dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
3313                         ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3314                 ibmvfc_retry_host_init(vhost);
3315                 break;
3316         case IBMVFC_MAD_DRIVER_FAILED:
3317                 break;
3318         default:
3319                 dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
3320                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3321                 break;
3322         }
3323
3324         ibmvfc_free_event(evt);
3325         wake_up(&vhost->work_wait_q);
3326 }
3327
3328 /**
3329  * ibmvfc_discover_targets - Send Discover Targets MAD
3330  * @vhost:      ibmvfc host struct
3331  *
3332  **/
3333 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
3334 {
3335         struct ibmvfc_discover_targets *mad;
3336         struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
3337
3338         ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
3339         mad = &evt->iu.discover_targets;
3340         memset(mad, 0, sizeof(*mad));
3341         mad->common.version = 1;
3342         mad->common.opcode = IBMVFC_DISC_TARGETS;
3343         mad->common.length = sizeof(*mad);
3344         mad->bufflen = vhost->disc_buf_sz;
3345         mad->buffer.va = vhost->disc_buf_dma;
3346         mad->buffer.len = vhost->disc_buf_sz;
3347         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
3348
3349         if (!ibmvfc_send_event(evt, vhost, default_timeout))
3350                 ibmvfc_dbg(vhost, "Sent discover targets\n");
3351         else
3352                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3353 }
3354
3355 /**
3356  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
3357  * @evt:        ibmvfc event struct
3358  *
3359  **/
3360 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3361 {
3362         struct ibmvfc_host *vhost = evt->vhost;
3363         u32 mad_status = evt->xfer_iu->npiv_login.common.status;
3364         struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
3365         unsigned int npiv_max_sectors;
3366
3367         switch (mad_status) {
3368         case IBMVFC_MAD_SUCCESS:
3369                 ibmvfc_free_event(evt);
3370                 break;
3371         case IBMVFC_MAD_FAILED:
3372                 dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
3373                         ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3374                 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3375                         ibmvfc_retry_host_init(vhost);
3376                 else
3377                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3378                 ibmvfc_free_event(evt);
3379                 return;
3380         case IBMVFC_MAD_CRQ_ERROR:
3381                 ibmvfc_retry_host_init(vhost);
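                /* fall through - the event is freed and we return below */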
3382         case IBMVFC_MAD_DRIVER_FAILED:
3383                 ibmvfc_free_event(evt);
3384                 return;
3385         default:
3386                 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
3387                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3388                 ibmvfc_free_event(evt);
3389                 return;
3390         }
3391
3392         vhost->client_migrated = 0;
3393
3394         if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
3395                 dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
3396                         rsp->flags);
3397                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3398                 wake_up(&vhost->work_wait_q);
3399                 return;
3400         }
3401
3402         if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
3403                 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
3404                         rsp->max_cmds);
3405                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3406                 wake_up(&vhost->work_wait_q);
3407                 return;
3408         }
3409
3410         npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
3411         dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
3412                  rsp->partition_name, rsp->device_name, rsp->port_loc_code,
3413                  rsp->drc_name, npiv_max_sectors);
3414
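        /* Publish the NPIV login results through the FC transport class. */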
3415         fc_host_fabric_name(vhost->host) = rsp->node_name;
3416         fc_host_node_name(vhost->host) = rsp->node_name;
3417         fc_host_port_name(vhost->host) = rsp->port_name;
3418         fc_host_port_id(vhost->host) = rsp->scsi_id;
3419         fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
3420         fc_host_supported_classes(vhost->host) = 0;
3421         if (rsp->service_parms.class1_parms[0] & 0x80000000)
3422                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
3423         if (rsp->service_parms.class2_parms[0] & 0x80000000)
3424                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
3425         if (rsp->service_parms.class3_parms[0] & 0x80000000)
3426                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
3427         fc_host_maxframe_size(vhost->host) =
3428                 rsp->service_parms.common.bb_rcv_sz & 0x0fff;
3429
3430         vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
3431         vhost->host->max_sectors = npiv_max_sectors;
3432         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3433         wake_up(&vhost->work_wait_q);
3434 }
3435
3436 /**
3437  * ibmvfc_npiv_login - Sends NPIV login
3438  * @vhost:      ibmvfc host struct
3439  *
3440  **/
3441 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3442 {
3443         struct ibmvfc_npiv_login_mad *mad;
3444         struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
3445
3446         ibmvfc_gather_partition_info(vhost);
3447         ibmvfc_set_login_info(vhost);
3448         ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
3449
3450         memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
3451         mad = &evt->iu.npiv_login;
3452         memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
3453         mad->common.version = 1;
3454         mad->common.opcode = IBMVFC_NPIV_LOGIN;
3455         mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
3456         mad->buffer.va = vhost->login_buf_dma;
3457         mad->buffer.len = sizeof(*vhost->login_buf);
3458
3459         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
3460
3461         if (!ibmvfc_send_event(evt, vhost, default_timeout))
3462                 ibmvfc_dbg(vhost, "Sent NPIV login\n");
3463         else
3464                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3465 }
3466
3467 /**
3468  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
3469  * @vhost:              ibmvfc host struct
3470  *
3471  * Returns:
3472  *      1 if work to do / 0 if not
3473  **/
3474 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
3475 {
3476         struct ibmvfc_target *tgt;
3477
3478         list_for_each_entry(tgt, &vhost->targets, queue) {
3479                 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
3480                     tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3481                         return 1;
3482         }
3483
3484         return 0;
3485 }
3486
3487 /**
3488  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
3489  * @vhost:              ibmvfc host struct
3490  *
3491  * Returns:
3492  *      1 if work to do / 0 if not
3493  **/
3494 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3495 {
3496         struct ibmvfc_target *tgt;
3497
3498         if (kthread_should_stop())
3499                 return 1;
3500         switch (vhost->action) {
3501         case IBMVFC_HOST_ACTION_NONE:
3502         case IBMVFC_HOST_ACTION_INIT_WAIT:
3503                 return 0;
3504         case IBMVFC_HOST_ACTION_TGT_INIT:
3505         case IBMVFC_HOST_ACTION_QUERY_TGTS:
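                /*
                 * There is work to do if a target is still waiting to be
                 * initialized and a discovery thread is free to take it, or
                 * if no target is left in INIT/INIT_WAIT and the host state
                 * machine can advance.
                 */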
3506                 if (vhost->discovery_threads == disc_threads)
3507                         return 0;
3508                 list_for_each_entry(tgt, &vhost->targets, queue)
3509                         if (tgt->action == IBMVFC_TGT_ACTION_INIT)
3510                                 return 1;
3511                 list_for_each_entry(tgt, &vhost->targets, queue)
3512                         if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3513                                 return 0;
3514                 return 1;
3515         case IBMVFC_HOST_ACTION_INIT:
3516         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3517         case IBMVFC_HOST_ACTION_TGT_ADD:
3518         case IBMVFC_HOST_ACTION_TGT_DEL:
3519         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
3520         case IBMVFC_HOST_ACTION_QUERY:
3521         default:
3522                 break;
3523         }
3524
3525         return 1;
3526 }
3527
3528 /**
3529  * ibmvfc_work_to_do - Is there task level work to do?
3530  * @vhost:              ibmvfc host struct
3531  *
3532  * Returns:
3533  *      1 if work to do / 0 if not
3534  **/
3535 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3536 {
3537         unsigned long flags;
3538         int rc;
3539
3540         spin_lock_irqsave(vhost->host->host_lock, flags);
3541         rc = __ibmvfc_work_to_do(vhost);
3542         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3543         return rc;
3544 }
3545
3546 /**
3547  * ibmvfc_log_ae - Log async events if necessary
3548  * @vhost:              ibmvfc host struct
3549  * @events:             events to log
3550  *
3551  **/
3552 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
3553 {
3554         if (events & IBMVFC_AE_RSCN)
3555                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
3556         if ((events & IBMVFC_AE_LINKDOWN) &&
3557             vhost->state >= IBMVFC_HALTED)
3558                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
3559         if ((events & IBMVFC_AE_LINKUP) &&
3560             vhost->state == IBMVFC_INITIALIZING)
3561                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
3562 }
3563
3564 /**
3565  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
3566  * @tgt:                ibmvfc target struct
3567  *
3568  **/
3569 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3570 {
3571         struct ibmvfc_host *vhost = tgt->vhost;
3572         struct fc_rport *rport;
3573         unsigned long flags;
3574
3575         tgt_dbg(tgt, "Adding rport\n");
3576         rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3577         spin_lock_irqsave(vhost->host->host_lock, flags);
3578         tgt->rport = rport;
3579         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3580         if (rport) {
3581                 tgt_dbg(tgt, "rport add succeeded\n");
3582                 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
3583                 rport->supported_classes = 0;
3584                 tgt->target_id = rport->scsi_target_id;
3585                 if (tgt->service_parms.class1_parms[0] & 0x80000000)
3586                         rport->supported_classes |= FC_COS_CLASS1;
3587                 if (tgt->service_parms.class2_parms[0] & 0x80000000)
3588                         rport->supported_classes |= FC_COS_CLASS2;
3589                 if (tgt->service_parms.class3_parms[0] & 0x80000000)
3590                         rport->supported_classes |= FC_COS_CLASS3;
3591         } else
3592                 tgt_dbg(tgt, "rport add failed\n");
3593         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3594 }
3595
3596 /**
3597  * ibmvfc_do_work - Do task level work
3598  * @vhost:              ibmvfc host struct
3599  *
3600  **/
3601 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3602 {
3603         struct ibmvfc_target *tgt;
3604         unsigned long flags;
3605         struct fc_rport *rport;
3606
3607         ibmvfc_log_ae(vhost, vhost->events_to_log);
3608         spin_lock_irqsave(vhost->host->host_lock, flags);
3609         vhost->events_to_log = 0;
3610         switch (vhost->action) {
3611         case IBMVFC_HOST_ACTION_NONE:
3612         case IBMVFC_HOST_ACTION_INIT_WAIT:
3613                 break;
3614         case IBMVFC_HOST_ACTION_INIT:
3615                 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
3616                 vhost->job_step(vhost);
3617                 break;
3618         case IBMVFC_HOST_ACTION_QUERY:
3619                 list_for_each_entry(tgt, &vhost->targets, queue)
3620                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
3621                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
3622                 break;
3623         case IBMVFC_HOST_ACTION_QUERY_TGTS:
3624                 list_for_each_entry(tgt, &vhost->targets, queue) {
3625                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
3626                                 tgt->job_step(tgt);
3627                                 break;
3628                         }
3629                 }
3630
3631                 if (!ibmvfc_dev_init_to_do(vhost))
3632                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
3633                 break;
3634         case IBMVFC_HOST_ACTION_TGT_DEL:
3635         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
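                /*
                 * Delete at most one rport per pass: the host lock has to be
                 * dropped around fc_remote_port_delete(), so return here and
                 * let the work thread pick up the remaining targets.
                 */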
3636                 list_for_each_entry(tgt, &vhost->targets, queue) {
3637                         if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3638                                 tgt_dbg(tgt, "Deleting rport\n");
3639                                 rport = tgt->rport;
3640                                 tgt->rport = NULL;
3641                                 list_del(&tgt->queue);
3642                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3643                                 if (rport)
3644                                         fc_remote_port_delete(rport);
3645                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3646                                 return;
3647                         }
3648                 }
3649
3650                 if (vhost->state == IBMVFC_INITIALIZING) {
3651                         if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
3652                                 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
3653                                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
3654                                 vhost->init_retries = 0;
3655                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3656                                 scsi_unblock_requests(vhost->host);
3657                                 return;
3658                         } else {
3659                                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
3660                                 vhost->job_step = ibmvfc_discover_targets;
3661                         }
3662                 } else {
3663                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3664                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3665                         scsi_unblock_requests(vhost->host);
3666                         wake_up(&vhost->init_wait_q);
3667                         return;
3668                 }
3669                 break;
3670         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3671                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
3672                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3673                 ibmvfc_alloc_targets(vhost);
3674                 spin_lock_irqsave(vhost->host->host_lock, flags);
3675                 break;
3676         case IBMVFC_HOST_ACTION_TGT_INIT:
3677                 list_for_each_entry(tgt, &vhost->targets, queue) {
3678                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
3679                                 tgt->job_step(tgt);
3680                                 break;
3681                         }
3682                 }
3683
3684                 if (!ibmvfc_dev_init_to_do(vhost))
3685                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
3686                 break;
3687         case IBMVFC_HOST_ACTION_TGT_ADD:
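                /*
                 * Likewise, add one rport at a time with the lock dropped.
                 * Once all targets are added, either restart discovery if a
                 * re-init was requested in the meantime, or go idle and wake
                 * anyone waiting for initialization to complete.
                 */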
3688                 list_for_each_entry(tgt, &vhost->targets, queue) {
3689                         if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
3690                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3691                                 ibmvfc_tgt_add_rport(tgt);
3692                                 return;
3693                         }
3694                 }
3695
3696                 if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
3697                         vhost->reinit = 0;
3698                         scsi_block_requests(vhost->host);
3699                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3700                 } else {
3701                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3702                         wake_up(&vhost->init_wait_q);
3703                 }
3704                 break;
3705         default:
3706                 break;
3707         }
3708
3709         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3710 }
3711
3712 /**
3713  * ibmvfc_work - Do task level work
3714  * @data:               ibmvfc host struct
3715  *
3716  * Returns:
3717  *      zero
3718  **/
3719 static int ibmvfc_work(void *data)
3720 {
3721         struct ibmvfc_host *vhost = data;
3722         int rc;
3723
3724         set_user_nice(current, -20);
3725
3726         while (1) {
3727                 rc = wait_event_interruptible(vhost->work_wait_q,
3728                                               ibmvfc_work_to_do(vhost));
3729
3730                 BUG_ON(rc);
3731
3732                 if (kthread_should_stop())
3733                         break;
3734
3735                 ibmvfc_do_work(vhost);
3736         }
3737
3738         ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
3739         return 0;
3740 }
3741
3742 /**
3743  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
3744  * @vhost:      ibmvfc host struct
3745  *
3746  * Allocates a page for messages, maps it for DMA, and registers
3747  * the CRQ with the hypervisor.
3748  *
3749  * Return value:
3750  *      zero on success / other on failure
3751  **/
3752 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
3753 {
3754         int rc, retrc = -ENOMEM;
3755         struct device *dev = vhost->dev;
3756         struct vio_dev *vdev = to_vio_dev(dev);
3757         struct ibmvfc_crq_queue *crq = &vhost->crq;
3758
3759         ENTER;
3760         crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
3761
3762         if (!crq->msgs)
3763                 return -ENOMEM;
3764
3765         crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3766         crq->msg_token = dma_map_single(dev, crq->msgs,
3767                                         PAGE_SIZE, DMA_BIDIRECTIONAL);
3768
3769         if (dma_mapping_error(dev, crq->msg_token))
3770                 goto map_failed;
3771
3772         retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3773                                         crq->msg_token, PAGE_SIZE);
3774
3775         if (rc == H_RESOURCE)
3776                 /* maybe kexecing and resource is busy. try a reset */
3777                 retrc = rc = ibmvfc_reset_crq(vhost);
3778
3779         if (rc == H_CLOSED)
3780                 dev_warn(dev, "Partner adapter not ready\n");
3781         else if (rc) {
3782                 dev_warn(dev, "Error %d opening adapter\n", rc);
3783                 goto reg_crq_failed;
3784         }
3785
3786         retrc = 0;
3787
3788         if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
3789                 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
3790                 goto req_irq_failed;
3791         }
3792
3793         if ((rc = vio_enable_interrupts(vdev))) {
3794                 dev_err(dev, "Error %d enabling interrupts\n", rc);
3795                 goto req_irq_failed;
3796         }
3797
3798         crq->cur = 0;
3799         LEAVE;
3800         return retrc;
3801
3802 req_irq_failed:
3803         do {
3804                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3805         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3806 reg_crq_failed:
3807         dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3808 map_failed:
3809         free_page((unsigned long)crq->msgs);
3810         return retrc;
3811 }
3812
3813 /**
3814  * ibmvfc_free_mem - Free memory for vhost
3815  * @vhost:      ibmvfc host struct
3816  *
3817  * Return value:
3818  *      none
3819  **/
3820 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
3821 {
3822         struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
3823
3824         ENTER;
3825         mempool_destroy(vhost->tgt_pool);
3826         kfree(vhost->trace);
3827         dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
3828                           vhost->disc_buf_dma);
3829         dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
3830                           vhost->login_buf, vhost->login_buf_dma);
3831         dma_pool_destroy(vhost->sg_pool);
3832         dma_unmap_single(vhost->dev, async_q->msg_token,
3833                          async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
3834         free_page((unsigned long)async_q->msgs);
3835         LEAVE;
3836 }
3837
3838 /**
3839  * ibmvfc_alloc_mem - Allocate memory for vhost
3840  * @vhost:      ibmvfc host struct
3841  *
3842  * Return value:
3843  *      0 on success / non-zero on failure
3844  **/
3845 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
3846 {
3847         struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
3848         struct device *dev = vhost->dev;
3849
3850         ENTER;
3851         async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
3852         if (!async_q->msgs) {
3853                 dev_err(dev, "Couldn't allocate async queue.\n");
3854                 goto nomem;
3855         }
3856
3857         async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
3858         async_q->msg_token = dma_map_single(dev, async_q->msgs,
3859                                             async_q->size * sizeof(*async_q->msgs),
3860                                             DMA_BIDIRECTIONAL);
3861
3862         if (dma_mapping_error(dev, async_q->msg_token)) {
3863                 dev_err(dev, "Failed to map async queue\n");
3864                 goto free_async_crq;
3865         }
3866
3867         vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
3868                                          SG_ALL * sizeof(struct srp_direct_buf),
3869                                          sizeof(struct srp_direct_buf), 0);
3870
3871         if (!vhost->sg_pool) {
3872                 dev_err(dev, "Failed to allocate sg pool\n");
3873                 goto unmap_async_crq;
3874         }
3875
3876         vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
3877                                               &vhost->login_buf_dma, GFP_KERNEL);
3878
3879         if (!vhost->login_buf) {
3880                 dev_err(dev, "Couldn't allocate NPIV login buffer\n");
3881                 goto free_sg_pool;
3882         }
3883
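        /* One SCSI ID entry per possible target in the Discover Targets buffer */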
3884         vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
3885         vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
3886                                              &vhost->disc_buf_dma, GFP_KERNEL);
3887
3888         if (!vhost->disc_buf) {
3889                 dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
3890                 goto free_login_buffer;
3891         }
3892
3893         vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
3894                                sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
3895
3896         if (!vhost->trace)
3897                 goto free_disc_buffer;
3898
3899         vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
3900                                                       sizeof(struct ibmvfc_target));
3901
3902         if (!vhost->tgt_pool) {
3903                 dev_err(dev, "Couldn't allocate target memory pool\n");
3904                 goto free_trace;
3905         }
3906
3907         LEAVE;
3908         return 0;
3909
3910 free_trace:
3911         kfree(vhost->trace);
3912 free_disc_buffer:
3913         dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
3914                           vhost->disc_buf_dma);
3915 free_login_buffer:
3916         dma_free_coherent(dev, sizeof(*vhost->login_buf),
3917                           vhost->login_buf, vhost->login_buf_dma);
3918 free_sg_pool:
3919         dma_pool_destroy(vhost->sg_pool);
3920 unmap_async_crq:
3921         dma_unmap_single(dev, async_q->msg_token,
3922                          async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
3923 free_async_crq:
3924         free_page((unsigned long)async_q->msgs);
3925 nomem:
3926         LEAVE;
3927         return -ENOMEM;
3928 }
3929
3930 /**
3931  * ibmvfc_probe - Adapter hot plug add entry point
3932  * @vdev:       vio device struct
3933  * @id: vio device id struct
3934  *
3935  * Return value:
3936  *      0 on success / non-zero on failure
3937  **/
3938 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
3939 {
3940         struct ibmvfc_host *vhost;
3941         struct Scsi_Host *shost;
3942         struct device *dev = &vdev->dev;
3943         int rc = -ENOMEM;
3944
3945         ENTER;
3946         shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
3947         if (!shost) {
3948                 dev_err(dev, "Couldn't allocate host data\n");
3949                 goto out;
3950         }
3951
3952         shost->transportt = ibmvfc_transport_template;
3953         shost->can_queue = max_requests;
3954         shost->max_lun = max_lun;
3955         shost->max_id = max_targets;
3956         shost->max_sectors = IBMVFC_MAX_SECTORS;
3957         shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
3958         shost->unique_id = shost->host_no;
3959
3960         vhost = shost_priv(shost);
3961         INIT_LIST_HEAD(&vhost->sent);
3962         INIT_LIST_HEAD(&vhost->free);
3963         INIT_LIST_HEAD(&vhost->targets);
3964         sprintf(vhost->name, IBMVFC_NAME);
3965         vhost->host = shost;
3966         vhost->dev = dev;
3967         vhost->partition_number = -1;
3968         vhost->log_level = log_level;
3969         strcpy(vhost->partition_name, "UNKNOWN");
3970         init_waitqueue_head(&vhost->work_wait_q);
3971         init_waitqueue_head(&vhost->init_wait_q);
3972
3973         if ((rc = ibmvfc_alloc_mem(vhost)))
3974                 goto free_scsi_host;
3975
3976         vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
3977                                          shost->host_no);
3978
3979         if (IS_ERR(vhost->work_thread)) {
3980                 dev_err(dev, "Couldn't create kernel thread: %ld\n",
3981                         PTR_ERR(vhost->work_thread));
3982                 goto free_host_mem;
3983         }
3984
3985         if ((rc = ibmvfc_init_crq(vhost))) {
3986                 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
3987                 goto kill_kthread;
3988         }
3989
3990         if ((rc = ibmvfc_init_event_pool(vhost))) {
3991                 dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
3992                 goto release_crq;
3993         }
3994
3995         if ((rc = scsi_add_host(shost, dev)))
3996                 goto release_event_pool;
3997
3998         if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
3999                                            &ibmvfc_trace_attr))) {
4000                 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
4001                 goto remove_shost;
4002         }
4003
4004         dev_set_drvdata(dev, vhost);
4005         spin_lock(&ibmvfc_driver_lock);
4006         list_add_tail(&vhost->queue, &ibmvfc_head);
4007         spin_unlock(&ibmvfc_driver_lock);
4008
4009         ibmvfc_send_crq_init(vhost);
4010         scsi_scan_host(shost);
4011         return 0;
4012
4013 remove_shost:
4014         scsi_remove_host(shost);
4015 release_event_pool:
4016         ibmvfc_free_event_pool(vhost);
4017 release_crq:
4018         ibmvfc_release_crq_queue(vhost);
4019 kill_kthread:
4020         kthread_stop(vhost->work_thread);
4021 free_host_mem:
4022         ibmvfc_free_mem(vhost);
4023 free_scsi_host:
4024         scsi_host_put(shost);
4025 out:
4026         LEAVE;
4027         return rc;
4028 }
4029
4030 /**
4031  * ibmvfc_remove - Adapter hot plug remove entry point
4032  * @vdev:       vio device struct
4033  *
4034  * Return value:
4035  *      0
4036  **/
4037 static int ibmvfc_remove(struct vio_dev *vdev)
4038 {
4039         struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
4040         unsigned long flags;
4041
4042         ENTER;
4043         ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
4044         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
4045         ibmvfc_wait_while_resetting(vhost);
4046         ibmvfc_release_crq_queue(vhost);
4047         kthread_stop(vhost->work_thread);
4048         fc_remove_host(vhost->host);
4049         scsi_remove_host(vhost->host);
4050
4051         spin_lock_irqsave(vhost->host->host_lock, flags);
4052         ibmvfc_purge_requests(vhost, DID_ERROR);
4053         ibmvfc_free_event_pool(vhost);
4054         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4055
4056         ibmvfc_free_mem(vhost);
4057         spin_lock(&ibmvfc_driver_lock);
4058         list_del(&vhost->queue);
4059         spin_unlock(&ibmvfc_driver_lock);
4060         scsi_host_put(vhost->host);
4061         LEAVE;
4062         return 0;
4063 }
4064
4065 /**
4066  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
4067  * @vdev:       vio device struct
4068  *
4069  * Return value:
4070  *      Number of bytes the driver will need to DMA map at the same time in
4071  *      order to perform well.
4072  **/
4073 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
4074 {
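        /*
         * Budget for the event pool IUs plus roughly 512KB of mapped I/O
         * for each of the template's cmd_per_lun commands.
         */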
4075         unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
4076         return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
4077 }
4078
4079 static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
4080         {"fcp", "IBM,vfc-client"},
4081         { "", "" }
4082 };
4083 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
4084
4085 static struct vio_driver ibmvfc_driver = {
4086         .id_table = ibmvfc_device_table,
4087         .probe = ibmvfc_probe,
4088         .remove = ibmvfc_remove,
4089         .get_desired_dma = ibmvfc_get_desired_dma,
4090         .driver = {
4091                 .name = IBMVFC_NAME,
4092                 .owner = THIS_MODULE,
4093         }
4094 };
4095
4096 static struct fc_function_template ibmvfc_transport_functions = {
4097         .show_host_fabric_name = 1,
4098         .show_host_node_name = 1,
4099         .show_host_port_name = 1,
4100         .show_host_supported_classes = 1,
4101         .show_host_port_type = 1,
4102         .show_host_port_id = 1,
4103
4104         .get_host_port_state = ibmvfc_get_host_port_state,
4105         .show_host_port_state = 1,
4106
4107         .get_host_speed = ibmvfc_get_host_speed,
4108         .show_host_speed = 1,
4109
4110         .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
4111         .terminate_rport_io = ibmvfc_terminate_rport_io,
4112
4113         .show_rport_maxframe_size = 1,
4114         .show_rport_supported_classes = 1,
4115
4116         .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
4117         .show_rport_dev_loss_tmo = 1,
4118
4119         .get_starget_node_name = ibmvfc_get_starget_node_name,
4120         .show_starget_node_name = 1,
4121
4122         .get_starget_port_name = ibmvfc_get_starget_port_name,
4123         .show_starget_port_name = 1,
4124
4125         .get_starget_port_id = ibmvfc_get_starget_port_id,
4126         .show_starget_port_id = 1,
4127 };
4128
4129 /**
4130  * ibmvfc_module_init - Initialize the ibmvfc module
4131  *
4132  * Return value:
4133  *      0 on success / other on failure
4134  **/
4135 static int __init ibmvfc_module_init(void)
4136 {
4137         int rc;
4138
4139         if (!firmware_has_feature(FW_FEATURE_VIO))
4140                 return -ENODEV;
4141
4142         printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
4143                IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
4144
4145         ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
4146         if (!ibmvfc_transport_template)
4147                 return -ENOMEM;
4148
4149         rc = vio_register_driver(&ibmvfc_driver);
4150         if (rc)
4151                 fc_release_transport(ibmvfc_transport_template);
4152         return rc;
4153 }
4154
4155 /**
4156  * ibmvfc_module_exit - Teardown the ibmvfc module
4157  *
4158  * Return value:
4159  *      nothing
4160  **/
4161 static void __exit ibmvfc_module_exit(void)
4162 {
4163         vio_unregister_driver(&ibmvfc_driver);
4164         fc_release_transport(ibmvfc_transport_template);
4165 }
4166
4167 module_init(ibmvfc_module_init);
4168 module_exit(ibmvfc_module_exit);