/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
		 "Max number of gather/scatter entries per I/O (default is 12)");
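/*
 * For example, loading the module as below would allow up to 32
 * scatter/gather entries per command (illustrative invocation, not
 * part of the original source):
 *
 *     modprobe ib_srp srp_sg_tablesize=32
 */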
static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
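/*
 * Note (added for clarity): the workaround checks below compare the
 * first three bytes of the target's IOC GUID against this OUI.  An
 * EUI-64 GUID begins with the vendor's 24-bit IEEE OUI, so a match
 * identifies Topspin/Cisco hardware.
 */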
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dev->dma_device,
				 iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dev->dma_device,
			 iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = target->cq;
	init_attr->recv_cq             = target->cq;

	target->qp = ib_create_qp(target->srp_host->dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID	    |
						   IB_SA_PATH_REC_SGID	    |
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path 	      = &target->path;
	req->param.alternate_path 	      = NULL;
	req->param.service_id 		      = target->service_id;
	req->param.qp_num 		      = target->qp->qp_num;
	req->param.qp_type 		      = target->qp->qp_type;
	req->param.private_data 	      = &req->priv;
	req->param.private_data_len 	      = sizeof req->priv;
	req->param.flow_control 	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn 	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count 		      = 7;
	req->param.rnr_retry_count 	      = 7;
	req->param.max_cm_retries 	      = 15;

	req->priv.opcode     	= SRP_LOGIN_REQ;
	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       target->srp_host->initiator_port_id + 8, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       target->srp_host->initiator_port_id, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       target->srp_host->initiator_port_id, 16);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID.  The
	 * second 8 bytes must be our local node GUID, but we always
	 * use that anyway.
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}
static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
}
static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct scatterlist *scat;
	int nents;

	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (req->fmr) {
		ib_fmr_pool_unmap(req->fmr);
		req->fmr = NULL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat  = scmnd->request_buffer;
	} else {
		nents = 1;
		scat  = &req->fake_sg;
	}

	dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
		     scmnd->sc_data_direction);
}
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req;
	struct ib_wc wc;
	int ret;
	int i;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	list_for_each_entry(req, &target->req_queue, list) {
		req->scmnd->result = DID_RESET << 16;
		req->scmnd->scsi_done(req->scmnd);
		srp_unmap_data(req->scmnd, target, req);
	}

	target->rx_head	 = 0;
	target->tx_head	 = 0;
	target->tx_tail  = 0;
	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}
static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat,
		       int sg_cnt, struct srp_request *req,
		       struct srp_direct_buf *buf)
{
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt;
	int i, j;
	int ret;

	if (!dev->fmr_pool)
		return -ENODEV;

	len = page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i) {
		if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
		    ~dev->fmr_page_mask) {
			if (i < sg_cnt - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += sg_dma_len(&scat[i]);
	}

	page_cnt += len >> dev->fmr_page_shift;
	if (page_cnt > SRP_FMR_SIZE)
		return -ENOMEM;

	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i)
		for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;

	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
					dma_pages, page_cnt, &io_addr);
	if (IS_ERR(req->fmr)) {
		ret = PTR_ERR(req->fmr);
		req->fmr = NULL;
		goto out;
	}

	buf->va  = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
	buf->len = cpu_to_be32(len);

	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}
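/*
 * Note on the alignment test above (added for clarity): an FMR maps a
 * contiguous run of fmr_page_size pages, so only the first scatterlist
 * entry may begin at a page offset and only the last may end short of
 * a page boundary.  Any unaligned interior entry makes the list
 * unmappable with a single FMR, and the caller falls back to an
 * indirect descriptor instead.
 */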
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	u8 fmt = SRP_DATA_DESC_DIRECT;

	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat  = scmnd->request_buffer;
	} else {
		nents = 1;
		scat  = &req->fake_sg;
		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
	}

	count = dma_map_sg(target->srp_host->dev->dev->dma_device,
			   scat, nents, scmnd->sc_data_direction);

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));
	} else if (srp_map_fmr(target->srp_host->dev, scat, count, req,
			       (void *) cmd->add_data)) {
		/*
		 * FMR mapping failed, and the scatterlist has more
		 * than one entry.  Generate an indirect memory
		 * descriptor.
		 */
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		u32 datalen = 0;
		int i;

		fmt = SRP_DATA_DESC_INDIRECT;
		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);

		for (i = 0; i < count; ++i) {
			buf->desc_list[i].va  =
				cpu_to_be64(sg_dma_address(&scat[i]));
			buf->desc_list[i].key =
				cpu_to_be32(target->srp_host->dev->mr->rkey);
			buf->desc_list[i].len =
				cpu_to_be32(sg_dma_len(&scat[i]));
			datalen += sg_dma_len(&scat[i]);
		}

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va  =
			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		buf->len = cpu_to_be32(datalen);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
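/*
 * To summarize the descriptor choice made above: a single mapped SG
 * entry yields a direct descriptor covered by the DMA MR; multiple
 * entries are first tried as one FMR-backed direct descriptor, and
 * only if that fails does the command carry a full indirect
 * descriptor table.
 */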
static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
	srp_unmap_data(req->scmnd, target, req);
	list_move_tail(&req->list, &target->free_reqs);
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd)
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		if (!req->tsk_mgmt) {
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			srp_remove_req(target, req);
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}
static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR "  [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}
static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}
static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next 	 = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu 	 = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next     = NULL;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1))
		++target->zero_req_lim;

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}
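/*
 * Note (added for clarity, assuming the ring sizing in ib_srp.h):
 * tx_ring has SRP_SQ_SIZE + 1 entries and SRP_SQ_SIZE is one less
 * than a power of two, so "& SRP_SQ_SIZE" above acts as a cheap
 * modulo when converting tx_head into a ring index.
 */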
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}
static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				srp_max_iu_len, DMA_TO_DEVICE);

	req = list_entry(target->free_reqs.next, struct srp_request, list);

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) (long) req->index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   srp_max_iu_len, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	list_move_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  srp_max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING "  REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		printk(KERN_WARNING PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			printk(KERN_ERR PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     struct srp_request *req, u8 func)
{
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		req->scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
	tsk_mgmt->lun 		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
	tsk_mgmt->tag 		= req->index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag 	= req->index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = tsk_mgmt;

	spin_unlock_irq(target->scsi_host->host_lock);

	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return -1;
}
static int srp_find_req(struct srp_target_port *target,
			struct scsi_cmnd *scmnd,
			struct srp_request **req)
{
	if (scmnd->host_scribble == (void *) -1L)
		return -1;

	*req = &target->req_ring[(long) scmnd->host_scribble];

	return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	int ret = SUCCESS;

	printk(KERN_ERR "SRP abort called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		srp_remove_req(target, req);
		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		srp_remove_req(target, req);
		scmnd->result = DID_ABORT << 16;
	} else
		ret = FAILED;

	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req, *tmp;

	printk(KERN_ERR "SRP reset_device called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
		return FAILED;
	if (req->tsk_status)
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		if (req->scmnd->device == scmnd->device) {
			req->scmnd->result = DID_RESET << 16;
			req->scmnd->scsi_done(req->scmnd);
			srp_remove_req(target, req);
		}

	spin_unlock_irq(target->scsi_host->host_lock);

	return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}
static ssize_t show_id_ext(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
}

static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}
static CLASS_DEVICE_ATTR(id_ext,	S_IRUGO, show_id_ext,		NULL);
static CLASS_DEVICE_ATTR(ioc_guid,	S_IRUGO, show_ioc_guid,		NULL);
static CLASS_DEVICE_ATTR(service_id,	S_IRUGO, show_service_id,	NULL);
static CLASS_DEVICE_ATTR(pkey,		S_IRUGO, show_pkey,		NULL);
static CLASS_DEVICE_ATTR(dgid,		S_IRUGO, show_dgid,		NULL);
static CLASS_DEVICE_ATTR(zero_req_lim,	S_IRUGO, show_zero_req_lim,	NULL);

static struct class_device_attribute *srp_host_attrs[] = {
	&class_device_attr_id_ext,
	&class_device_attr_ioc_guid,
	&class_device_attr_service_id,
	&class_device_attr_pkey,
	&class_device_attr_dgid,
	&class_device_attr_zero_req_lim,
	NULL
};
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.can_queue			= SRP_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
		return -ENODEV;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.release = srp_release_class_dev
};
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s" 	},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x" 		},
	{ SRP_OPT_ERR,			NULL 			}
};
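/*
 * For example (illustrative values only -- the IDs, GUID and GID
 * below are made up, and the sysfs path depends on the HCA name and
 * port number):
 *
 *     echo "id_ext=200100a0b80b25d0,ioc_guid=00a0b80b25d0,\
 *     dgid=fe800000000000000005ad00000013e7,pkey=ffff,\
 *     service_id=200100a0b80b25d0" \
 *         > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */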
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				printk(KERN_WARNING PFX "unknown IO class parameter value"
				       " %x specified (use %x or %x).\n",
				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}
static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->max_lun = SRP_MAX_LUN;

	target = host_to_target(target_host);
	memset(target, 0, sizeof *target);

	target->io_class  = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host  = host;

	INIT_WORK(&target->work, srp_reconnect_work, target);

	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->req_ring[i].index = i;
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
	}

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}
static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->dev  = device;
	host->port = port;

	host->initiator_port_id[7] = port;
	memcpy(host->initiator_port_id + 8, &device->dev->node_guid, 8);

	host->class_dev.class = &srp_class;
	host->class_dev.dev   = device->dev->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->dev->name, port);

	if (class_device_register(&host->class_dev))
		goto free_host;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

free_host:
	kfree(host);

	return NULL;
}
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask  = ~((unsigned long) srp_dev->fmr_page_size - 1);
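	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): an HCA reporting page_size_cap = 0xfffff000 supports
	 * 4 KB pages and larger, so ffs(page_size_cap) - 1 = 12, giving
	 * fmr_page_size = 4096 and fmr_page_mask = ~0xfffUL.
	 */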
	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(target->scsi_host->host_lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(target->scsi_host->host_lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
static int __init srp_init_module(void)
{
	int ret;

	srp_template.sg_tablesize = srp_sg_tablesize;
	srp_max_iu_len = (sizeof (struct srp_cmd) +
			  sizeof (struct srp_indirect_buf) +
			  srp_sg_tablesize * 16);
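	/*
	 * Worked example (added for clarity): each scatter/gather
	 * entry needs one 16-byte srp_direct_buf in the indirect
	 * table, so with the default srp_sg_tablesize of 12 every
	 * transmit IU reserves room for a SRP_CMD header, an indirect
	 * buffer descriptor, and 12 * 16 = 192 bytes of table entries.
	 */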
	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);