IB/srp: Fix srp_create_target() error handling
drivers/infiniband/ulp/srp/ib_srp.c
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME        "ib_srp"
#define PFX             DRV_NAME ": "
#define DRV_VERSION     "2.0"
#define DRV_RELDATE     "July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
                 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
                 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
                  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
                 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
                 "Number of seconds between the observation of a transport"
                 " layer error and failing all I/O. \"off\" means that this"
                 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
                 "Maximum number of seconds that the SRP transport should"
                 " insulate transport layer errors. After this time has been"
                 " exceeded the SCSI host is removed. Should be"
                 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                 " if fast_io_fail_tmo has not been set. \"off\" means that"
                 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
                 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
                const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
        int tmo = *(int *)kp->arg;

        if (tmo >= 0)
                return sprintf(buffer, "%d", tmo);
        else
                return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
        int tmo, res;

        res = srp_parse_tmo(&tmo, val);
        if (res)
                goto out;

        if (kp->arg == &srp_reconnect_delay)
                res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
                                    srp_dev_loss_tmo);
        else if (kp->arg == &srp_fast_io_fail_tmo)
                res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
        else
                res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
                                    tmo);
        if (res)
                goto out;
        *(int *)kp->arg = tmo;

out:
        return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
        .get = srp_tmo_get,
        .set = srp_tmo_set,
};

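/*
 * The three timeout parameters above share srp_tmo_ops, so every sysfs
 * write goes through srp_tmo_set() and the reconnect_delay,
 * fast_io_fail_tmo and dev_loss_tmo values are validated as a consistent
 * triple by srp_tmo_valid() before any single value is updated.
 * Illustrative usage from user space (assuming the module is loaded):
 *
 *      echo 20  > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *      echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *
 * Each write is accepted only if the resulting combination passes
 * srp_tmo_valid(); "off" is parsed by srp_parse_tmo() as a negative value,
 * which srp_tmo_get() reports back as "off".
 */
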
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
        static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
        static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

        return topspin_workarounds &&
                (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
                 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}

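/*
 * srp_alloc_iu() and srp_free_iu() bracket the lifetime of an information
 * unit: the iu owns its descriptor, its data buffer and the DMA mapping of
 * that buffer. Note that the mapping is checked with ib_dma_mapping_error()
 * rather than against NULL, and that the error path above unwinds in the
 * reverse order of allocation, a pattern used throughout this file.
 */
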
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
                            iu->direction);
        kfree(iu->buf);
        kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
        pr_debug("QP event %s (%d)\n",
                 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
                                  target->srp_host->port,
                                  be16_to_cpu(target->pkey),
                                  &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                    IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE          |
                           IB_QP_PKEY_INDEX     |
                           IB_QP_ACCESS_FLAGS   |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, ch);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (ch->cm_id)
                ib_destroy_cm_id(ch->cm_id);
        ch->cm_id = new_cm_id;
        ch->path.sgid = target->sgid;
        ch->path.dgid = target->orig_dgid;
        ch->path.pkey = target->pkey;
        ch->path.service_id = target->service_id;

        return 0;
}

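/*
 * Note that srp_new_cm_id() creates the new CM ID before destroying the old
 * one, so a failed ib_create_cm_id() leaves ch->cm_id untouched and the
 * channel still usable. srp_create_ch_ib() below applies the same
 * create-then-swap pattern to the CQs, the QP and the registration pools.
 */
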
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_fmr_pool_param fmr_param;

        memset(&fmr_param, 0, sizeof(fmr_param));
        fmr_param.pool_size         = target->mr_pool_size;
        fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
        fmr_param.cache             = 1;
        fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
        fmr_param.page_shift        = ilog2(dev->mr_page_size);
        fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_WRITE |
                                       IB_ACCESS_REMOTE_READ);

        return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
        int i;
        struct srp_fr_desc *d;

        if (!pool)
                return;

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                if (d->mr)
                        ib_dereg_mr(d->mr);
        }
        kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
                                              struct ib_pd *pd, int pool_size,
                                              int max_page_list_len)
{
        struct srp_fr_pool *pool;
        struct srp_fr_desc *d;
        struct ib_mr *mr;
        int i, ret = -EINVAL;

        if (pool_size <= 0)
                goto err;
        ret = -ENOMEM;
        pool = kzalloc(sizeof(struct srp_fr_pool) +
                       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
        if (!pool)
                goto err;
        pool->size = pool_size;
        pool->max_page_list_len = max_page_list_len;
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
                                 max_page_list_len);
                if (IS_ERR(mr)) {
                        ret = PTR_ERR(mr);
                        goto destroy_pool;
                }
                d->mr = mr;
                list_add_tail(&d->entry, &pool->free_list);
        }

out:
        return pool;

destroy_pool:
        srp_destroy_fr_pool(pool);

err:
        pool = ERR_PTR(ret);
        goto out;
}

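/*
 * srp_create_fr_pool() multiplexes the result pointer and the error code
 * via the ERR_PTR()/PTR_ERR() convention, so callers must test the result
 * with IS_ERR() before dereferencing it. A minimal sketch of the calling
 * convention (illustrative only, not a function in this driver):
 *
 *      struct srp_fr_pool *pool;
 *
 *      pool = srp_create_fr_pool(dev, pd, pool_size, max_page_list_len);
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);
 */
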
/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
        struct srp_fr_desc *d = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        if (!list_empty(&pool->free_list)) {
                d = list_first_entry(&pool->free_list, typeof(*d), entry);
                list_del(&d->entry);
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
                            int n)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&pool->lock, flags);
        for (i = 0; i < n; i++)
                list_add(&desc[i]->entry, &pool->free_list);
        spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;

        return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
                                  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it. This prevents the receive
 * completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct ib_qp *qp)
{
        ib_drain_rq(qp);
        ib_destroy_qp(qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_qp_init_attr *init_attr;
        struct ib_cq *recv_cq, *send_cq;
        struct ib_qp *qp;
        struct ib_fmr_pool *fmr_pool = NULL;
        struct srp_fr_pool *fr_pool = NULL;
        const int m = dev->use_fast_reg ? 3 : 1;
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        /* queue_size + 1 for ib_drain_rq() */
        recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
                                ch->comp_vector, IB_POLL_SOFTIRQ);
        if (IS_ERR(recv_cq)) {
                ret = PTR_ERR(recv_cq);
                goto err;
        }

        send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
                                ch->comp_vector, IB_POLL_DIRECT);
        if (IS_ERR(send_cq)) {
                ret = PTR_ERR(send_cq);
                goto err_recv_cq;
        }

        init_attr->event_handler       = srp_qp_event;
        init_attr->cap.max_send_wr     = m * target->queue_size;
        init_attr->cap.max_recv_wr     = target->queue_size + 1;
        init_attr->cap.max_recv_sge    = 1;
        init_attr->cap.max_send_sge    = 1;
        init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
        init_attr->qp_type             = IB_QPT_RC;
        init_attr->send_cq             = send_cq;
        init_attr->recv_cq             = recv_cq;

        qp = ib_create_qp(dev->pd, init_attr);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_send_cq;
        }

        ret = srp_init_qp(target, qp);
        if (ret)
                goto err_qp;

        if (dev->use_fast_reg) {
                fr_pool = srp_alloc_fr_pool(target);
                if (IS_ERR(fr_pool)) {
                        ret = PTR_ERR(fr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
        } else if (dev->use_fmr) {
                fmr_pool = srp_alloc_fmr_pool(target);
                if (IS_ERR(fmr_pool)) {
                        ret = PTR_ERR(fmr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FMR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
        }

        if (ch->qp)
                srp_destroy_qp(ch->qp);
        if (ch->recv_cq)
                ib_free_cq(ch->recv_cq);
        if (ch->send_cq)
                ib_free_cq(ch->send_cq);

        ch->qp = qp;
        ch->recv_cq = recv_cq;
        ch->send_cq = send_cq;

        if (dev->use_fast_reg) {
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
                ch->fr_pool = fr_pool;
        } else if (dev->use_fmr) {
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
                ch->fmr_pool = fmr_pool;
        }

        kfree(init_attr);
        return 0;

err_qp:
        srp_destroy_qp(qp);

err_send_cq:
        ib_free_cq(send_cq);

err_recv_cq:
        ib_free_cq(recv_cq);

err:
        kfree(init_attr);
        return ret;
}

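/*
 * Note the ordering in srp_create_ch_ib() above: the new CQs, QP and
 * registration pool are all created first, and only once everything has
 * succeeded are the old ch->qp, ch->recv_cq and ch->send_cq destroyed and
 * the new objects swapped in. On failure the err_* labels release only
 * what this invocation created, so the channel's previous resources, if
 * any, remain intact.
 */
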
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
                           struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        int i;

        if (!ch->target)
                return;

        if (ch->cm_id) {
                ib_destroy_cm_id(ch->cm_id);
                ch->cm_id = NULL;
        }

        /* If srp_new_cm_id() succeeded but srp_create_ch_ib() failed, return. */
        if (!ch->qp)
                return;

        if (dev->use_fast_reg) {
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
        } else if (dev->use_fmr) {
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
        }

        srp_destroy_qp(ch->qp);
        ib_free_cq(ch->send_cq);
        ib_free_cq(ch->recv_cq);

        /*
         * Prevent the SCSI error handler from using this channel after it
         * has been freed. The SCSI error handler may keep trying to perform
         * recovery actions after scsi_remove_host() has returned.
         */
        ch->target = NULL;

        ch->qp = NULL;
        ch->send_cq = ch->recv_cq = NULL;

        if (ch->rx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->rx_ring[i]);
                kfree(ch->rx_ring);
                ch->rx_ring = NULL;
        }
        if (ch->tx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->tx_ring[i]);
                kfree(ch->tx_ring);
                ch->tx_ring = NULL;
        }
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *ch_ptr)
{
        struct srp_rdma_ch *ch = ch_ptr;
        struct srp_target_port *target = ch->target;

        ch->status = status;
        if (status)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Got failed path rec status %d\n", status);
        else
                ch->path = *pathrec;
        complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        int ret;

        ch->path.numb_path = 1;

        init_completion(&ch->done);

        ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                                               target->srp_host->srp_dev->dev,
                                               target->srp_host->port,
                                               &ch->path,
                                               IB_SA_PATH_REC_SERVICE_ID |
                                               IB_SA_PATH_REC_DGID       |
                                               IB_SA_PATH_REC_SGID       |
                                               IB_SA_PATH_REC_NUMB_PATH  |
                                               IB_SA_PATH_REC_PKEY,
                                               SRP_PATH_REC_TIMEOUT_MS,
                                               GFP_KERNEL,
                                               srp_path_rec_completion,
                                               ch, &ch->path_query);
        if (ch->path_query_id < 0)
                return ch->path_query_id;

        ret = wait_for_completion_interruptible(&ch->done);
        if (ret < 0)
                return ret;

        if (ch->status < 0)
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Path record query failed\n");

        return ch->status;
}

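/*
 * srp_lookup_path() issues an asynchronous SA path record query and then
 * sleeps on ch->done; srp_path_rec_completion() stores the outcome in
 * ch->status and signals the completion. The same init_completion() plus
 * wait_for_completion_interruptible() pattern is used by srp_connect_ch()
 * below to wait for the CM login to finish.
 */
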
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path               = &ch->path;
        req->param.alternate_path             = NULL;
        req->param.service_id                 = target->service_id;
        req->param.qp_num                     = ch->qp->qp_num;
        req->param.qp_type                    = ch->qp->qp_type;
        req->param.private_data               = &req->priv;
        req->param.private_data_len           = sizeof req->priv;
        req->param.flow_control               = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn              &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources        = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = target->tl_retry_count;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        req->priv.req_flags     = (multich ? SRP_MULTICHAN_MULTI :
                                   SRP_MULTICHAN_SINGLE);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->initiator_ext, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       &target->initiator_ext, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID and set
         * the second 8 bytes to the local node GUID.
         */
        if (srp_target_is_topspin(target)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
                             be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
        }

        status = ib_send_cm_req(ch->cm_id, &req->param);

        kfree(req);

        return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->state != SRP_TARGET_REMOVED) {
                target->state = SRP_TARGET_REMOVED;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        if (changed)
                queue_work(srp_remove_wq, &target->remove_work);

        return changed;
}

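/*
 * srp_queue_remove_work() makes the transition to SRP_TARGET_REMOVED under
 * target->lock, so concurrent callers race on the state change rather than
 * on queue_work(): only the caller that actually changed the state queues
 * target->remove_work, and the removal work therefore runs at most once
 * per target.
 */
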
static void srp_disconnect_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        /* XXX should send SRP_I_LOGOUT request */

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                ch->connected = false;
                if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
                        shost_printk(KERN_DEBUG, target->scsi_host,
                                     PFX "Sending CM DREQ failed\n");
                }
        }
}

static void srp_free_req_data(struct srp_target_port *target,
                              struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        struct srp_request *req;
        int i;

        if (!ch->req_ring)
                return;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                if (dev->use_fast_reg) {
                        kfree(req->fr_list);
                } else {
                        kfree(req->fmr_list);
                        kfree(req->map_page);
                }
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
                                            DMA_TO_DEVICE);
                }
                kfree(req->indirect_desc);
        }

        kfree(ch->req_ring);
        ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *srp_dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = srp_dev->dev;
        struct srp_request *req;
        void *mr_list;
        dma_addr_t dma_addr;
        int i, ret = -ENOMEM;

        ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
                               GFP_KERNEL);
        if (!ch->req_ring)
                goto out;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
                                  GFP_KERNEL);
                if (!mr_list)
                        goto out;
                if (srp_dev->use_fast_reg) {
                        req->fr_list = mr_list;
                } else {
                        req->fmr_list = mr_list;
                        req->map_page = kmalloc(srp_dev->max_pages_per_mr *
                                                sizeof(void *), GFP_KERNEL);
                        if (!req->map_page)
                                goto out;
                }
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->indirect_desc)
                        goto out;

                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
                                             target->indirect_size,
                                             DMA_TO_DEVICE);
                if (ib_dma_mapping_error(ibdev, dma_addr))
                        goto out;

                req->indirect_dma_addr = dma_addr;
        }
        ret = 0;

out:
        return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: any attributes defined in the host template that did not exist before
 * this function was invoked are ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
        struct device_attribute **attr;

        for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
                device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_del_scsi_host_attr(target->scsi_host);
        srp_rport_get(target->rport);
        srp_remove_host(target->scsi_host);
        scsi_remove_host(target->scsi_host);
        srp_stop_rport_timers(target->rport);
        srp_disconnect_target(target);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_ch_ib(target, ch);
        }
        cancel_work_sync(&target->tl_err_work);
        srp_rport_put(target->rport);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_req_data(target, ch);
        }
        kfree(target->ch);
        target->ch = NULL;

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
        struct srp_target_port *target =
                container_of(work, struct srp_target_port, remove_work);

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;

        srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
        int i, c = 0;

        for (i = 0; i < target->ch_count; i++)
                c += target->ch[i].connected;

        return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        int ret;

        WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

        ret = srp_lookup_path(ch);
        if (ret)
                goto out;

        while (1) {
                init_completion(&ch->done);
                ret = srp_send_req(ch, multich);
                if (ret)
                        goto out;
                ret = wait_for_completion_interruptible(&ch->done);
                if (ret < 0)
                        goto out;

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                ret = ch->status;
                switch (ret) {
                case 0:
                        ch->connected = true;
                        goto out;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(ch);
                        if (ret)
                                goto out;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                case SRP_STALE_CONN:
                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "giving up on stale connection\n");
                        ret = -ECONNRESET;
                        goto out;

                default:
                        goto out;
                }
        }

out:
        return ret <= 0 ? ret : -ENODEV;
}

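/*
 * The loop in srp_connect_ch() retries the login after a redirect REJ:
 * SRP_PORT_REDIRECT triggers a fresh path lookup, while for
 * SRP_DLID_REDIRECT the path information already updated by the CM event
 * handling code is reused as-is. The final "ret <= 0 ? ret : -ENODEV" maps
 * any leftover positive status code to an error, since a positive value
 * here never indicates success.
 */
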
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
        srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
                u32 rkey)
{
        struct ib_send_wr *bad_wr;
        struct ib_send_wr wr = {
                .opcode             = IB_WR_LOCAL_INV,
                .next               = NULL,
                .num_sge            = 0,
                .send_flags         = 0,
                .ex.invalidate_rkey = rkey,
        };

        wr.wr_cqe = &req->reg_cqe;
        req->reg_cqe.done = srp_inv_rkey_err_done;
        return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_rdma_ch *ch,
                           struct srp_request *req)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        int i, res;

        if (!scsi_sglist(scmnd) ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        if (dev->use_fast_reg) {
                struct srp_fr_desc **pfr;

                for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
                        res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
                        if (res < 0) {
                                shost_printk(KERN_ERR, target->scsi_host, PFX
                                  "Queueing INV WR for rkey %#x failed (%d)\n",
                                  (*pfr)->mr->rkey, res);
                                queue_work(system_long_wq,
                                           &target->tl_err_work);
                        }
                }
                if (req->nmdesc)
                        srp_fr_pool_put(ch->fr_pool, req->fr_list,
                                        req->nmdesc);
        } else if (dev->use_fmr) {
                struct ib_pool_fmr **pfmr;

                for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
                        ib_fmr_pool_unmap(*pfmr);
        }

        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
                        scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
                                       struct srp_request *req,
                                       struct scsi_device *sdev,
                                       struct scsi_cmnd *scmnd)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        if (req->scmnd &&
            (!sdev || req->scmnd->device == sdev) &&
            (!scmnd || req->scmnd == scmnd)) {
                scmnd = req->scmnd;
                req->scmnd = NULL;
        } else {
                scmnd = NULL;
        }
        spin_unlock_irqrestore(&ch->lock, flags);

        return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
                         struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
        unsigned long flags;

        srp_unmap_data(scmnd, ch, req);

        spin_lock_irqsave(&ch->lock, flags);
        ch->req_lim += req_lim_delta;
        spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
                           struct scsi_device *sdev, int result)
{
        struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

        if (scmnd) {
                srp_free_req(ch, req, scmnd, 0);
                scmnd->result = result;
                scmnd->scsi_done(scmnd);
        }
}

static void srp_terminate_io(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        struct Scsi_Host *shost = target->scsi_host;
        struct scsi_device *sdev;
        int i, j;

        /*
         * Invoking srp_terminate_io() while srp_queuecommand() is running
         * is not safe. Hence the warning statement below.
         */
        shost_for_each_device(sdev, shost)
                WARN_ON_ONCE(sdev->request_queue->request_fn_active);

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];

                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL,
                                       DID_TRANSPORT_FAILFAST << 16);
                }
        }
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to achieve that is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        int i, j, ret = 0;
        bool multich = false;

        srp_disconnect_target(target);

        if (target->state == SRP_TARGET_SCANNING)
                return -ENODEV;

        /*
         * Now get a new local CM ID so that we avoid confusing the target in
         * case things are really fouled up. Doing so also ensures that all CM
         * callbacks will have finished before a new QP is allocated.
         */
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                ret += srp_new_cm_id(ch);
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL, DID_RESET << 16);
                }
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                /*
                 * Whether or not creating a new CM ID succeeded, create a new
                 * QP. This guarantees that all completion callback function
                 * invocations have finished before request resetting starts.
                 */
                ret += srp_create_ch_ib(ch);

                INIT_LIST_HEAD(&ch->free_tx);
                for (j = 0; j < target->queue_size; ++j)
                        list_add(&ch->tx_ring[j]->list, &ch->free_tx);
        }

        target->qp_in_error = false;

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (ret)
                        break;
                ret = srp_connect_ch(ch, multich);
                multich = true;
        }

        if (ret == 0)
                shost_printk(KERN_INFO, target->scsi_host,
                             PFX "reconnect succeeded\n");

        return ret;
}

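/*
 * srp_rport_reconnect() deliberately accumulates errors with "ret +=" and
 * keeps going instead of aborting on the first failure: every channel
 * still gets a new CM ID and a new QP, which guarantees that no stale CM
 * or completion callbacks can run, and the accumulated result is only
 * used to decide whether the reconnect as a whole succeeded.
 */
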
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
                         unsigned int dma_len, u32 rkey)
{
        struct srp_direct_buf *desc = state->desc;

        WARN_ON_ONCE(!dma_len);

        desc->va = cpu_to_be64(dma_addr);
        desc->key = cpu_to_be32(rkey);
        desc->len = cpu_to_be32(dma_len);

        state->total_len += dma_len;
        state->desc++;
        state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
                              struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_pool_fmr *fmr;
        u64 io_addr = 0;

        if (state->fmr.next >= state->fmr.end)
                return -ENOMEM;

        WARN_ON_ONCE(!dev->use_fmr);

        if (state->npages == 0)
                return 0;

        if (state->npages == 1 && target->global_mr) {
                srp_map_desc(state, state->base_dma_addr, state->dma_len,
                             target->global_mr->rkey);
                goto reset_state;
        }

        fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
                                   state->npages, io_addr);
        if (IS_ERR(fmr))
                return PTR_ERR(fmr);

        *state->fmr.next++ = fmr;
        state->nmdesc++;

        srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
                     state->dma_len, fmr->fmr->rkey);

reset_state:
        state->npages = 0;
        state->dma_len = 0;

        return 0;
}

static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
        srp_handle_qp_err(cq, wc, "FAST REG");
}

static int srp_map_finish_fr(struct srp_map_state *state,
                             struct srp_request *req,
                             struct srp_rdma_ch *ch, int sg_nents)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_send_wr *bad_wr;
        struct ib_reg_wr wr;
        struct srp_fr_desc *desc;
        u32 rkey;
        int n, err;

        if (state->fr.next >= state->fr.end)
                return -ENOMEM;

        WARN_ON_ONCE(!dev->use_fast_reg);

        if (sg_nents == 1 && target->global_mr) {
                srp_map_desc(state, sg_dma_address(state->sg),
                             sg_dma_len(state->sg),
                             target->global_mr->rkey);
                return 1;
        }

        desc = srp_fr_pool_get(ch->fr_pool);
        if (!desc)
                return -ENOMEM;

        rkey = ib_inc_rkey(desc->mr->rkey);
        ib_update_fast_reg_key(desc->mr, rkey);

        n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, 0, dev->mr_page_size);
        if (unlikely(n < 0)) {
                srp_fr_pool_put(ch->fr_pool, &desc, 1);
                pr_debug("%s: ib_map_mr_sg(%d) returned %d.\n",
                         dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
                         n);
                return n;
        }

        req->reg_cqe.done = srp_reg_mr_err_done;

        wr.wr.next = NULL;
        wr.wr.opcode = IB_WR_REG_MR;
        wr.wr.wr_cqe = &req->reg_cqe;
        wr.wr.num_sge = 0;
        wr.wr.send_flags = 0;
        wr.mr = desc->mr;
        wr.key = desc->mr->rkey;
        wr.access = (IB_ACCESS_LOCAL_WRITE |
                     IB_ACCESS_REMOTE_READ |
                     IB_ACCESS_REMOTE_WRITE);

        *state->fr.next++ = desc;
        state->nmdesc++;

        srp_map_desc(state, desc->mr->iova,
                     desc->mr->length, desc->mr->rkey);

        err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
        if (unlikely(err))
                return err;

        return n;
}

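/*
 * Before reusing a fast registration descriptor, srp_map_finish_fr() bumps
 * the key portion of the rkey with ib_inc_rkey() and installs it with
 * ib_update_fast_reg_key(), so any stale copy of the previous rkey that
 * the target may still hold can no longer match the MR.
 */
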
1367 static int srp_map_sg_entry(struct srp_map_state *state,
1368                             struct srp_rdma_ch *ch,
1369                             struct scatterlist *sg, int sg_index)
1370 {
1371         struct srp_target_port *target = ch->target;
1372         struct srp_device *dev = target->srp_host->srp_dev;
1373         struct ib_device *ibdev = dev->dev;
1374         dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1375         unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1376         unsigned int len = 0;
1377         int ret;
1378
1379         WARN_ON_ONCE(!dma_len);
1380
1381         while (dma_len) {
1382                 unsigned offset = dma_addr & ~dev->mr_page_mask;
1383                 if (state->npages == dev->max_pages_per_mr || offset != 0) {
1384                         ret = srp_map_finish_fmr(state, ch);
1385                         if (ret)
1386                                 return ret;
1387                 }
1388
1389                 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1390
1391                 if (!state->npages)
1392                         state->base_dma_addr = dma_addr;
1393                 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1394                 state->dma_len += len;
1395                 dma_addr += len;
1396                 dma_len -= len;
1397         }
1398
1399         /*
1400          * If the last entry of the MR wasn't a full page, then we need to
1401          * close it out and start a new one -- we can only merge at page
1402          * boundaries.
1403          */
1404         ret = 0;
1405         if (len != dev->mr_page_size)
1406                 ret = srp_map_finish_fmr(state, ch);
1407         return ret;
1408 }
1409
1410 static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1411                           struct srp_request *req, struct scatterlist *scat,
1412                           int count)
1413 {
1414         struct scatterlist *sg;
1415         int i, ret;
1416
1417         state->pages = req->map_page;
1418         state->fmr.next = req->fmr_list;
1419         state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1420
1421         for_each_sg(scat, sg, count, i) {
1422                 ret = srp_map_sg_entry(state, ch, sg, i);
1423                 if (ret)
1424                         return ret;
1425         }
1426
1427         ret = srp_map_finish_fmr(state, ch);
1428         if (ret)
1429                 return ret;
1430
1431         return 0;
1432 }
1433
1434 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1435                          struct srp_request *req, struct scatterlist *scat,
1436                          int count)
1437 {
1438         state->desc = req->indirect_desc;
1439         state->fr.next = req->fr_list;
1440         state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1441         state->sg = scat;
1442
1443         if (count == 0)
1444                 return 0;
1445
1446         while (count) {
1447                 int i, n;
1448
1449                 n = srp_map_finish_fr(state, req, ch, count);
1450                 if (unlikely(n < 0))
1451                         return n;
1452
1453                 count -= n;
1454                 for (i = 0; i < n; i++)
1455                         state->sg = sg_next(state->sg);
1456         }
1457
1458         return 0;
1459 }
1460
1461 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1462                           struct srp_request *req, struct scatterlist *scat,
1463                           int count)
1464 {
1465         struct srp_target_port *target = ch->target;
1466         struct srp_device *dev = target->srp_host->srp_dev;
1467         struct scatterlist *sg;
1468         int i;
1469
1470         state->desc = req->indirect_desc;
1471         for_each_sg(scat, sg, count, i) {
1472                 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1473                              ib_sg_dma_len(dev->dev, sg),
1474                              target->global_mr->rkey);
1475         }
1476
1477         return 0;
1478 }
1479
1480 /*
1481  * Register the indirect data buffer descriptor with the HCA.
1482  *
1483  * Note: since the indirect data buffer descriptor has been allocated with
1484  * kmalloc() it is guaranteed that this buffer is a physically contiguous
1485  * memory buffer.
1486  */
1487 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1488                        void **next_mr, void **end_mr, u32 idb_len,
1489                        __be32 *idb_rkey)
1490 {
1491         struct srp_target_port *target = ch->target;
1492         struct srp_device *dev = target->srp_host->srp_dev;
1493         struct srp_map_state state;
1494         struct srp_direct_buf idb_desc;
1495         u64 idb_pages[1];
1496         struct scatterlist idb_sg[1];
1497         int ret;
1498
1499         memset(&state, 0, sizeof(state));
1500         memset(&idb_desc, 0, sizeof(idb_desc));
1501         state.gen.next = next_mr;
1502         state.gen.end = end_mr;
1503         state.desc = &idb_desc;
1504         state.base_dma_addr = req->indirect_dma_addr;
1505         state.dma_len = idb_len;
1506
1507         if (dev->use_fast_reg) {
1508                 state.sg = idb_sg;
1509                 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
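                     /*
                      * Fill in the DMA address and length by hand: the
                      * indirect table is not part of a mapped S/G list and
                      * only its bus address (req->indirect_dma_addr) is
                      * known, so there is no scatterlist entry to copy
                      * these fields from.
                      */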
1510                 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1511 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1512                 idb_sg->dma_length = idb_sg->length;          /* hack^2 */
1513 #endif
1514                 ret = srp_map_finish_fr(&state, req, ch, 1);
1515                 if (ret < 0)
1516                         return ret;
1517         } else if (dev->use_fmr) {
1518                 state.pages = idb_pages;
1519                 state.pages[0] = (req->indirect_dma_addr &
1520                                   dev->mr_page_mask);
1521                 state.npages = 1;
1522                 ret = srp_map_finish_fmr(&state, ch);
1523                 if (ret < 0)
1524                         return ret;
1525         } else {
1526                 return -EINVAL;
1527         }
1528
1529         *idb_rkey = idb_desc.key;
1530
1531         return 0;
1532 }
1533
1534 /**
1535  * srp_map_data() - map SCSI data buffer onto an SRP request
1536  * @scmnd: SCSI command to map
1537  * @ch: SRP RDMA channel
1538  * @req: SRP request
1539  *
1540  * Returns the length in bytes of the SRP_CMD IU or a negative value if
1541  * mapping failed.
1542  */
1543 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1544                         struct srp_request *req)
1545 {
1546         struct srp_target_port *target = ch->target;
1547         struct scatterlist *scat;
1548         struct srp_cmd *cmd = req->cmd->buf;
1549         int len, nents, count, ret;
1550         struct srp_device *dev;
1551         struct ib_device *ibdev;
1552         struct srp_map_state state;
1553         struct srp_indirect_buf *indirect_hdr;
1554         u32 idb_len, table_len;
1555         __be32 idb_rkey;
1556         u8 fmt;
1557
1558         if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1559                 return sizeof(struct srp_cmd);
1560
1561         if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1562             scmnd->sc_data_direction != DMA_TO_DEVICE) {
1563                 shost_printk(KERN_WARNING, target->scsi_host,
1564                              PFX "Unhandled data direction %d\n",
1565                              scmnd->sc_data_direction);
1566                 return -EINVAL;
1567         }
1568
1569         nents = scsi_sg_count(scmnd);
1570         scat  = scsi_sglist(scmnd);
1571
1572         dev = target->srp_host->srp_dev;
1573         ibdev = dev->dev;
1574
1575         count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1576         if (unlikely(count == 0))
1577                 return -EIO;
1578
1579         fmt = SRP_DATA_DESC_DIRECT;
1580         len = sizeof(struct srp_cmd) + sizeof(struct srp_direct_buf);
1581
1582         if (count == 1 && target->global_mr) {
1583                 /*
1584                  * The midlayer only generated a single gather/scatter
1585                  * entry, or DMA mapping coalesced everything to a
1586                  * single entry.  So a direct descriptor along with
1587                  * the DMA MR suffices.
1588                  */
1589                 struct srp_direct_buf *buf = (void *) cmd->add_data;
1590
1591                 buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1592                 buf->key = cpu_to_be32(target->global_mr->rkey);
1593                 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1594
1595                 req->nmdesc = 0;
1596                 goto map_complete;
1597         }
1598
1599         /*
1600          * We have more than one scatter/gather entry, so build our indirect
1601          * descriptor table, trying to merge as many entries as we can.
1602          */
1603         indirect_hdr = (void *) cmd->add_data;
1604
1605         ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1606                                    target->indirect_size, DMA_TO_DEVICE);
1607
1608         memset(&state, 0, sizeof(state));
1609         if (dev->use_fast_reg)
1610                 ret = srp_map_sg_fr(&state, ch, req, scat, count);
1611         else if (dev->use_fmr)
1612                 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
1613         else
1614                 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1615         req->nmdesc = state.nmdesc;
1616         if (ret < 0)
1617                 goto unmap;
1618
1619         /* We've mapped the request, now pull as much of the indirect
1620          * descriptor table as we can into the command buffer. If this
1621          * target is not using an external indirect table, we are
1622          * guaranteed to fit into the command, as the SCSI layer won't
1623          * give us more S/G entries than we allow.
1624          */
1625         if (state.ndesc == 1) {
1626                 /*
1627                  * Memory registration collapsed the sg-list into one entry,
1628                  * so use a direct descriptor.
1629                  */
1630                 struct srp_direct_buf *buf = (void *) cmd->add_data;
1631
1632                 *buf = req->indirect_desc[0];
1633                 goto map_complete;
1634         }
1635
1636         if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1637                      !target->allow_ext_sg)) {
1638                 shost_printk(KERN_ERR, target->scsi_host,
1639                              "Could not fit S/G list into SRP_CMD\n");
1640                 ret = -EIO;
1641                 goto unmap;
1642         }
1643
1644         count = min(state.ndesc, target->cmd_sg_cnt);
1645         table_len = state.ndesc * sizeof(struct srp_direct_buf);
1646         idb_len = sizeof(struct srp_indirect_buf) + table_len;
1647
1648         fmt = SRP_DATA_DESC_INDIRECT;
1649         len = sizeof(struct srp_cmd) + sizeof(struct srp_indirect_buf);
1650         len += count * sizeof(struct srp_direct_buf);
1651
1652         memcpy(indirect_hdr->desc_list, req->indirect_desc,
1653                count * sizeof(struct srp_direct_buf));
1654
1655         if (!target->global_mr) {
1656                 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1657                                   idb_len, &idb_rkey);
1658                 if (ret < 0)
1659                         goto unmap;
1660                 req->nmdesc++;
1661         } else {
1662                 idb_rkey = cpu_to_be32(target->global_mr->rkey);
1663         }
1664
1665         indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1666         indirect_hdr->table_desc.key = idb_rkey;
1667         indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1668         indirect_hdr->len = cpu_to_be32(state.total_len);
1669
1670         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1671                 cmd->data_out_desc_cnt = count;
1672         else
1673                 cmd->data_in_desc_cnt = count;
1674
1675         ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1676                                       DMA_TO_DEVICE);
1677
1678 map_complete:
1679         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1680                 cmd->buf_fmt = fmt << 4;
1681         else
1682                 cmd->buf_fmt = fmt;
1683
1684         return len;
1685
1686 unmap:
1687         srp_unmap_data(scmnd, ch, req);
1688         if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1689                 ret = -E2BIG;
1690         return ret;
1691 }
1692
1693 /*
1694  * Return an IU and possible credit to the free pool
1695  */
1696 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1697                           enum srp_iu_type iu_type)
1698 {
1699         unsigned long flags;
1700
1701         spin_lock_irqsave(&ch->lock, flags);
1702         list_add(&iu->list, &ch->free_tx);
1703         if (iu_type != SRP_IU_RSP)
1704                 ++ch->req_lim;
1705         spin_unlock_irqrestore(&ch->lock, flags);
1706 }
1707
1708 /*
1709  * Must be called with ch->lock held to protect req_lim and free_tx.
1710  * If IU is not sent, it must be returned using srp_put_tx_iu().
1711  *
1712  * Note:
1713  * An upper limit for the number of allocated information units for each
1714  * request type is:
1715  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1716  *   more than Scsi_Host.can_queue requests.
1717  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1718  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1719  *   one unanswered SRP request to an initiator.
1720  */
1721 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1722                                       enum srp_iu_type iu_type)
1723 {
1724         struct srp_target_port *target = ch->target;
1725         s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1726         struct srp_iu *iu;
1727
1728         ib_process_cq_direct(ch->send_cq, -1);
1729
1730         if (list_empty(&ch->free_tx))
1731                 return NULL;
1732
1733         /* Initiator responses to target requests do not consume credits */
1734         if (iu_type != SRP_IU_RSP) {
1735                 if (ch->req_lim <= rsv) {
1736                         ++target->zero_req_lim;
1737                         return NULL;
1738                 }
1739
1740                 --ch->req_lim;
1741         }
1742
1743         iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1744         list_del(&iu->list);
1745         return iu;
1746 }
1747
1748 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1749 {
1750         struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1751         struct srp_rdma_ch *ch = cq->cq_context;
1752
1753         if (unlikely(wc->status != IB_WC_SUCCESS)) {
1754                 srp_handle_qp_err(cq, wc, "SEND");
1755                 return;
1756         }
1757
1758         list_add(&iu->list, &ch->free_tx);
1759 }
1760
1761 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1762 {
1763         struct srp_target_port *target = ch->target;
1764         struct ib_sge list;
1765         struct ib_send_wr wr, *bad_wr;
1766
1767         list.addr   = iu->dma;
1768         list.length = len;
1769         list.lkey   = target->lkey;
1770
1771         iu->cqe.done = srp_send_done;
1772
1773         wr.next       = NULL;
1774         wr.wr_cqe     = &iu->cqe;
1775         wr.sg_list    = &list;
1776         wr.num_sge    = 1;
1777         wr.opcode     = IB_WR_SEND;
1778         wr.send_flags = IB_SEND_SIGNALED;
1779
1780         return ib_post_send(ch->qp, &wr, &bad_wr);
1781 }
1782
1783 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1784 {
1785         struct srp_target_port *target = ch->target;
1786         struct ib_recv_wr wr, *bad_wr;
1787         struct ib_sge list;
1788
1789         list.addr   = iu->dma;
1790         list.length = iu->size;
1791         list.lkey   = target->lkey;
1792
1793         iu->cqe.done = srp_recv_done;
1794
1795         wr.next     = NULL;
1796         wr.wr_cqe   = &iu->cqe;
1797         wr.sg_list  = &list;
1798         wr.num_sge  = 1;
1799
1800         return ib_post_recv(ch->qp, &wr, &bad_wr);
1801 }
1802
1803 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1804 {
1805         struct srp_target_port *target = ch->target;
1806         struct srp_request *req;
1807         struct scsi_cmnd *scmnd;
1808         unsigned long flags;
1809
1810         if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1811                 spin_lock_irqsave(&ch->lock, flags);
1812                 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1813                 spin_unlock_irqrestore(&ch->lock, flags);
1814
1815                 ch->tsk_mgmt_status = -1;
1816                 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1817                         ch->tsk_mgmt_status = rsp->data[3];
1818                 complete(&ch->tsk_mgmt_done);
1819         } else {
1820                 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1821                 if (scmnd) {
1822                         req = (void *)scmnd->host_scribble;
1823                         scmnd = srp_claim_req(ch, req, NULL, scmnd);
1824                 }
1825                 if (!scmnd) {
1826                         shost_printk(KERN_ERR, target->scsi_host,
1827                                      "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1828                                      rsp->tag, ch - target->ch, ch->qp->qp_num);
1829
1830                         spin_lock_irqsave(&ch->lock, flags);
1831                         ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1832                         spin_unlock_irqrestore(&ch->lock, flags);
1833
1834                         return;
1835                 }
1836                 scmnd->result = rsp->status;
1837
1838                 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1839                         memcpy(scmnd->sense_buffer, rsp->data +
1840                                be32_to_cpu(rsp->resp_data_len),
1841                                min_t(int, be32_to_cpu(rsp->sense_data_len),
1842                                      SCSI_SENSE_BUFFERSIZE));
1843                 }
1844
1845                 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1846                         scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1847                 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1848                         scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1849                 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1850                         scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1851                 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1852                         scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1853
1854                 srp_free_req(ch, req, scmnd,
1855                              be32_to_cpu(rsp->req_lim_delta));
1856
1857                 scmnd->host_scribble = NULL;
1858                 scmnd->scsi_done(scmnd);
1859         }
1860 }
1861
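     /*
      * Send a response IU, e.g. an SRP_CRED_RSP or SRP_AER_RSP, back to the
      * target after having added req_delta to the request limit. Returns
      * zero upon success and a non-zero value upon failure.
      */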
1862 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1863                                void *rsp, int len)
1864 {
1865         struct srp_target_port *target = ch->target;
1866         struct ib_device *dev = target->srp_host->srp_dev->dev;
1867         unsigned long flags;
1868         struct srp_iu *iu;
1869         int err;
1870
1871         spin_lock_irqsave(&ch->lock, flags);
1872         ch->req_lim += req_delta;
1873         iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1874         spin_unlock_irqrestore(&ch->lock, flags);
1875
1876         if (!iu) {
1877                 shost_printk(KERN_ERR, target->scsi_host, PFX
1878                              "no IU available to send response\n");
1879                 return 1;
1880         }
1881
1882         ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1883         memcpy(iu->buf, rsp, len);
1884         ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1885
1886         err = srp_post_send(ch, iu, len);
1887         if (err) {
1888                 shost_printk(KERN_ERR, target->scsi_host, PFX
1889                              "unable to post response: %d\n", err);
1890                 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1891         }
1892
1893         return err;
1894 }
1895
1896 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1897                                  struct srp_cred_req *req)
1898 {
1899         struct srp_cred_rsp rsp = {
1900                 .opcode = SRP_CRED_RSP,
1901                 .tag = req->tag,
1902         };
1903         s32 delta = be32_to_cpu(req->req_lim_delta);
1904
1905         if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1906                 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1907                              "problems processing SRP_CRED_REQ\n");
1908 }
1909
1910 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1911                                 struct srp_aer_req *req)
1912 {
1913         struct srp_target_port *target = ch->target;
1914         struct srp_aer_rsp rsp = {
1915                 .opcode = SRP_AER_RSP,
1916                 .tag = req->tag,
1917         };
1918         s32 delta = be32_to_cpu(req->req_lim_delta);
1919
1920         shost_printk(KERN_ERR, target->scsi_host, PFX
1921                      "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1922
1923         if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1924                 shost_printk(KERN_ERR, target->scsi_host, PFX
1925                              "problems processing SRP_AER_REQ\n");
1926 }
1927
1928 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1929 {
1930         struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1931         struct srp_rdma_ch *ch = cq->cq_context;
1932         struct srp_target_port *target = ch->target;
1933         struct ib_device *dev = target->srp_host->srp_dev->dev;
1934         int res;
1935         u8 opcode;
1936
1937         if (unlikely(wc->status != IB_WC_SUCCESS)) {
1938                 srp_handle_qp_err(cq, wc, "RECV");
1939                 return;
1940         }
1941
1942         ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1943                                    DMA_FROM_DEVICE);
1944
1945         opcode = *(u8 *) iu->buf;
1946
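             /*
              * Change "if (0)" into "if (1)" below to dump every received
              * information unit while debugging.
              */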
1947         if (0) {
1948                 shost_printk(KERN_ERR, target->scsi_host,
1949                              PFX "recv completion, opcode 0x%02x\n", opcode);
1950                 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1951                                iu->buf, wc->byte_len, true);
1952         }
1953
1954         switch (opcode) {
1955         case SRP_RSP:
1956                 srp_process_rsp(ch, iu->buf);
1957                 break;
1958
1959         case SRP_CRED_REQ:
1960                 srp_process_cred_req(ch, iu->buf);
1961                 break;
1962
1963         case SRP_AER_REQ:
1964                 srp_process_aer_req(ch, iu->buf);
1965                 break;
1966
1967         case SRP_T_LOGOUT:
1968                 /* XXX Handle target logout */
1969                 shost_printk(KERN_WARNING, target->scsi_host,
1970                              PFX "Got target logout request\n");
1971                 break;
1972
1973         default:
1974                 shost_printk(KERN_WARNING, target->scsi_host,
1975                              PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1976                 break;
1977         }
1978
1979         ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1980                                       DMA_FROM_DEVICE);
1981
1982         res = srp_post_recv(ch, iu);
1983         if (res != 0)
1984                 shost_printk(KERN_ERR, target->scsi_host,
1985                              PFX "Recv failed with error code %d\n", res);
1986 }
1987
1988 /**
1989  * srp_tl_err_work() - handle a transport layer error
1990  * @work: Work structure embedded in an SRP target port.
1991  *
1992  * Note: This function may get invoked before the rport has been created,
1993  * hence the target->rport test.
1994  */
1995 static void srp_tl_err_work(struct work_struct *work)
1996 {
1997         struct srp_target_port *target;
1998
1999         target = container_of(work, struct srp_target_port, tl_err_work);
2000         if (target->rport)
2001                 srp_start_tl_fail_timers(target->rport);
2002 }
2003
2004 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2005                 const char *opname)
2006 {
2007         struct srp_rdma_ch *ch = cq->cq_context;
2008         struct srp_target_port *target = ch->target;
2009
2010         if (ch->connected && !target->qp_in_error) {
2011                 shost_printk(KERN_ERR, target->scsi_host,
2012                              PFX "failed %s status %s (%d) for CQE %p\n",
2013                              opname, ib_wc_status_msg(wc->status), wc->status,
2014                              wc->wr_cqe);
2015                 queue_work(system_long_wq, &target->tl_err_work);
2016         }
2017         target->qp_in_error = true;
2018 }
2019
2020 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2021 {
2022         struct srp_target_port *target = host_to_target(shost);
2023         struct srp_rport *rport = target->rport;
2024         struct srp_rdma_ch *ch;
2025         struct srp_request *req;
2026         struct srp_iu *iu;
2027         struct srp_cmd *cmd;
2028         struct ib_device *dev;
2029         unsigned long flags;
2030         u32 tag;
2031         u16 idx;
2032         int len, ret;
2033         const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2034
2035         /*
2036          * The SCSI EH thread is the only context from which srp_queuecommand()
2037          * can get invoked for blocked devices (SDEV_BLOCK /
2038          * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2039          * locking the rport mutex if invoked from inside the SCSI EH.
2040          */
2041         if (in_scsi_eh)
2042                 mutex_lock(&rport->mutex);
2043
2044         scmnd->result = srp_chkready(target->rport);
2045         if (unlikely(scmnd->result))
2046                 goto err;
2047
2048         WARN_ON_ONCE(scmnd->request->tag < 0);
2049         tag = blk_mq_unique_tag(scmnd->request);
2050         ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2051         idx = blk_mq_unique_tag_to_tag(tag);
2052         WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2053                   dev_name(&shost->shost_gendev), tag, idx,
2054                   target->req_ring_size);
2055
2056         spin_lock_irqsave(&ch->lock, flags);
2057         iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2058         spin_unlock_irqrestore(&ch->lock, flags);
2059
2060         if (!iu)
2061                 goto err;
2062
2063         req = &ch->req_ring[idx];
2064         dev = target->srp_host->srp_dev->dev;
2065         ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2066                                    DMA_TO_DEVICE);
2067
2068         scmnd->host_scribble = (void *) req;
2069
2070         cmd = iu->buf;
2071         memset(cmd, 0, sizeof(*cmd));
2072
2073         cmd->opcode = SRP_CMD;
2074         int_to_scsilun(scmnd->device->lun, &cmd->lun);
2075         cmd->tag    = tag;
2076         memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2077
2078         req->scmnd    = scmnd;
2079         req->cmd      = iu;
2080
2081         len = srp_map_data(scmnd, ch, req);
2082         if (len < 0) {
2083                 shost_printk(KERN_ERR, target->scsi_host,
2084                              PFX "Failed to map data (%d)\n", len);
2085                 /*
2086                  * If we ran out of memory descriptors (-ENOMEM) because an
2087                  * application is queuing many requests with more than
2088                  * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2089                  * to reduce queue depth temporarily.
2090                  */
2091                 scmnd->result = len == -ENOMEM ?
2092                         DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2093                 goto err_iu;
2094         }
2095
2096         ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2097                                       DMA_TO_DEVICE);
2098
2099         if (srp_post_send(ch, iu, len)) {
2100                 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2101                 goto err_unmap;
2102         }
2103
2104         ret = 0;
2105
2106 unlock_rport:
2107         if (in_scsi_eh)
2108                 mutex_unlock(&rport->mutex);
2109
2110         return ret;
2111
2112 err_unmap:
2113         srp_unmap_data(scmnd, ch, req);
2114
2115 err_iu:
2116         srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2117
2118         /*
2119          * Prevent the loops that iterate over the request ring from
2120          * encountering a dangling SCSI command pointer.
2121          */
2122         req->scmnd = NULL;
2123
2124 err:
2125         if (scmnd->result) {
2126                 scmnd->scsi_done(scmnd);
2127                 ret = 0;
2128         } else {
2129                 ret = SCSI_MLQUEUE_HOST_BUSY;
2130         }
2131
2132         goto unlock_rport;
2133 }
2134
2135 /*
2136  * Note: the resources allocated in this function are freed in
2137  * srp_free_ch_ib().
2138  */
2139 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2140 {
2141         struct srp_target_port *target = ch->target;
2142         int i;
2143
2144         ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2145                               GFP_KERNEL);
2146         if (!ch->rx_ring)
2147                 goto err_no_ring;
2148         ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2149                               GFP_KERNEL);
2150         if (!ch->tx_ring)
2151                 goto err_no_ring;
2152
2153         for (i = 0; i < target->queue_size; ++i) {
2154                 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2155                                               ch->max_ti_iu_len,
2156                                               GFP_KERNEL, DMA_FROM_DEVICE);
2157                 if (!ch->rx_ring[i])
2158                         goto err;
2159         }
2160
2161         for (i = 0; i < target->queue_size; ++i) {
2162                 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2163                                               target->max_iu_len,
2164                                               GFP_KERNEL, DMA_TO_DEVICE);
2165                 if (!ch->tx_ring[i])
2166                         goto err;
2167
2168                 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2169         }
2170
2171         return 0;
2172
2173 err:
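             /*
              * srp_free_iu() ignores NULL pointers, hence iterating over the
              * whole ring is safe even if allocation failed halfway through.
              */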
2174         for (i = 0; i < target->queue_size; ++i) {
2175                 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2176                 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2177         }
2178
2180 err_no_ring:
2181         kfree(ch->tx_ring);
2182         ch->tx_ring = NULL;
2183         kfree(ch->rx_ring);
2184         ch->rx_ring = NULL;
2185
2186         return -ENOMEM;
2187 }
2188
2189 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2190 {
2191         uint64_t T_tr_ns, max_compl_time_ms;
2192         uint32_t rq_tmo_jiffies;
2193
2194         /*
2195          * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2196          * table 91), both the QP timeout and the retry count have to be set
2197          * for RC QP's during the RTR to RTS transition.
2198          */
2199         WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2200                      (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2201
2202         /*
2203          * Set target->rq_tmo_jiffies to one second more than the largest time
2204          * it can take before an error completion is generated. See also
2205          * C9-140..142 in the IBTA spec for more information about how to
2206          * convert the QP Local ACK Timeout value to nanoseconds.
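               *
               * A worked example with hypothetical values: for timeout == 19
               * and retry_cnt == 7, T_tr_ns = 4096 * 2^19 ns ~= 2.15 s, the
               * largest completion time is 7 * 4 * 2.15 s ~= 60 s and hence
               * rq_tmo_jiffies corresponds to about 61 seconds.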
2207          */
2208         T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2209         max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2210         do_div(max_compl_time_ms, NSEC_PER_MSEC);
2211         rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2212
2213         return rq_tmo_jiffies;
2214 }
2215
2216 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2217                                const struct srp_login_rsp *lrsp,
2218                                struct srp_rdma_ch *ch)
2219 {
2220         struct srp_target_port *target = ch->target;
2221         struct ib_qp_attr *qp_attr = NULL;
2222         int attr_mask = 0;
2223         int ret;
2224         int i;
2225
2226         if (lrsp->opcode == SRP_LOGIN_RSP) {
2227                 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2228                 ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2229
2230                 /*
2231                  * Reserve credits for task management so we don't
2232                  * bounce requests back to the SCSI mid-layer.
2233                  */
2234                 target->scsi_host->can_queue
2235                         = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2236                               target->scsi_host->can_queue);
2237                 target->scsi_host->cmd_per_lun
2238                         = min_t(int, target->scsi_host->can_queue,
2239                                 target->scsi_host->cmd_per_lun);
2240         } else {
2241                 shost_printk(KERN_WARNING, target->scsi_host,
2242                              PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2243                 ret = -ECONNRESET;
2244                 goto error;
2245         }
2246
2247         if (!ch->rx_ring) {
2248                 ret = srp_alloc_iu_bufs(ch);
2249                 if (ret)
2250                         goto error;
2251         }
2252
2253         ret = -ENOMEM;
2254         qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2255         if (!qp_attr)
2256                 goto error;
2257
2258         qp_attr->qp_state = IB_QPS_RTR;
2259         ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2260         if (ret)
2261                 goto error_free;
2262
2263         ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2264         if (ret)
2265                 goto error_free;
2266
2267         for (i = 0; i < target->queue_size; i++) {
2268                 struct srp_iu *iu = ch->rx_ring[i];
2269
2270                 ret = srp_post_recv(ch, iu);
2271                 if (ret)
2272                         goto error_free;
2273         }
2274
2275         qp_attr->qp_state = IB_QPS_RTS;
2276         ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2277         if (ret)
2278                 goto error_free;
2279
2280         target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2281
2282         ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2283         if (ret)
2284                 goto error_free;
2285
2286         ret = ib_send_cm_rtu(cm_id, NULL, 0);
2287
2288 error_free:
2289         kfree(qp_attr);
2290
2291 error:
2292         ch->status = ret;
2293 }
2294
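     /*
      * Translate an IB CM REJ event into a ch->status value: either one of
      * the SRP_*_REDIRECT / SRP_STALE_CONN codes that the connect path can
      * act upon, or -ECONNRESET.
      */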
2295 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2296                                struct ib_cm_event *event,
2297                                struct srp_rdma_ch *ch)
2298 {
2299         struct srp_target_port *target = ch->target;
2300         struct Scsi_Host *shost = target->scsi_host;
2301         struct ib_class_port_info *cpi;
2302         int opcode;
2303
2304         switch (event->param.rej_rcvd.reason) {
2305         case IB_CM_REJ_PORT_CM_REDIRECT:
2306                 cpi = event->param.rej_rcvd.ari;
2307                 ch->path.dlid = cpi->redirect_lid;
2308                 ch->path.pkey = cpi->redirect_pkey;
2309                 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2310                 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2311
2312                 ch->status = ch->path.dlid ?
2313                         SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2314                 break;
2315
2316         case IB_CM_REJ_PORT_REDIRECT:
2317                 if (srp_target_is_topspin(target)) {
2318                         /*
2319                          * Topspin/Cisco SRP gateways incorrectly send
2320                          * reject reason code 25 when they mean 24
2321                          * (port redirect).
2322                          */
2323                         memcpy(ch->path.dgid.raw,
2324                                event->param.rej_rcvd.ari, 16);
2325
2326                         shost_printk(KERN_DEBUG, shost,
2327                                      PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2328                                      be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2329                                      be64_to_cpu(ch->path.dgid.global.interface_id));
2330
2331                         ch->status = SRP_PORT_REDIRECT;
2332                 } else {
2333                         shost_printk(KERN_WARNING, shost,
2334                                      "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2335                         ch->status = -ECONNRESET;
2336                 }
2337                 break;
2338
2339         case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2340                 shost_printk(KERN_WARNING, shost,
2341                             "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2342                 ch->status = -ECONNRESET;
2343                 break;
2344
2345         case IB_CM_REJ_CONSUMER_DEFINED:
2346                 opcode = *(u8 *) event->private_data;
2347                 if (opcode == SRP_LOGIN_REJ) {
2348                         struct srp_login_rej *rej = event->private_data;
2349                         u32 reason = be32_to_cpu(rej->reason);
2350
2351                         if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2352                                 shost_printk(KERN_WARNING, shost,
2353                                              PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2354                         else
2355                                 shost_printk(KERN_WARNING, shost, PFX
2356                                              "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2357                                              target->sgid.raw,
2358                                              target->orig_dgid.raw, reason);
2359                 } else
2360                         shost_printk(KERN_WARNING, shost,
2361                                      "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2362                                      opcode);
2363                 ch->status = -ECONNRESET;
2364                 break;
2365
2366         case IB_CM_REJ_STALE_CONN:
2367                 shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2368                 ch->status = SRP_STALE_CONN;
2369                 break;
2370
2371         default:
2372                 shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2373                              event->param.rej_rcvd.reason);
2374                 ch->status = -ECONNRESET;
2375         }
2376 }
2377
2378 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2379 {
2380         struct srp_rdma_ch *ch = cm_id->context;
2381         struct srp_target_port *target = ch->target;
2382         int comp = 0;
2383
2384         switch (event->event) {
2385         case IB_CM_REQ_ERROR:
2386                 shost_printk(KERN_DEBUG, target->scsi_host,
2387                              PFX "Sending CM REQ failed\n");
2388                 comp = 1;
2389                 ch->status = -ECONNRESET;
2390                 break;
2391
2392         case IB_CM_REP_RECEIVED:
2393                 comp = 1;
2394                 srp_cm_rep_handler(cm_id, event->private_data, ch);
2395                 break;
2396
2397         case IB_CM_REJ_RECEIVED:
2398                 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2399                 comp = 1;
2400
2401                 srp_cm_rej_handler(cm_id, event, ch);
2402                 break;
2403
2404         case IB_CM_DREQ_RECEIVED:
2405                 shost_printk(KERN_WARNING, target->scsi_host,
2406                              PFX "DREQ received - connection closed\n");
2407                 ch->connected = false;
2408                 if (ib_send_cm_drep(cm_id, NULL, 0))
2409                         shost_printk(KERN_ERR, target->scsi_host,
2410                                      PFX "Sending CM DREP failed\n");
2411                 queue_work(system_long_wq, &target->tl_err_work);
2412                 break;
2413
2414         case IB_CM_TIMEWAIT_EXIT:
2415                 shost_printk(KERN_ERR, target->scsi_host,
2416                              PFX "connection closed\n");
2417                 comp = 1;
2418
2419                 ch->status = 0;
2420                 break;
2421
2422         case IB_CM_MRA_RECEIVED:
2423         case IB_CM_DREQ_ERROR:
2424         case IB_CM_DREP_RECEIVED:
2425                 break;
2426
2427         default:
2428                 shost_printk(KERN_WARNING, target->scsi_host,
2429                              PFX "Unhandled CM event %d\n", event->event);
2430                 break;
2431         }
2432
2433         if (comp)
2434                 complete(&ch->done);
2435
2436         return 0;
2437 }
2438
2439 /**
2440  * srp_change_queue_depth() - set the device queue depth
2441  * @sdev: scsi device struct
2442  * @qdepth: requested queue depth
2443  *
2444  * Returns queue depth.
2445  */
2446 static int
2447 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2448 {
2449         if (!sdev->tagged_supported)
2450                 qdepth = 1;
2451         return scsi_change_queue_depth(sdev, qdepth);
2452 }
2453
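     /*
      * Send an SRP task management function, e.g. SRP_TSK_ABORT_TASK or
      * SRP_TSK_LUN_RESET, and wait at most SRP_ABORT_TIMEOUT_MS for the
      * answer. Returns 0 upon success and -1 upon failure.
      */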
2454 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2455                              u8 func)
2456 {
2457         struct srp_target_port *target = ch->target;
2458         struct srp_rport *rport = target->rport;
2459         struct ib_device *dev = target->srp_host->srp_dev->dev;
2460         struct srp_iu *iu;
2461         struct srp_tsk_mgmt *tsk_mgmt;
2462
2463         if (!ch->connected || target->qp_in_error)
2464                 return -1;
2465
2466         init_completion(&ch->tsk_mgmt_done);
2467
2468         /*
2469          * Lock the rport mutex to prevent srp_create_ch_ib() from being
2470          * invoked while a task management function is being sent.
2471          */
2472         mutex_lock(&rport->mutex);
2473         spin_lock_irq(&ch->lock);
2474         iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2475         spin_unlock_irq(&ch->lock);
2476
2477         if (!iu) {
2478                 mutex_unlock(&rport->mutex);
2479
2480                 return -1;
2481         }
2482
2483         ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof(*tsk_mgmt),
2484                                    DMA_TO_DEVICE);
2485         tsk_mgmt = iu->buf;
2486         memset(tsk_mgmt, 0, sizeof(*tsk_mgmt));
2487
2488         tsk_mgmt->opcode        = SRP_TSK_MGMT;
2489         int_to_scsilun(lun, &tsk_mgmt->lun);
2490         tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
2491         tsk_mgmt->tsk_mgmt_func = func;
2492         tsk_mgmt->task_tag      = req_tag;
2493
2494         ib_dma_sync_single_for_device(dev, iu->dma, sizeof(*tsk_mgmt),
2495                                       DMA_TO_DEVICE);
2496         if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2497                 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2498                 mutex_unlock(&rport->mutex);
2499
2500                 return -1;
2501         }
2502         mutex_unlock(&rport->mutex);
2503
2504         if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2505                                          msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2506                 return -1;
2507
2508         return 0;
2509 }
2510
2511 static int srp_abort(struct scsi_cmnd *scmnd)
2512 {
2513         struct srp_target_port *target = host_to_target(scmnd->device->host);
2514         struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2515         u32 tag;
2516         u16 ch_idx;
2517         struct srp_rdma_ch *ch;
2518         int ret;
2519
2520         shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2521
2522         if (!req)
2523                 return SUCCESS;
2524         tag = blk_mq_unique_tag(scmnd->request);
2525         ch_idx = blk_mq_unique_tag_to_hwq(tag);
2526         if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2527                 return SUCCESS;
2528         ch = &target->ch[ch_idx];
2529         if (!srp_claim_req(ch, req, NULL, scmnd))
2530                 return SUCCESS;
2531         shost_printk(KERN_ERR, target->scsi_host,
2532                      "Sending SRP abort for tag %#x\n", tag);
2533         if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2534                               SRP_TSK_ABORT_TASK) == 0)
2535                 ret = SUCCESS;
2536         else if (target->rport->state == SRP_RPORT_LOST)
2537                 ret = FAST_IO_FAIL;
2538         else
2539                 ret = FAILED;
2540         srp_free_req(ch, req, scmnd, 0);
2541         scmnd->result = DID_ABORT << 16;
2542         scmnd->scsi_done(scmnd);
2543
2544         return ret;
2545 }
2546
2547 static int srp_reset_device(struct scsi_cmnd *scmnd)
2548 {
2549         struct srp_target_port *target = host_to_target(scmnd->device->host);
2550         struct srp_rdma_ch *ch;
2551         int i, j;
2552
2553         shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2554
2555         ch = &target->ch[0];
2556         if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2557                               SRP_TSK_LUN_RESET))
2558                 return FAILED;
2559         if (ch->tsk_mgmt_status)
2560                 return FAILED;
2561
2562         for (i = 0; i < target->ch_count; i++) {
2563                 ch = &target->ch[i];
2564                 for (j = 0; j < target->req_ring_size; ++j) {
2565                         struct srp_request *req = &ch->req_ring[j];
2566
2567                         srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2568                 }
2569         }
2570
2571         return SUCCESS;
2572 }
2573
2574 static int srp_reset_host(struct scsi_cmnd *scmnd)
2575 {
2576         struct srp_target_port *target = host_to_target(scmnd->device->host);
2577
2578         shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2579
2580         return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2581 }
2582
2583 static int srp_slave_configure(struct scsi_device *sdev)
2584 {
2585         struct Scsi_Host *shost = sdev->host;
2586         struct srp_target_port *target = host_to_target(shost);
2587         struct request_queue *q = sdev->request_queue;
2588         unsigned long timeout;
2589
2590         if (sdev->type == TYPE_DISK) {
2591                 timeout = max_t(unsigned int, 30 * HZ, target->rq_tmo_jiffies);
2592                 blk_queue_rq_timeout(q, timeout);
2593         }
2594
2595         return 0;
2596 }
2597
2598 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2599                            char *buf)
2600 {
2601         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2602
2603         return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2604 }
2605
2606 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2607                              char *buf)
2608 {
2609         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2610
2611         return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2612 }
2613
2614 static ssize_t show_service_id(struct device *dev,
2615                                struct device_attribute *attr, char *buf)
2616 {
2617         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2618
2619         return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2620 }
2621
2622 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2623                          char *buf)
2624 {
2625         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2626
2627         return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2628 }
2629
2630 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2631                          char *buf)
2632 {
2633         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2634
2635         return sprintf(buf, "%pI6\n", target->sgid.raw);
2636 }
2637
2638 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2639                          char *buf)
2640 {
2641         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2642         struct srp_rdma_ch *ch = &target->ch[0];
2643
2644         return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2645 }
2646
2647 static ssize_t show_orig_dgid(struct device *dev,
2648                               struct device_attribute *attr, char *buf)
2649 {
2650         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2651
2652         return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2653 }
2654
2655 static ssize_t show_req_lim(struct device *dev,
2656                             struct device_attribute *attr, char *buf)
2657 {
2658         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2659         struct srp_rdma_ch *ch;
2660         int i, req_lim = INT_MAX;
2661
2662         for (i = 0; i < target->ch_count; i++) {
2663                 ch = &target->ch[i];
2664                 req_lim = min(req_lim, ch->req_lim);
2665         }
2666         return sprintf(buf, "%d\n", req_lim);
2667 }
2668
2669 static ssize_t show_zero_req_lim(struct device *dev,
2670                                  struct device_attribute *attr, char *buf)
2671 {
2672         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2673
2674         return sprintf(buf, "%d\n", target->zero_req_lim);
2675 }
2676
2677 static ssize_t show_local_ib_port(struct device *dev,
2678                                   struct device_attribute *attr, char *buf)
2679 {
2680         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2681
2682         return sprintf(buf, "%d\n", target->srp_host->port);
2683 }
2684
2685 static ssize_t show_local_ib_device(struct device *dev,
2686                                     struct device_attribute *attr, char *buf)
2687 {
2688         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2689
2690         return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2691 }
2692
2693 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2694                              char *buf)
2695 {
2696         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2697
2698         return sprintf(buf, "%d\n", target->ch_count);
2699 }
2700
2701 static ssize_t show_comp_vector(struct device *dev,
2702                                 struct device_attribute *attr, char *buf)
2703 {
2704         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2705
2706         return sprintf(buf, "%d\n", target->comp_vector);
2707 }
2708
2709 static ssize_t show_tl_retry_count(struct device *dev,
2710                                    struct device_attribute *attr, char *buf)
2711 {
2712         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2713
2714         return sprintf(buf, "%d\n", target->tl_retry_count);
2715 }
2716
2717 static ssize_t show_cmd_sg_entries(struct device *dev,
2718                                    struct device_attribute *attr, char *buf)
2719 {
2720         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2721
2722         return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2723 }
2724
2725 static ssize_t show_allow_ext_sg(struct device *dev,
2726                                  struct device_attribute *attr, char *buf)
2727 {
2728         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2729
2730         return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2731 }
2732
2733 static DEVICE_ATTR(id_ext,          S_IRUGO, show_id_ext,          NULL);
2734 static DEVICE_ATTR(ioc_guid,        S_IRUGO, show_ioc_guid,        NULL);
2735 static DEVICE_ATTR(service_id,      S_IRUGO, show_service_id,      NULL);
2736 static DEVICE_ATTR(pkey,            S_IRUGO, show_pkey,            NULL);
2737 static DEVICE_ATTR(sgid,            S_IRUGO, show_sgid,            NULL);
2738 static DEVICE_ATTR(dgid,            S_IRUGO, show_dgid,            NULL);
2739 static DEVICE_ATTR(orig_dgid,       S_IRUGO, show_orig_dgid,       NULL);
2740 static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2741 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
2742 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2743 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2744 static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
2745 static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
2746 static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
2747 static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2748 static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
2749
2750 static struct device_attribute *srp_host_attrs[] = {
2751         &dev_attr_id_ext,
2752         &dev_attr_ioc_guid,
2753         &dev_attr_service_id,
2754         &dev_attr_pkey,
2755         &dev_attr_sgid,
2756         &dev_attr_dgid,
2757         &dev_attr_orig_dgid,
2758         &dev_attr_req_lim,
2759         &dev_attr_zero_req_lim,
2760         &dev_attr_local_ib_port,
2761         &dev_attr_local_ib_device,
2762         &dev_attr_ch_count,
2763         &dev_attr_comp_vector,
2764         &dev_attr_tl_retry_count,
2765         &dev_attr_cmd_sg_entries,
2766         &dev_attr_allow_ext_sg,
2767         NULL
2768 };
2769
2770 static struct scsi_host_template srp_template = {
2771         .module                         = THIS_MODULE,
2772         .name                           = "InfiniBand SRP initiator",
2773         .proc_name                      = DRV_NAME,
2774         .slave_configure                = srp_slave_configure,
2775         .info                           = srp_target_info,
2776         .queuecommand                   = srp_queuecommand,
2777         .change_queue_depth             = srp_change_queue_depth,
2778         .eh_abort_handler               = srp_abort,
2779         .eh_device_reset_handler        = srp_reset_device,
2780         .eh_host_reset_handler          = srp_reset_host,
2781         .skip_settle_delay              = true,
2782         .sg_tablesize                   = SRP_DEF_SG_TABLESIZE,
2783         .can_queue                      = SRP_DEFAULT_CMD_SQ_SIZE,
2784         .this_id                        = -1,
2785         .cmd_per_lun                    = SRP_DEFAULT_CMD_SQ_SIZE,
2786         .use_clustering                 = ENABLE_CLUSTERING,
2787         .shost_attrs                    = srp_host_attrs,
2788         .track_queue_depth              = 1,
2789 };
2790
2791 static int srp_sdev_count(struct Scsi_Host *host)
2792 {
2793         struct scsi_device *sdev;
2794         int c = 0;
2795
2796         shost_for_each_device(sdev, host)
2797                 c++;
2798
2799         return c;
2800 }
2801
2802 /*
2803  * Return values:
2804  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2805  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2806  *    removal has been scheduled.
2807  * 0 and target->state != SRP_TARGET_REMOVED upon success.
2808  */
2809 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2810 {
2811         struct srp_rport_identifiers ids;
2812         struct srp_rport *rport;
2813
2814         target->state = SRP_TARGET_SCANNING;
2815         sprintf(target->target_name, "SRP.T10:%016llX",
2816                 be64_to_cpu(target->id_ext));
2817
2818         if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2819                 return -ENODEV;
2820
2821         memcpy(ids.port_id, &target->id_ext, 8);
2822         memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2823         ids.roles = SRP_RPORT_ROLE_TARGET;
2824         rport = srp_rport_add(target->scsi_host, &ids);
2825         if (IS_ERR(rport)) {
2826                 scsi_remove_host(target->scsi_host);
2827                 return PTR_ERR(rport);
2828         }
2829
2830         rport->lld_data = target;
2831         target->rport = rport;
2832
2833         spin_lock(&host->target_lock);
2834         list_add_tail(&target->list, &host->target_list);
2835         spin_unlock(&host->target_lock);
2836
2837         scsi_scan_target(&target->scsi_host->shost_gendev,
2838                          0, target->scsi_id, SCAN_WILD_CARD, 0);
2839
2840         if (srp_connected_ch(target) < target->ch_count ||
2841             target->qp_in_error) {
2842                 shost_printk(KERN_INFO, target->scsi_host,
2843                              PFX "SCSI scan failed - removing SCSI host\n");
2844                 srp_queue_remove_work(target);
2845                 goto out;
2846         }
2847
2848         pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2849                  dev_name(&target->scsi_host->shost_gendev),
2850                  srp_sdev_count(target->scsi_host));
2851
2852         spin_lock_irq(&target->lock);
2853         if (target->state == SRP_TARGET_SCANNING)
2854                 target->state = SRP_TARGET_LIVE;
2855         spin_unlock_irq(&target->lock);
2856
2857 out:
2858         return 0;
2859 }
2860
2861 static void srp_release_dev(struct device *dev)
2862 {
2863         struct srp_host *host =
2864                 container_of(dev, struct srp_host, dev);
2865
2866         complete(&host->released);
2867 }
2868
2869 static struct class srp_class = {
2870         .name    = "infiniband_srp",
2871         .dev_release = srp_release_dev
2872 };
2873
2874 /**
2875  * srp_conn_unique() - check whether the connection to a target is unique
2876  * @host:   SRP host.
2877  * @target: SRP target port.
2878  */
2879 static bool srp_conn_unique(struct srp_host *host,
2880                             struct srp_target_port *target)
2881 {
2882         struct srp_target_port *t;
2883         bool ret = false;
2884
2885         if (target->state == SRP_TARGET_REMOVED)
2886                 goto out;
2887
2888         ret = true;
2889
2890         spin_lock(&host->target_lock);
2891         list_for_each_entry(t, &host->target_list, list) {
2892                 if (t != target &&
2893                     target->id_ext == t->id_ext &&
2894                     target->ioc_guid == t->ioc_guid &&
2895                     target->initiator_ext == t->initiator_ext) {
2896                         ret = false;
2897                         break;
2898                 }
2899         }
2900         spin_unlock(&host->target_lock);
2901
2902 out:
2903         return ret;
2904 }
2905
2906 /*
2907  * Target ports are added by writing
2908  *
2909  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2910  *     pkey=<P_Key>,service_id=<service ID>
2911  *
2912  * to the add_target sysfs attribute.
2913  */
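
     /*
      * An illustrative example; the identifier values below are made up and
      * the name of the host directory depends on the HCA name and port
      * number:
      *
      *     echo id_ext=200100e08b000000,ioc_guid=0002c903000f1366,dgid=fe800000000000000002c903000f1366,pkey=ffff,service_id=0002c903000f1366 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
      */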
2914 enum {
2915         SRP_OPT_ERR             = 0,
2916         SRP_OPT_ID_EXT          = 1 << 0,
2917         SRP_OPT_IOC_GUID        = 1 << 1,
2918         SRP_OPT_DGID            = 1 << 2,
2919         SRP_OPT_PKEY            = 1 << 3,
2920         SRP_OPT_SERVICE_ID      = 1 << 4,
2921         SRP_OPT_MAX_SECT        = 1 << 5,
2922         SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2923         SRP_OPT_IO_CLASS        = 1 << 7,
2924         SRP_OPT_INITIATOR_EXT   = 1 << 8,
2925         SRP_OPT_CMD_SG_ENTRIES  = 1 << 9,
2926         SRP_OPT_ALLOW_EXT_SG    = 1 << 10,
2927         SRP_OPT_SG_TABLESIZE    = 1 << 11,
2928         SRP_OPT_COMP_VECTOR     = 1 << 12,
2929         SRP_OPT_TL_RETRY_COUNT  = 1 << 13,
2930         SRP_OPT_QUEUE_SIZE      = 1 << 14,
2931         SRP_OPT_ALL             = (SRP_OPT_ID_EXT       |
2932                                    SRP_OPT_IOC_GUID     |
2933                                    SRP_OPT_DGID         |
2934                                    SRP_OPT_PKEY         |
2935                                    SRP_OPT_SERVICE_ID),
2936 };
2937
2938 static const match_table_t srp_opt_tokens = {
2939         { SRP_OPT_ID_EXT,               "id_ext=%s"             },
2940         { SRP_OPT_IOC_GUID,             "ioc_guid=%s"           },
2941         { SRP_OPT_DGID,                 "dgid=%s"               },
2942         { SRP_OPT_PKEY,                 "pkey=%x"               },
2943         { SRP_OPT_SERVICE_ID,           "service_id=%s"         },
2944         { SRP_OPT_MAX_SECT,             "max_sect=%d"           },
2945         { SRP_OPT_MAX_CMD_PER_LUN,      "max_cmd_per_lun=%d"    },
2946         { SRP_OPT_IO_CLASS,             "io_class=%x"           },
2947         { SRP_OPT_INITIATOR_EXT,        "initiator_ext=%s"      },
2948         { SRP_OPT_CMD_SG_ENTRIES,       "cmd_sg_entries=%u"     },
2949         { SRP_OPT_ALLOW_EXT_SG,         "allow_ext_sg=%u"       },
2950         { SRP_OPT_SG_TABLESIZE,         "sg_tablesize=%u"       },
2951         { SRP_OPT_COMP_VECTOR,          "comp_vector=%u"        },
2952         { SRP_OPT_TL_RETRY_COUNT,       "tl_retry_count=%u"     },
2953         { SRP_OPT_QUEUE_SIZE,           "queue_size=%d"         },
2954         { SRP_OPT_ERR,                  NULL                    }
2955 };
2956
2957 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2958 {
2959         char *options, *sep_opt;
2960         char *p;
2961         char dgid[3];
2962         substring_t args[MAX_OPT_ARGS];
2963         int opt_mask = 0;
2964         int token;
2965         int ret = -EINVAL;
2966         int i;
2967
2968         options = kstrdup(buf, GFP_KERNEL);
2969         if (!options)
2970                 return -ENOMEM;
2971
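             /* Split the request on commas and newlines and parse each token. */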
2972         sep_opt = options;
2973         while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2974                 if (!*p)
2975                         continue;
2976
2977                 token = match_token(p, srp_opt_tokens, args);
2978                 opt_mask |= token;
2979
2980                 switch (token) {
2981                 case SRP_OPT_ID_EXT:
2982                         p = match_strdup(args);
2983                         if (!p) {
2984                                 ret = -ENOMEM;
2985                                 goto out;
2986                         }
2987                         target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2988                         kfree(p);
2989                         break;
2990
2991                 case SRP_OPT_IOC_GUID:
2992                         p = match_strdup(args);
2993                         if (!p) {
2994                                 ret = -ENOMEM;
2995                                 goto out;
2996                         }
2997                         target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2998                         kfree(p);
2999                         break;
3000
3001                 case SRP_OPT_DGID:
3002                         p = match_strdup(args);
3003                         if (!p) {
3004                                 ret = -ENOMEM;
3005                                 goto out;
3006                         }
3007                         if (strlen(p) != 32) {
3008                                 pr_warn("bad dest GID parameter '%s'\n", p);
3009                                 kfree(p);
3010                                 goto out;
3011                         }
3012
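                             /*
                              * Parse the 32-character hexadecimal GID one
                              * byte (two characters) at a time; dgid[] holds
                              * a two-character chunk plus the '\0' terminator
                              * required by sscanf().
                              */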
3013                         for (i = 0; i < 16; ++i) {
3014                                 strlcpy(dgid, p + i * 2, sizeof(dgid));
3015                                 if (sscanf(dgid, "%hhx",
3016                                            &target->orig_dgid.raw[i]) < 1) {
3017                                         ret = -EINVAL;
3018                                         kfree(p);
3019                                         goto out;
3020                                 }
3021                         }
3022                         kfree(p);
3023                         break;
3024
3025                 case SRP_OPT_PKEY:
3026                         if (match_hex(args, &token)) {
3027                                 pr_warn("bad P_Key parameter '%s'\n", p);
3028                                 goto out;
3029                         }
3030                         target->pkey = cpu_to_be16(token);
3031                         break;
3032
3033                 case SRP_OPT_SERVICE_ID:
3034                         p = match_strdup(args);
3035                         if (!p) {
3036                                 ret = -ENOMEM;
3037                                 goto out;
3038                         }
3039                         target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3040                         kfree(p);
3041                         break;
3042
3043                 case SRP_OPT_MAX_SECT:
3044                         if (match_int(args, &token)) {
3045                                 pr_warn("bad max sect parameter '%s'\n", p);
3046                                 goto out;
3047                         }
3048                         target->scsi_host->max_sectors = token;
3049                         break;
3050
3051                 case SRP_OPT_QUEUE_SIZE:
3052                         if (match_int(args, &token) || token < 1) {
3053                                 pr_warn("bad queue_size parameter '%s'\n", p);
3054                                 goto out;
3055                         }
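                     /*
                      * Reserve extra queue slots for SRP RSP and task
                      * management IUs on top of the requested SCSI command
                      * queue depth.
                      */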
3056                         target->scsi_host->can_queue = token;
3057                         target->queue_size = token + SRP_RSP_SQ_SIZE +
3058                                              SRP_TSK_MGMT_SQ_SIZE;
3059                         if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3060                                 target->scsi_host->cmd_per_lun = token;
3061                         break;
3062
3063                 case SRP_OPT_MAX_CMD_PER_LUN:
3064                         if (match_int(args, &token) || token < 1) {
3065                                 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3066                                         p);
3067                                 goto out;
3068                         }
3069                         target->scsi_host->cmd_per_lun = token;
3070                         break;
3071
3072                 case SRP_OPT_IO_CLASS:
3073                         if (match_hex(args, &token)) {
3074                                 pr_warn("bad IO class parameter '%s'\n", p);
3075                                 goto out;
3076                         }
3077                         if (token != SRP_REV10_IB_IO_CLASS &&
3078                             token != SRP_REV16A_IB_IO_CLASS) {
3079                                 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3080                                         token, SRP_REV10_IB_IO_CLASS,
3081                                         SRP_REV16A_IB_IO_CLASS);
3082                                 goto out;
3083                         }
3084                         target->io_class = token;
3085                         break;
3086
3087                 case SRP_OPT_INITIATOR_EXT:
3088                         p = match_strdup(args);
3089                         if (!p) {
3090                                 ret = -ENOMEM;
3091                                 goto out;
3092                         }
3093                         target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3094                         kfree(p);
3095                         break;
3096
3097                 case SRP_OPT_CMD_SG_ENTRIES:
3098                         if (match_int(args, &token) || token < 1 || token > 255) {
3099                                 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3100                                         p);
3101                                 goto out;
3102                         }
3103                         target->cmd_sg_cnt = token;
3104                         break;
3105
3106                 case SRP_OPT_ALLOW_EXT_SG:
3107                         if (match_int(args, &token)) {
3108                                 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3109                                 goto out;
3110                         }
3111                         target->allow_ext_sg = !!token;
3112                         break;
3113
3114                 case SRP_OPT_SG_TABLESIZE:
3115                         if (match_int(args, &token) || token < 1 ||
3116                                         token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3117                                 pr_warn("bad max sg_tablesize parameter '%s'\n",
3118                                         p);
3119                                 goto out;
3120                         }
3121                         target->sg_tablesize = token;
3122                         break;
3123
3124                 case SRP_OPT_COMP_VECTOR:
3125                         if (match_int(args, &token) || token < 0) {
3126                                 pr_warn("bad comp_vector parameter '%s'\n", p);
3127                                 goto out;
3128                         }
3129                         target->comp_vector = token;
3130                         break;
3131
3132                 case SRP_OPT_TL_RETRY_COUNT:
3133                         if (match_int(args, &token) || token < 2 || token > 7) {
3134                                 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3135                                         p);
3136                                 goto out;
3137                         }
3138                         target->tl_retry_count = token;
3139                         break;
3140
3141                 default:
3142                         pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3143                                 p);
3144                         goto out;
3145                 }
3146         }
3147
3148         if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3149                 ret = 0;
3150         else
3151                 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3152                         if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3153                             !(srp_opt_tokens[i].token & opt_mask))
3154                                 pr_warn("target creation request is missing parameter '%s'\n",
3155                                         srp_opt_tokens[i].pattern);
3156
3157         if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue &&
3158             (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3159                 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3160                         target->scsi_host->cmd_per_lun,
3161                         target->scsi_host->can_queue);
3162
3163 out:
3164         kfree(options);
3165         return ret;
3166 }
3167
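     /*
      * Handle a write to the add_target sysfs attribute: parse the target
      * creation request, allocate a SCSI host and target port, establish one
      * or more RDMA channels to the SRP target port and scan the new SCSI
      * host. Returns the number of bytes consumed upon success or a negative
      * error code.
      */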
3168 static ssize_t srp_create_target(struct device *dev,
3169                                  struct device_attribute *attr,
3170                                  const char *buf, size_t count)
3171 {
3172         struct srp_host *host =
3173                 container_of(dev, struct srp_host, dev);
3174         struct Scsi_Host *target_host;
3175         struct srp_target_port *target;
3176         struct srp_rdma_ch *ch;
3177         struct srp_device *srp_dev = host->srp_dev;
3178         struct ib_device *ibdev = srp_dev->dev;
3179         int ret, node_idx, node, cpu, i;
3180         bool multich = false;
3181
3182         target_host = scsi_host_alloc(&srp_template,
3183                                       sizeof (struct srp_target_port));
3184         if (!target_host)
3185                 return -ENOMEM;
3186
3187         target_host->transportt  = ib_srp_transport_template;
3188         target_host->max_channel = 0;
3189         target_host->max_id      = 1;
3190         target_host->max_lun     = -1LL;
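             /* Size of the CDB field of an SRP_CMD information unit: */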
3191         target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3192
3193         target = host_to_target(target_host);
3194
3195         target->io_class        = SRP_REV16A_IB_IO_CLASS;
3196         target->scsi_host       = target_host;
3197         target->srp_host        = host;
3198         target->lkey            = host->srp_dev->pd->local_dma_lkey;
3199         target->global_mr       = host->srp_dev->global_mr;
3200         target->cmd_sg_cnt      = cmd_sg_entries;
3201         target->sg_tablesize    = indirect_sg_entries ? : cmd_sg_entries;
3202         target->allow_ext_sg    = allow_ext_sg;
3203         target->tl_retry_count  = 7;
3204         target->queue_size      = SRP_DEFAULT_QUEUE_SIZE;
3205
3206         /*
3207          * Prevent the SCSI host from being removed by srp_remove_target()
3208          * before this function returns.
3209          */
3210         scsi_host_get(target->scsi_host);
3211
3212         mutex_lock(&host->add_target_mutex);
3213
3214         ret = srp_parse_options(buf, target);
3215         if (ret)
3216                 goto out;
3217
3218         target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3219
3220         if (!srp_conn_unique(target->srp_host, target)) {
3221                 shost_printk(KERN_INFO, target->scsi_host,
3222                              PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3223                              be64_to_cpu(target->id_ext),
3224                              be64_to_cpu(target->ioc_guid),
3225                              be64_to_cpu(target->initiator_ext));
3226                 ret = -EEXIST;
3227                 goto out;
3228         }
3229
3230         if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3231             target->cmd_sg_cnt < target->sg_tablesize) {
3232                 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3233                 target->sg_tablesize = target->cmd_sg_cnt;
3234         }
3235
3236         target_host->sg_tablesize = target->sg_tablesize;
3237         target->mr_pool_size = target->scsi_host->can_queue;
3238         target->indirect_size = target->sg_tablesize *
3239                                 sizeof (struct srp_direct_buf);
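             /*
              * In the worst case an initiator-to-target IU consists of an
              * SRP_CMD and an indirect data buffer descriptor with
              * cmd_sg_cnt entries.
              */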
3240         target->max_iu_len = sizeof (struct srp_cmd) +
3241                              sizeof (struct srp_indirect_buf) +
3242                              target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3243
3244         INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3245         INIT_WORK(&target->remove_work, srp_remove_work);
3246         spin_lock_init(&target->lock);
3247         ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
3248         if (ret)
3249                 goto out;
3250
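             /*
              * Create at least one RDMA channel per NUMA node. Unless the
              * ch_count kernel module parameter has been set, create up to
              * four channels per node without exceeding the number of
              * completion vectors or online CPUs.
              */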
3251         ret = -ENOMEM;
3252         target->ch_count = max_t(unsigned, num_online_nodes(),
3253                                  min(ch_count ? :
3254                                      min(4 * num_online_nodes(),
3255                                          ibdev->num_comp_vectors),
3256                                      num_online_cpus()));
3257         target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3258                              GFP_KERNEL);
3259         if (!target->ch)
3260                 goto out;
3261
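             /*
              * Distribute the channels evenly over the NUMA nodes and give
              * each node its own slice of the HCA completion vectors,
              * rotated by the comp_vector login option.
              */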
3262         node_idx = 0;
3263         for_each_online_node(node) {
3264                 const int ch_start = (node_idx * target->ch_count /
3265                                       num_online_nodes());
3266                 const int ch_end = ((node_idx + 1) * target->ch_count /
3267                                     num_online_nodes());
3268                 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3269                                       num_online_nodes() + target->comp_vector)
3270                                      % ibdev->num_comp_vectors;
3271                 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3272                                     num_online_nodes() + target->comp_vector)
3273                                    % ibdev->num_comp_vectors;
3274                 int cpu_idx = 0;
3275
3276                 for_each_online_cpu(cpu) {
3277                         if (cpu_to_node(cpu) != node)
3278                                 continue;
3279                         if (ch_start + cpu_idx >= ch_end)
3280                                 continue;
3281                         ch = &target->ch[ch_start + cpu_idx];
3282                         ch->target = target;
3283                         ch->comp_vector = cv_start == cv_end ? cv_start :
3284                                 cv_start + cpu_idx % (cv_end - cv_start);
3285                         spin_lock_init(&ch->lock);
3286                         INIT_LIST_HEAD(&ch->free_tx);
3287                         ret = srp_new_cm_id(ch);
3288                         if (ret)
3289                                 goto err_disconnect;
3290
3291                         ret = srp_create_ch_ib(ch);
3292                         if (ret)
3293                                 goto err_disconnect;
3294
3295                         ret = srp_alloc_req_data(ch);
3296                         if (ret)
3297                                 goto err_disconnect;
3298
3299                         ret = srp_connect_ch(ch, multich);
3300                         if (ret) {
3301                                 shost_printk(KERN_ERR, target->scsi_host,
3302                                              PFX "Connection %d/%d failed\n",
3303                                              ch_start + cpu_idx,
3304                                              target->ch_count);
3305                                 if (node_idx == 0 && cpu_idx == 0) {
3306                                         goto err_disconnect;
3307                                 } else {
3308                                         srp_free_ch_ib(target, ch);
3309                                         srp_free_req_data(target, ch);
3310                                         target->ch_count = ch - target->ch;
3311                                         goto connected;
3312                                 }
3313                         }
3314
3315                         multich = true;
3316                         cpu_idx++;
3317                 }
3318                 node_idx++;
3319         }
3320
3321 connected:
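             /* Use one SCSI hardware queue per established RDMA channel. */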
3322         target->scsi_host->nr_hw_queues = target->ch_count;
3323
3324         ret = srp_add_target(host, target);
3325         if (ret)
3326                 goto err_disconnect;
3327
3328         if (target->state != SRP_TARGET_REMOVED) {
3329                 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3330                              "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3331                              be64_to_cpu(target->id_ext),
3332                              be64_to_cpu(target->ioc_guid),
3333                              be16_to_cpu(target->pkey),
3334                              be64_to_cpu(target->service_id),
3335                              target->sgid.raw, target->orig_dgid.raw);
3336         }
3337
3338         ret = count;
3339
3340 out:
3341         mutex_unlock(&host->add_target_mutex);
3342
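             /*
              * Drop the reference obtained through scsi_host_get() above.
              * Upon failure also drop the scsi_host_alloc() reference such
              * that the SCSI host and hence also the target port get freed.
              */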
3343         scsi_host_put(target->scsi_host);
3344         if (ret < 0)
3345                 scsi_host_put(target->scsi_host);
3346
3347         return ret;
3348
3349 err_disconnect:
3350         srp_disconnect_target(target);
3351
3352         for (i = 0; i < target->ch_count; i++) {
3353                 ch = &target->ch[i];
3354                 srp_free_ch_ib(target, ch);
3355                 srp_free_req_data(target, ch);
3356         }
3357
3358         kfree(target->ch);
3359         goto out;
3360 }
3361
3362 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3363
3364 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3365                           char *buf)
3366 {
3367         struct srp_host *host = container_of(dev, struct srp_host, dev);
3368
3369         return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3370 }
3371
3372 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3373
3374 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3375                          char *buf)
3376 {
3377         struct srp_host *host = container_of(dev, struct srp_host, dev);
3378
3379         return sprintf(buf, "%d\n", host->port);
3380 }
3381
3382 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3383
3384 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3385 {
3386         struct srp_host *host;
3387
3388         host = kzalloc(sizeof *host, GFP_KERNEL);
3389         if (!host)
3390                 return NULL;
3391
3392         INIT_LIST_HEAD(&host->target_list);
3393         spin_lock_init(&host->target_lock);
3394         init_completion(&host->released);
3395         mutex_init(&host->add_target_mutex);
3396         host->srp_dev = device;
3397         host->port = port;
3398
3399         host->dev.class = &srp_class;
3400         host->dev.parent = device->dev->dma_device;
3401         dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3402
3403         if (device_register(&host->dev))
3404                 goto free_host;
3405         if (device_create_file(&host->dev, &dev_attr_add_target))
3406                 goto err_class;
3407         if (device_create_file(&host->dev, &dev_attr_ibdev))
3408                 goto err_class;
3409         if (device_create_file(&host->dev, &dev_attr_port))
3410                 goto err_class;
3411
3412         return host;
3413
3414 err_class:
3415         device_unregister(&host->dev);
3416
3417 free_host:
3418         kfree(host);
3419
3420         return NULL;
3421 }
3422
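     /*
      * Per-HCA initialization: query the device capabilities, select the
      * memory registration mode and MR page size, allocate a protection
      * domain and, if necessary, a global DMA MR, and register one SRP host
      * per port.
      */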
3423 static void srp_add_one(struct ib_device *device)
3424 {
3425         struct srp_device *srp_dev;
3426         struct srp_host *host;
3427         int mr_page_shift, p;
3428         u64 max_pages_per_mr;
3429
3430         srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3431         if (!srp_dev)
3432                 return;
3433
3434         srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3435                             device->map_phys_fmr && device->unmap_fmr);
3436         srp_dev->has_fr = (device->attrs.device_cap_flags &
3437                            IB_DEVICE_MEM_MGT_EXTENSIONS);
3438         if (!srp_dev->has_fmr && !srp_dev->has_fr)
3439                 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3440
3441         srp_dev->use_fast_reg = (srp_dev->has_fr &&
3442                                  (!srp_dev->has_fmr || prefer_fr));
3443         srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3444
3445         /*
3446          * Use the smallest page size supported by the HCA, down to a
3447          * minimum of 4096 bytes. We're unlikely to build large sglists
3448          * out of smaller entries.
3449          */
3450         mr_page_shift           = max(12, ffs(device->attrs.page_size_cap) - 1);
3451         srp_dev->mr_page_size   = 1 << mr_page_shift;
3452         srp_dev->mr_page_mask   = ~((u64) srp_dev->mr_page_size - 1);
3453         max_pages_per_mr        = device->attrs.max_mr_size;
3454         do_div(max_pages_per_mr, srp_dev->mr_page_size);
3455         srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3456                                           max_pages_per_mr);
3457         if (srp_dev->use_fast_reg) {
3458                 srp_dev->max_pages_per_mr =
3459                         min_t(u32, srp_dev->max_pages_per_mr,
3460                               device->attrs.max_fast_reg_page_list_len);
3461         }
3462         srp_dev->mr_max_size    = srp_dev->mr_page_size *
3463                                    srp_dev->max_pages_per_mr;
3464         pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3465                  device->name, mr_page_shift, device->attrs.max_mr_size,
3466                  device->attrs.max_fast_reg_page_list_len,
3467                  srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3468
3469         INIT_LIST_HEAD(&srp_dev->dev_list);
3470
3471         srp_dev->dev = device;
3472         srp_dev->pd  = ib_alloc_pd(device);
3473         if (IS_ERR(srp_dev->pd))
3474                 goto free_dev;
3475
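             /*
              * A global DMA MR is only needed if memory registration has
              * been disabled via the register_always kernel module parameter
              * or if the HCA supports neither FMR nor fast registration.
              */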
3476         if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3477                 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3478                                                    IB_ACCESS_LOCAL_WRITE |
3479                                                    IB_ACCESS_REMOTE_READ |
3480                                                    IB_ACCESS_REMOTE_WRITE);
3481                 if (IS_ERR(srp_dev->global_mr))
3482                         goto err_pd;
3483         } else {
3484                 srp_dev->global_mr = NULL;
3485         }
3486
3487         for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3488                 host = srp_add_port(srp_dev, p);
3489                 if (host)
3490                         list_add_tail(&host->list, &srp_dev->dev_list);
3491         }
3492
3493         ib_set_client_data(device, &srp_client, srp_dev);
3494         return;
3495
3496 err_pd:
3497         ib_dealloc_pd(srp_dev->pd);
3498
3499 free_dev:
3500         kfree(srp_dev);
3501 }
3502
3503 static void srp_remove_one(struct ib_device *device, void *client_data)
3504 {
3505         struct srp_device *srp_dev;
3506         struct srp_host *host, *tmp_host;
3507         struct srp_target_port *target;
3508
3509         srp_dev = client_data;
3510         if (!srp_dev)
3511                 return;
3512
3513         list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3514                 device_unregister(&host->dev);
3515                 /*
3516                  * Wait for the sysfs entry to go away, so that no new
3517                  * target ports can be created.
3518                  */
3519                 wait_for_completion(&host->released);
3520
3521                 /*
3522                  * Remove all target ports.
3523                  */
3524                 spin_lock(&host->target_lock);
3525                 list_for_each_entry(target, &host->target_list, list)
3526                         srp_queue_remove_work(target);
3527                 spin_unlock(&host->target_lock);
3528
3529                 /*
3530                  * Wait for tl_err and target port removal tasks.
3531                  */
3532                 flush_workqueue(system_long_wq);
3533                 flush_workqueue(srp_remove_wq);
3534
3535                 kfree(host);
3536         }
3537
3538         if (srp_dev->global_mr)
3539                 ib_dereg_mr(srp_dev->global_mr);
3540         ib_dealloc_pd(srp_dev->pd);
3541
3542         kfree(srp_dev);
3543 }
3544
3545 static struct srp_function_template ib_srp_transport_functions = {
3546         .has_rport_state         = true,
3547         .reset_timer_if_blocked  = true,
3548         .reconnect_delay         = &srp_reconnect_delay,
3549         .fast_io_fail_tmo        = &srp_fast_io_fail_tmo,
3550         .dev_loss_tmo            = &srp_dev_loss_tmo,
3551         .reconnect               = srp_rport_reconnect,
3552         .rport_delete            = srp_rport_delete,
3553         .terminate_rport_io      = srp_terminate_io,
3554 };
3555
3556 static int __init srp_init_module(void)
3557 {
3558         int ret;
3559
3560         if (srp_sg_tablesize) {
3561                 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3562                 if (!cmd_sg_entries)
3563                         cmd_sg_entries = srp_sg_tablesize;
3564         }
3565
3566         if (!cmd_sg_entries)
3567                 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3568
3569         if (cmd_sg_entries > 255) {
3570                 pr_warn("Clamping cmd_sg_entries to 255\n");
3571                 cmd_sg_entries = 255;
3572         }
3573
3574         if (!indirect_sg_entries)
3575                 indirect_sg_entries = cmd_sg_entries;
3576         else if (indirect_sg_entries < cmd_sg_entries) {
3577                 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3578                         cmd_sg_entries);
3579                 indirect_sg_entries = cmd_sg_entries;
3580         }
3581
3582         srp_remove_wq = create_workqueue("srp_remove");
3583         if (!srp_remove_wq) {
3584                 ret = -ENOMEM;
3585                 goto out;
3586         }
3587
3588         ret = -ENOMEM;
3589         ib_srp_transport_template =
3590                 srp_attach_transport(&ib_srp_transport_functions);
3591         if (!ib_srp_transport_template)
3592                 goto destroy_wq;
3593
3594         ret = class_register(&srp_class);
3595         if (ret) {
3596                 pr_err("couldn't register class infiniband_srp\n");
3597                 goto release_tr;
3598         }
3599
3600         ib_sa_register_client(&srp_sa_client);
3601
3602         ret = ib_register_client(&srp_client);
3603         if (ret) {
3604                 pr_err("couldn't register IB client\n");
3605                 goto unreg_sa;
3606         }
3607
3608 out:
3609         return ret;
3610
3611 unreg_sa:
3612         ib_sa_unregister_client(&srp_sa_client);
3613         class_unregister(&srp_class);
3614
3615 release_tr:
3616         srp_release_transport(ib_srp_transport_template);
3617
3618 destroy_wq:
3619         destroy_workqueue(srp_remove_wq);
3620         goto out;
3621 }
3622
3623 static void __exit srp_cleanup_module(void)
3624 {
3625         ib_unregister_client(&srp_client);
3626         ib_sa_unregister_client(&srp_sa_client);
3627         class_unregister(&srp_class);
3628         srp_release_transport(ib_srp_transport_template);
3629         destroy_workqueue(srp_remove_wq);
3630 }
3631
3632 module_init(srp_init_module);
3633 module_exit(srp_cleanup_module);