target: Drop left-over se_lun->lun_status
drivers/target/target_core_tpg.c
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_pr.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*      __core_tpg_get_initiator_node_acl():
 *
 *      tpg->acl_node_mutex must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

/*      core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        mutex_unlock(&tpg->acl_node_mutex);

        return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

/*      core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg,
        struct se_lun *lun_orig)
{
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        mutex_lock(&tpg->tpg_lun_mutex);
        hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
                if (lun_orig && lun != lun_orig)
                        continue;

                dev = lun->lun_se_dev;
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, i.e. demo-mode LUNs are
                 * exported READ-ONLY.
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * In the default RO demo mode, allow R/W only for
                         * non-TYPE_DISK devices (e.g. optical drives).
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                                 lun_access, acl, tpg);
                /*
                 * Check to see if there are any existing persistent reservation
                 * APTPL pre-registrations that need to be enabled for this dynamic
                 * LUN ACL now..
                 */
                core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
                                                    lun->unpacked_lun);
        }
        mutex_unlock(&tpg->tpg_lun_mutex);
}

/*      core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                pr_err("Queue depth for %s Initiator Node: %s is 0,"
                        " defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}

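/*
 * Allocate and initialize a struct se_node_acl for @initiatorname, using
 * the fabric's default queue depth and default node attributes.
 */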
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
                const unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
                        GFP_KERNEL);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        INIT_HLIST_HEAD(&acl->lun_entry_hlist);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->nacl_sess_lock);
        mutex_init(&acl->lun_entry_mutex);
        atomic_set(&acl->acl_pr_ref_count, 0);
        if (tpg->se_tpg_tfo->tpg_get_default_depth)
                acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        else
                acl->queue_depth = 1;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_set_queue_depth_for_node(tpg, acl) < 0)
                goto out_free_acl;

        return acl;

out_free_acl:
        kfree(acl);
        return NULL;
}

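/*
 * Insert a newly allocated se_node_acl onto the TPG's ACL list under
 * acl_node_mutex.
 */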
static void target_add_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;

        mutex_lock(&tpg->acl_node_mutex);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        mutex_unlock(&tpg->acl_node_mutex);

        pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                acl->dynamic_node_acl ? "DYNAMIC" : "",
                acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(),
                acl->initiatorname);
}

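/*
 * Called at fabric session login to look up an existing se_node_acl for
 * @initiatorname, or to generate a dynamic ACL when the fabric permits
 * demo mode.
 */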
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return NULL;
        acl->dynamic_node_acl = 1;

        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs if the fabric is not explicitly asking for
         * tpg_check_demo_mode_login_only() == 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
                core_tpg_add_node_to_devs(acl, tpg, NULL);

        target_add_node_acl(acl);
        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

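/*
 * Spin until all outstanding persistent reservation references to @nacl
 * have been dropped.
 */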
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

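/*
 * Add an explicitly configured initiator node ACL.  An existing dynamic
 * ACL for the same initiator is converted in place; an existing explicit
 * ACL returns -EEXIST.
 */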
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        mutex_unlock(&tpg->acl_node_mutex);
                        return acl;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                mutex_unlock(&tpg->acl_node_mutex);
                return ERR_PTR(-EEXIST);
        }
        mutex_unlock(&tpg->acl_node_mutex);

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return ERR_PTR(-ENOMEM);

        target_add_node_acl(acl);
        return acl;
}

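/*
 * Remove an se_node_acl from its TPG: shut down any active sessions for
 * the initiator, wait for outstanding references to drop, release the
 * node's MappedLUNs and free the ACL.
 */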
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;
        LIST_HEAD(sess_list);
        struct se_session *sess, *sess_tmp;
        unsigned long flags;
        int rc;

        mutex_lock(&tpg->acl_node_mutex);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        mutex_unlock(&tpg->acl_node_mutex);

        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        acl->acl_stop = 1;

        list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
                                sess_acl_list) {
                if (sess->sess_tearing_down != 0)
                        continue;

                target_get_session(sess);
                list_move(&sess->sess_acl_list, &sess_list);
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

        list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
                list_del(&sess->sess_acl_list);

                rc = tpg->se_tpg_tfo->shutdown_session(sess);
                target_put_session(sess);
                if (!rc)
                        continue;
                target_put_session(sess);
        }
        target_put_nacl(acl);
        /*
         * Wait for last target_put_nacl() to complete in target_complete_nacl()
         * for active fabric session transport_deregister_session() callbacks.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

        kfree(acl);
}

/*      core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        unsigned long flags;
        int dynamic_acl = 0;

        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!acl) {
                pr_err("Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                mutex_unlock(&tpg->acl_node_mutex);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        mutex_unlock(&tpg->acl_node_mutex);

        spin_lock_irqsave(&tpg->session_lock, flags);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        pr_err("Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational.  To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
                        spin_unlock_irqrestore(&tpg->session_lock, flags);

                        mutex_lock(&tpg->acl_node_mutex);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        mutex_unlock(&tpg->acl_node_mutex);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!tpg->se_tpg_tfo->shutdown_session(sess))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to apply the requested queue depth.
         *
         * Finally call tpg->se_tpg_tfo->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_irqrestore(&tpg->session_lock, flags);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
                 */
                if (init_sess)
                        tpg->se_tpg_tfo->close_session(init_sess);

                mutex_lock(&tpg->acl_node_mutex);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                mutex_unlock(&tpg->acl_node_mutex);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&tpg->session_lock, flags);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                tpg->se_tpg_tfo->close_session(init_sess);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        mutex_lock(&tpg->acl_node_mutex);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        mutex_unlock(&tpg->acl_node_mutex);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/*      core_tpg_set_initiator_node_tag():
 *
 *      Initiator nodeacl tags are not used internally, but may be used by
 *      userspace to emulate aliases or groups.
 *      Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        const char *new_tag)
{
        if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
                return -EINVAL;

        if (!strncmp("NULL", new_tag, 4)) {
                acl->acl_tag[0] = '\0';
                return 0;
        }

        return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

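/* percpu_ref release callback for se_lun->lun_ref */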
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

        complete(&lun->lun_ref_comp);
}

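/*
 * Export the global virtual LUN 0 device (g_lun0_dev) read-only as LUN 0
 * of this portal group.
 */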
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        spin_lock_init(&lun->lun_sep_lock);
        init_completion(&lun->lun_ref_comp);

        ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return ret;

        return 0;
}

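/*
 * Initialize a fabric-provided se_portal_group, set up virtual LUN 0 for
 * SCSI protocol TPGs (proto_id >= 0), and add it to the global tpg_list.
 */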
int core_tpg_register(
        const struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        int proto_id)
{
        INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
        se_tpg->proto_id = proto_id;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->session_lock);
        mutex_init(&se_tpg->tpg_lun_mutex);
        mutex_init(&se_tpg->acl_node_mutex);

        if (se_tpg->proto_id >= 0) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0)
                        return -ENOMEM;
        }

        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);

        pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
                 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
                tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
                se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);

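/*
 * Tear down a se_portal_group: remove it from tpg_list, wait for
 * outstanding PR references, release any remaining (demo-mode) node ACLs
 * and remove virtual LUN 0.
 */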
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
        struct se_node_acl *nacl, *nacl_tmp;
        LIST_HEAD(node_list);

        pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
                 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
                tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
                se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();

        mutex_lock(&se_tpg->acl_node_mutex);
        list_splice_init(&se_tpg->acl_node_list, &node_list);
        mutex_unlock(&se_tpg->acl_node_mutex);
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                kfree(nacl);
        }

        if (se_tpg->proto_id >= 0)
                core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);

        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

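/*
 * Validate @unpacked_lun against TRANSPORT_MAX_LUNS_PER_TPG and allocate
 * an initialized struct se_lun.
 */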
struct se_lun *core_tpg_alloc_lun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        lun = kzalloc(sizeof(*lun), GFP_KERNEL);
        if (!lun) {
                pr_err("Unable to allocate se_lun memory\n");
                return ERR_PTR(-ENOMEM);
        }
        lun->unpacked_lun = unpacked_lun;
        lun->lun_link_magic = SE_LUN_LINK_MAGIC;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        spin_lock_init(&lun->lun_sep_lock);
        init_completion(&lun->lun_ref_comp);

        return lun;
}

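/*
 * Activate a LUN within a TPG: initialize its percpu reference, export
 * the backing device, and link it into the TPG's LUN hlist.
 */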
int core_tpg_add_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        struct se_device *dev)
{
        int ret;

        ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                              GFP_KERNEL);
        if (ret < 0)
                return ret;

        ret = core_dev_export(dev, tpg, lun);
        if (ret < 0) {
                percpu_ref_exit(&lun->lun_ref);
                return ret;
        }

        mutex_lock(&tpg->tpg_lun_mutex);
        lun->lun_access = lun_access;
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
        mutex_unlock(&tpg->tpg_lun_mutex);

        return 0;
}

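/*
 * Reverse of core_tpg_add_lun(): clear LUN mappings, drain outstanding
 * references, un-export the backing device and unlink the LUN.
 */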
void core_tpg_remove_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_device *dev = lun->lun_se_dev;

        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_ref(lun);

        core_dev_unexport(lun->lun_se_dev, tpg, lun);

        mutex_lock(&tpg->tpg_lun_mutex);
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_del_rcu(&lun->link);
        mutex_unlock(&tpg->tpg_lun_mutex);

        percpu_ref_exit(&lun->lun_ref);
}