diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 06336ec..70c3ffb 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -64,7 +64,7 @@ static void core_clear_initiator_node_from_tpg(
 
        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               deve = &nacl->device_list[i];
+               deve = nacl->device_list[i];
 
                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;
@@ -163,7 +163,7 @@ void core_tpg_add_node_to_devs(
 
        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               lun = &tpg->tpg_lun_list[i];
+               lun = tpg->tpg_lun_list[i];
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;
 
@@ -222,6 +222,34 @@ static int core_set_queue_depth_for_node(
        return 0;
 }
 
+void array_free(void *array, int n)
+{
+       void **a = array;
+       int i;
+
+       for (i = 0; i < n; i++)
+               kfree(a[i]);
+       kfree(a);
+}
+
+static void *array_zalloc(int n, size_t size, gfp_t flags)
+{
+       void **a;
+       int i;
+
+       a = kzalloc(n * sizeof(void*), flags);
+       if (!a)
+               return NULL;
+       for (i = 0; i < n; i++) {
+               a[i] = kzalloc(size, flags);
+               if (!a[i]) {
+                       array_free(a, n);
+                       return NULL;
+               }
+       }
+       return a;
+}
+
 /*      core_create_device_list_for_node():
  *
  *
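
The two helpers above replace the old single kzalloc() of a flat array of structs with a table of individually kzalloc()'d entries, which is why the rest of the patch switches every "&list[i]" to "list[i]". A minimal usage sketch (not part of the patch, reusing names that already appear in this file):

	struct se_dev_entry **table;
	struct se_dev_entry *deve;

	table = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	deve = table[0];	/* entries are pointers now, no '&' needed */

	/*
	 * array_free() also copes with a partially filled table: kzalloc()
	 * zeroed the pointer array, so unused slots are NULL and
	 * kfree(NULL) is a no-op.
	 */
	array_free(table, TRANSPORT_MAX_LUNS_PER_TPG);
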
@@ -231,15 +259,15 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
        struct se_dev_entry *deve;
        int i;
 
-       nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
-                               TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
+       nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
+                       sizeof(struct se_dev_entry), GFP_KERNEL);
        if (!nacl->device_list) {
                pr_err("Unable to allocate memory for"
                        " struct se_node_acl->device_list\n");
                return -ENOMEM;
        }
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               deve = &nacl->device_list[i];
+               deve = nacl->device_list[i];
 
                atomic_set(&deve->ua_count, 0);
                atomic_set(&deve->pr_ref_count, 0);
@@ -274,6 +302,8 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 
        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
+       kref_init(&acl->acl_kref);
+       init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
@@ -329,19 +359,19 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
 
 void core_tpg_clear_object_luns(struct se_portal_group *tpg)
 {
-       int i, ret;
+       int i;
        struct se_lun *lun;
 
        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               lun = &tpg->tpg_lun_list[i];
+               lun = tpg->tpg_lun_list[i];
 
                if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
                    (lun->lun_se_dev == NULL))
                        continue;
 
                spin_unlock(&tpg->tpg_lun_lock);
-               ret = core_dev_del_lun(tpg, lun->unpacked_lun);
+               core_dev_del_lun(tpg, lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
@@ -402,6 +432,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 
        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
+       kref_init(&acl->acl_kref);
+       init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
@@ -448,39 +480,47 @@ int core_tpg_del_initiator_node_acl(
        struct se_node_acl *acl,
        int force)
 {
+       LIST_HEAD(sess_list);
        struct se_session *sess, *sess_tmp;
-       int dynamic_acl = 0;
+       unsigned long flags;
+       int rc;
 
        spin_lock_irq(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
-               dynamic_acl = 1;
        }
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_irq(&tpg->acl_node_lock);
 
-       spin_lock_bh(&tpg->session_lock);
-       list_for_each_entry_safe(sess, sess_tmp,
-                               &tpg->tpg_sess_list, sess_list) {
-               if (sess->se_node_acl != acl)
-                       continue;
-               /*
-                * Determine if the session needs to be closed by our context.
-                */
-               if (!tpg->se_tpg_tfo->shutdown_session(sess))
+       spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+       acl->acl_stop = 1;
+
+       list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
+                               sess_acl_list) {
+               if (sess->sess_tearing_down != 0)
                        continue;
 
-               spin_unlock_bh(&tpg->session_lock);
-               /*
-                * If the $FABRIC_MOD session for the Initiator Node ACL exists,
-                * forcefully shutdown the $FABRIC_MOD session/nexus.
-                */
-               tpg->se_tpg_tfo->close_session(sess);
+               target_get_session(sess);
+               list_move(&sess->sess_acl_list, &sess_list);
+       }
+       spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+       list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
+               list_del(&sess->sess_acl_list);
 
-               spin_lock_bh(&tpg->session_lock);
+               rc = tpg->se_tpg_tfo->shutdown_session(sess);
+               target_put_session(sess);
+               if (!rc)
+                       continue;
+               target_put_session(sess);
        }
-       spin_unlock_bh(&tpg->session_lock);
+       target_put_nacl(acl);
+       /*
+        * Wait for last target_put_nacl() to complete in target_complete_nacl()
+        * for active fabric session transport_deregister_session() callbacks.
+        */
+       wait_for_completion(&acl->acl_free_comp);
 
        core_tpg_wait_for_nacl_pr_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
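
The rewritten teardown above moves each active session onto a local list under nacl_sess_lock while holding an extra session reference, shuts the sessions down outside the lock, then drops the ACL's own reference and waits on acl_free_comp until the last target_put_nacl() fires. The put/complete side lives outside this file; a hedged sketch of that kref + completion pattern (assumed helper bodies, shown only for illustration):

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

With something like that in place, the kref_init() added in the ACL allocation paths above accounts for the ACL itself, and wait_for_completion(&acl->acl_free_comp) cannot return before every outstanding reference has been dropped.
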
@@ -507,6 +547,7 @@ int core_tpg_set_initiator_node_queue_depth(
 {
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
+       unsigned long flags;
        int dynamic_acl = 0;
 
        spin_lock_irq(&tpg->acl_node_lock);
@@ -525,7 +566,7 @@ int core_tpg_set_initiator_node_queue_depth(
        }
        spin_unlock_irq(&tpg->acl_node_lock);
 
-       spin_lock_bh(&tpg->session_lock);
+       spin_lock_irqsave(&tpg->session_lock, flags);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;
@@ -537,7 +578,7 @@ int core_tpg_set_initiator_node_queue_depth(
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
-                       spin_unlock_bh(&tpg->session_lock);
+                       spin_unlock_irqrestore(&tpg->session_lock, flags);
 
                        spin_lock_irq(&tpg->acl_node_lock);
                        if (dynamic_acl)
@@ -567,7 +608,7 @@ int core_tpg_set_initiator_node_queue_depth(
        acl->queue_depth = queue_depth;
 
        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
-               spin_unlock_bh(&tpg->session_lock);
+               spin_unlock_irqrestore(&tpg->session_lock, flags);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
@@ -583,7 +624,7 @@ int core_tpg_set_initiator_node_queue_depth(
                spin_unlock_irq(&tpg->acl_node_lock);
                return -EINVAL;
        }
-       spin_unlock_bh(&tpg->session_lock);
+       spin_unlock_irqrestore(&tpg->session_lock, flags);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
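
The conversion from the _bh variants to spin_lock_irqsave()/spin_unlock_irqrestore() throughout this function (together with the new local 'flags' added above) is the standard idiom when a lock must also be safe in contexts that run with interrupts disabled; the _bh variants only mask softirqs. A minimal sketch, using only fields already referenced in this function:

	unsigned long flags;

	spin_lock_irqsave(&tpg->session_lock, flags);
	/* walk tpg->tpg_sess_list under the lock */
	spin_unlock_irqrestore(&tpg->session_lock, flags);

spin_lock_irqsave() saves the caller's interrupt state into 'flags' and disables local interrupts; spin_unlock_irqrestore() puts that exact state back, so the pairing nests correctly regardless of the state the caller was in.
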
@@ -647,8 +688,8 @@ int core_tpg_register(
        struct se_lun *lun;
        u32 i;
 
-       se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
-                               TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
+       se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
+                       sizeof(struct se_lun), GFP_KERNEL);
        if (!se_tpg->tpg_lun_list) {
                pr_err("Unable to allocate struct se_portal_group->"
                                "tpg_lun_list\n");
@@ -656,7 +697,7 @@ int core_tpg_register(
        }
 
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               lun = &se_tpg->tpg_lun_list[i];
+               lun = se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
@@ -742,7 +783,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
                core_tpg_release_virtual_lun0(se_tpg);
 
        se_tpg->se_tpg_fabric_ptr = NULL;
-       kfree(se_tpg->tpg_lun_list);
+       array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
        return 0;
 }
 EXPORT_SYMBOL(core_tpg_deregister);
@@ -763,7 +804,7 @@ struct se_lun *core_tpg_pre_addlun(
        }
 
        spin_lock(&tpg->tpg_lun_lock);
-       lun = &tpg->tpg_lun_list[unpacked_lun];
+       lun = tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("TPG Logical Unit Number: %u is already active"
                        " on %s Target Portal Group: %u, ignoring request.\n",
@@ -821,7 +862,7 @@ struct se_lun *core_tpg_pre_dellun(
        }
 
        spin_lock(&tpg->tpg_lun_lock);
-       lun = &tpg->tpg_lun_list[unpacked_lun];
+       lun = tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %u, ignoring request.\n",