target: session krefs, CMD_T_* transport_state flags, and target_submit_cmd/tmr helpers in target_core_transport.c
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 176f956..69f3f7d 100644
@@ -37,6 +37,7 @@
 #include <linux/in.h>
 #include <linux/cdrom.h>
 #include <linux/module.h>
+#include <linux/ratelimit.h>
 #include <asm/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
@@ -58,7 +59,6 @@ static int sub_api_initialized;
 
 static struct workqueue_struct *target_completion_wq;
 static struct kmem_cache *se_sess_cache;
-struct kmem_cache *se_tmr_req_cache;
 struct kmem_cache *se_ua_cache;
 struct kmem_cache *t10_pr_reg_cache;
 struct kmem_cache *t10_alua_lu_gp_cache;
@@ -68,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
 static int transport_generic_write_pending(struct se_cmd *);
 static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev);
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
                struct se_device *dev);
@@ -77,26 +77,17 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
 {
-       se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
-                       sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
-                       0, NULL);
-       if (!se_tmr_req_cache) {
-               pr_err("kmem_cache_create() for struct se_tmr_req"
-                               " failed\n");
-               goto out;
-       }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
                        0, NULL);
        if (!se_sess_cache) {
                pr_err("kmem_cache_create() for struct se_session"
                                " failed\n");
-               goto out_free_tmr_req_cache;
+               goto out;
        }
        se_ua_cache = kmem_cache_create("se_ua_cache",
                        sizeof(struct se_ua), __alignof__(struct se_ua),
@@ -169,8 +160,6 @@ out_free_ua_cache:
        kmem_cache_destroy(se_ua_cache);
 out_free_sess_cache:
        kmem_cache_destroy(se_sess_cache);
-out_free_tmr_req_cache:
-       kmem_cache_destroy(se_tmr_req_cache);
 out:
        return -ENOMEM;
 }
@@ -178,7 +167,6 @@ out:
 void release_se_kmem_caches(void)
 {
        destroy_workqueue(target_completion_wq);
-       kmem_cache_destroy(se_tmr_req_cache);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
        kmem_cache_destroy(t10_pr_reg_cache);
@@ -258,13 +246,14 @@ struct se_session *transport_init_session(void)
        INIT_LIST_HEAD(&se_sess->sess_cmd_list);
        INIT_LIST_HEAD(&se_sess->sess_wait_list);
        spin_lock_init(&se_sess->sess_cmd_lock);
+       kref_init(&se_sess->sess_kref);
 
        return se_sess;
 }
 EXPORT_SYMBOL(transport_init_session);
 
 /*
- * Called with spin_lock_bh(&struct se_portal_group->session_lock called.
+ * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
  */
 void __transport_register_session(
        struct se_portal_group *se_tpg,
@@ -293,6 +282,8 @@ void __transport_register_session(
                                        &buf[0], PR_REG_ISID_LEN);
                        se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
                }
+               kref_get(&se_nacl->acl_kref);
+
                spin_lock_irq(&se_nacl->nacl_sess_lock);
                /*
                 * The se_nacl->nacl_sess pointer will be set to the
@@ -317,12 +308,48 @@ void transport_register_session(
        struct se_session *se_sess,
        void *fabric_sess_ptr)
 {
-       spin_lock_bh(&se_tpg->session_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&se_tpg->session_lock, flags);
        __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
-       spin_unlock_bh(&se_tpg->session_lock);
+       spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 }
 EXPORT_SYMBOL(transport_register_session);
 
+static void target_release_session(struct kref *kref)
+{
+       struct se_session *se_sess = container_of(kref,
+                       struct se_session, sess_kref);
+       struct se_portal_group *se_tpg = se_sess->se_tpg;
+
+       se_tpg->se_tpg_tfo->close_session(se_sess);
+}
+
+void target_get_session(struct se_session *se_sess)
+{
+       kref_get(&se_sess->sess_kref);
+}
+EXPORT_SYMBOL(target_get_session);
+
+int target_put_session(struct se_session *se_sess)
+{
+       return kref_put(&se_sess->sess_kref, target_release_session);
+}
+EXPORT_SYMBOL(target_put_session);
+
+static void target_complete_nacl(struct kref *kref)
+{
+       struct se_node_acl *nacl = container_of(kref,
+                               struct se_node_acl, acl_kref);
+
+       complete(&nacl->acl_free_comp);
+}
+
+void target_put_nacl(struct se_node_acl *nacl)
+{
+       kref_put(&nacl->acl_kref, target_complete_nacl);
+}
+
 void transport_deregister_session_configfs(struct se_session *se_sess)
 {
        struct se_node_acl *se_nacl;
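
The new sess_kref with target_get_session()/target_put_session() gives fabric drivers a reference count on the session itself; the final put invokes the fabric's ->close_session() callback via target_release_session(). A minimal sketch of the intended usage, with a hypothetical fabric work item (the my_fabric_* names are illustrative stand-ins, not part of this patch):

struct my_fabric_event {
	struct work_struct work;
	struct se_session *se_sess;
	/* fabric-specific payload would follow */
};

static void my_fabric_event_work(struct work_struct *work)
{
	struct my_fabric_event *ev =
		container_of(work, struct my_fabric_event, work);

	/* ... process the event against ev->se_sess ... */

	/* Drop the reference taken at queue time; if it is the last
	 * one, target_release_session() calls ->close_session(). */
	target_put_session(ev->se_sess);
	kfree(ev);
}

static void my_fabric_queue_event(struct se_session *se_sess,
				  struct my_fabric_event *ev)
{
	ev->se_sess = se_sess;
	target_get_session(se_sess);	/* pin the session across async work */
	INIT_WORK(&ev->work, my_fabric_event_work);
	schedule_work(&ev->work);
}
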
@@ -333,7 +360,8 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
        se_nacl = se_sess->se_node_acl;
        if (se_nacl) {
                spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
-               list_del(&se_sess->sess_acl_list);
+               if (se_nacl->acl_stop == 0)
+                       list_del(&se_sess->sess_acl_list);
                /*
                 * If the session list is empty, then clear the pointer.
                 * Otherwise, set the struct se_session pointer from the tail
@@ -360,13 +388,16 @@ EXPORT_SYMBOL(transport_free_session);
 void transport_deregister_session(struct se_session *se_sess)
 {
        struct se_portal_group *se_tpg = se_sess->se_tpg;
+       struct target_core_fabric_ops *se_tfo;
        struct se_node_acl *se_nacl;
        unsigned long flags;
+       bool comp_nacl = true;
 
        if (!se_tpg) {
                transport_free_session(se_sess);
                return;
        }
+       se_tfo = se_tpg->se_tpg_tfo;
 
        spin_lock_irqsave(&se_tpg->session_lock, flags);
        list_del(&se_sess->sess_list);
@@ -379,29 +410,34 @@ void transport_deregister_session(struct se_session *se_sess)
         * struct se_node_acl if it had been previously dynamically generated.
         */
        se_nacl = se_sess->se_node_acl;
-       if (se_nacl) {
-               spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
-               if (se_nacl->dynamic_node_acl) {
-                       if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
-                                       se_tpg)) {
-                               list_del(&se_nacl->acl_list);
-                               se_tpg->num_node_acls--;
-                               spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
-
-                               core_tpg_wait_for_nacl_pr_ref(se_nacl);
-                               core_free_device_list_for_node(se_nacl, se_tpg);
-                               se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
-                                               se_nacl);
-                               spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
-                       }
+
+       spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
+       if (se_nacl && se_nacl->dynamic_node_acl) {
+               if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+                       list_del(&se_nacl->acl_list);
+                       se_tpg->num_node_acls--;
+                       spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
+                       core_tpg_wait_for_nacl_pr_ref(se_nacl);
+                       core_free_device_list_for_node(se_nacl, se_tpg);
+                       se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);
+
+                       comp_nacl = false;
+                       spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
                }
-               spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
        }
-
-       transport_free_session(se_sess);
+       spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
 
        pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
                se_tpg->se_tpg_tfo->get_fabric_name());
+       /*
+        * If the last kref is dropping now for an explicit NodeACL, wake the
+        * sleeping ->acl_free_comp caller in the configfs se_node_acl->acl_group
+        * removal context.
+        */
+       if (se_nacl && comp_nacl == true)
+               target_put_nacl(se_nacl);
+
+       transport_free_session(se_sess);
 }
 EXPORT_SYMBOL(transport_deregister_session);
 
@@ -437,7 +473,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 
 /*     transport_cmd_check_stop():
  *
- *     'transport_off = 1' determines if t_transport_active should be cleared.
+ *     'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
  *     'transport_off = 2' determines if task_dev_state should be removed.
  *
  *     A non-zero u8 t_state sets cmd->t_state.
@@ -455,12 +491,11 @@ static int transport_cmd_check_stop(
         * Determine if IOCTL context caller is requesting the stopping of this
         * command for LUN shutdown purposes.
         */
-       if (atomic_read(&cmd->transport_lun_stop)) {
-               pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
-                       " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
-                       cmd->se_tfo->get_task_tag(cmd));
+       if (cmd->transport_state & CMD_T_LUN_STOP) {
+               pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
 
-               atomic_set(&cmd->t_transport_active, 0);
+               cmd->transport_state &= ~CMD_T_ACTIVE;
                if (transport_off == 2)
                        transport_all_task_dev_remove_state(cmd);
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
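
This hunk is the first of many converting per-condition atomic_t booleans (t_transport_active, t_transport_stop, ...) into bits of a single cmd->transport_state word. All updates already happen under cmd->t_state_lock, so a plain read-modify-write on one flags field is sufficient and cheaper than a set of atomics. A sketch of the pattern; the CMD_T_* flags are defined in include/target/target_core_base.h:

static void mark_cmd_active(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state |= CMD_T_ACTIVE;	/* was atomic_set(&..., 1) */
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static bool cmd_is_stopping(struct se_cmd *cmd)
{
	unsigned long flags;
	bool stopping;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	stopping = (cmd->transport_state & CMD_T_STOP) != 0;	/* was atomic_read() */
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return stopping;
}
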
@@ -472,9 +507,9 @@ static int transport_cmd_check_stop(
         * Determine if frontend context caller is requesting the stopping of
         * this command for frontend exceptions.
         */
-       if (atomic_read(&cmd->t_transport_stop)) {
-               pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
-                       " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+       if (cmd->transport_state & CMD_T_STOP) {
+               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__,
                        cmd->se_tfo->get_task_tag(cmd));
 
                if (transport_off == 2)
@@ -492,7 +527,7 @@ static int transport_cmd_check_stop(
                return 1;
        }
        if (transport_off) {
-               atomic_set(&cmd->t_transport_active, 0);
+               cmd->transport_state &= ~CMD_T_ACTIVE;
                if (transport_off == 2) {
                        transport_all_task_dev_remove_state(cmd);
                        /*
@@ -540,31 +575,21 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
                return;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (!atomic_read(&cmd->transport_dev_active)) {
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               goto check_lun;
+       if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
+               cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
+               transport_all_task_dev_remove_state(cmd);
        }
-       atomic_set(&cmd->transport_dev_active, 0);
-       transport_all_task_dev_remove_state(cmd);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-
-check_lun:
        spin_lock_irqsave(&lun->lun_cmd_lock, flags);
-       if (atomic_read(&cmd->transport_lun_active)) {
-               list_del(&cmd->se_lun_node);
-               atomic_set(&cmd->transport_lun_active, 0);
-#if 0
-               pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n"
-                       cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
-#endif
-       }
+       if (!list_empty(&cmd->se_lun_node))
+               list_del_init(&cmd->se_lun_node);
        spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
 }
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
-       if (!cmd->se_tmr_req)
+       if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                transport_lun_remove_cmd(cmd);
 
        if (transport_cmd_check_stop_to_fabric(cmd))
@@ -585,7 +610,7 @@ static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
        if (t_state) {
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                cmd->t_state = t_state;
-               atomic_set(&cmd->t_transport_active, 1);
+               cmd->transport_state |= CMD_T_ACTIVE;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
        }
 
@@ -601,7 +626,7 @@ static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
                list_add(&cmd->se_queue_node, &qobj->qobj_list);
        else
                list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
-       atomic_set(&cmd->t_transport_queue_active, 1);
+       cmd->transport_state |= CMD_T_QUEUED;
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
        wake_up_interruptible(&qobj->thread_wq);
@@ -620,8 +645,7 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
        }
        cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
 
-       atomic_set(&cmd->t_transport_queue_active, 0);
-
+       cmd->transport_state &= ~CMD_T_QUEUED;
        list_del_init(&cmd->se_queue_node);
        atomic_dec(&qobj->queue_cnt);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -635,20 +659,14 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
        unsigned long flags;
 
        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       if (!atomic_read(&cmd->t_transport_queue_active)) {
+       if (!(cmd->transport_state & CMD_T_QUEUED)) {
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
                return;
        }
-       atomic_set(&cmd->t_transport_queue_active, 0);
+       cmd->transport_state &= ~CMD_T_QUEUED;
        atomic_dec(&qobj->queue_cnt);
        list_del_init(&cmd->se_queue_node);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-       if (atomic_read(&cmd->t_transport_queue_active)) {
-               pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
-                       cmd->se_tfo->get_task_tag(cmd),
-                       atomic_read(&cmd->t_transport_queue_active));
-       }
 }
 
 /*
@@ -691,12 +709,6 @@ void transport_complete_task(struct se_task *task, int success)
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
        unsigned long flags;
-#if 0
-       pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
-                       cmd->t_task_cdb[0], dev);
-#endif
-       if (dev)
-               atomic_inc(&dev->depth_left);
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        task->task_flags &= ~TF_ACTIVE;
@@ -725,7 +737,7 @@ void transport_complete_task(struct se_task *task, int success)
        }
 
        if (!success)
-               cmd->t_tasks_failed = 1;
+               cmd->transport_state |= CMD_T_FAILED;
 
        /*
         * Decrement the outstanding t_task_cdbs_left count.  The last
@@ -736,17 +748,24 @@ void transport_complete_task(struct se_task *task, int success)
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }
-
-       if (cmd->t_tasks_failed) {
+       /*
+        * Check for the case where an explicit ABORT_TASK has been received
+        * and transport_wait_for_tasks() is waiting for completion.
+        */
+       if (cmd->transport_state & CMD_T_ABORTED &&
+           cmd->transport_state & CMD_T_STOP) {
+               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+               complete(&cmd->t_transport_stop_comp);
+               return;
+       } else if (cmd->transport_state & CMD_T_FAILED) {
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
-               atomic_set(&cmd->t_transport_complete, 1);
                INIT_WORK(&cmd->work, target_complete_ok_work);
        }
 
        cmd->t_state = TRANSPORT_COMPLETE;
-       atomic_set(&cmd->t_transport_active, 1);
+       cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        queue_work(target_completion_wq, &cmd->work);
@@ -857,13 +876,11 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
-static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_task *task, *task_prev = NULL;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev->execute_task_lock, flags);
        list_for_each_entry(task, &cmd->t_task_list, t_list) {
                if (!list_empty(&task->t_execute_list))
                        continue;
@@ -874,6 +891,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
                __transport_add_task_to_execute_queue(task, task_prev, dev);
                task_prev = task;
        }
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+       unsigned long flags;
+       struct se_device *dev = cmd->se_dev;
+
+       spin_lock_irqsave(&dev->execute_task_lock, flags);
+       __transport_add_tasks_from_cmd(cmd);
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
@@ -971,9 +997,8 @@ void transport_dump_dev_state(
                break;
        }
 
-       *bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
-               atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
-               dev->queue_depth);
+       *bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
+               atomic_read(&dev->execute_tasks), dev->queue_depth);
        *bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
                dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
        *bl += sprintf(b + *bl, "        ");
@@ -1255,32 +1280,34 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
 static void scsi_dump_inquiry(struct se_device *dev)
 {
        struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
+       char buf[17];
        int i, device_type;
        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
-       pr_debug("  Vendor: ");
        for (i = 0; i < 8; i++)
                if (wwn->vendor[i] >= 0x20)
-                       pr_debug("%c", wwn->vendor[i]);
+                       buf[i] = wwn->vendor[i];
                else
-                       pr_debug(" ");
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Vendor: %s\n", buf);
 
-       pr_debug("  Model: ");
        for (i = 0; i < 16; i++)
                if (wwn->model[i] >= 0x20)
-                       pr_debug("%c", wwn->model[i]);
+                       buf[i] = wwn->model[i];
                else
-                       pr_debug(" ");
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Model: %s\n", buf);
 
-       pr_debug("  Revision: ");
        for (i = 0; i < 4; i++)
                if (wwn->revision[i] >= 0x20)
-                       pr_debug("%c", wwn->revision[i]);
+                       buf[i] = wwn->revision[i];
                else
-                       pr_debug(" ");
-
-       pr_debug("\n");
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Revision: %s\n", buf);
 
        device_type = dev->transport->get_device_type(dev);
        pr_debug("  Type:   %s ", scsi_device_type(device_type));
@@ -1328,9 +1355,6 @@ struct se_device *transport_add_device_to_core_hba(
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
-
-       dev->queue_depth        = dev_limits->queue_depth;
-       atomic_set(&dev->depth_left, dev->queue_depth);
        atomic_set(&dev->dev_ordered_id, 0);
 
        se_dev_set_default_attribs(dev, dev_limits);
@@ -1489,7 +1513,7 @@ void transport_init_se_cmd(
        init_completion(&cmd->t_transport_stop_comp);
        init_completion(&cmd->cmd_wait_comp);
        spin_lock_init(&cmd->t_state_lock);
-       atomic_set(&cmd->transport_dev_active, 1);
+       cmd->transport_state = CMD_T_DEV_ACTIVE;
 
        cmd->se_tfo = tfo;
        cmd->se_sess = se_sess;
@@ -1619,7 +1643,7 @@ int transport_handle_cdb_direct(
                return -EINVAL;
        }
        /*
-        * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
+        * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
         * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
         * in existing usage to ensure that outstanding descriptors are handled
         * correctly during shutdown via transport_wait_for_tasks()
@@ -1628,7 +1652,8 @@ int transport_handle_cdb_direct(
         * this to be called for initial descriptor submission.
         */
        cmd->t_state = TRANSPORT_NEW_CMD;
-       atomic_set(&cmd->t_transport_active, 1);
+       cmd->transport_state |= CMD_T_ACTIVE;
+
        /*
         * transport_generic_new_cmd() is already handling QUEUE_FULL,
         * so follow TRANSPORT_NEW_CMD processing thread context usage
@@ -1642,6 +1667,148 @@ int transport_handle_cdb_direct(
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
 
+/**
+ * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_table
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ **/
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+               unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+               u32 data_length, int task_attr, int data_dir, int flags)
+{
+       struct se_portal_group *se_tpg;
+       int rc;
+
+       se_tpg = se_sess->se_tpg;
+       BUG_ON(!se_tpg);
+       BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
+       BUG_ON(in_interrupt());
+       /*
+        * Initialize se_cmd for target operation.  From this point
+        * exceptions are handled by sending exception status via
+        * target_core_fabric_ops->queue_status() callback
+        */
+       transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+                               data_length, data_dir, task_attr, sense);
+       /*
+        * Obtain struct se_cmd->cmd_kref reference and add new cmd to
+        * se_sess->sess_cmd_list.  A second kref_get here is necessary
+        * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+        * kref_put() to happen during fabric packet acknowledgement.
+        */
+       target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+       /*
+        * Signal bidirectional data payloads to target-core
+        */
+       if (flags & TARGET_SCF_BIDI_OP)
+               se_cmd->se_cmd_flags |= SCF_BIDI;
+       /*
+        * Locate se_lun pointer and attach it to struct se_cmd
+        */
+       if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
+               transport_send_check_condition_and_sense(se_cmd,
+                               se_cmd->scsi_sense_reason, 0);
+               target_put_sess_cmd(se_sess, se_cmd);
+               return;
+       }
+       /*
+        * Sanitize CDBs via transport_generic_cmd_sequencer() and
+        * allocate the necessary tasks to complete the received CDB+data
+        */
+       rc = transport_generic_allocate_tasks(se_cmd, cdb);
+       if (rc != 0) {
+               transport_generic_request_failure(se_cmd);
+               return;
+       }
+       /*
+        * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
+        * for immediate execution of READs, otherwise wait for
+        * transport_generic_handle_data() to be called for WRITEs
+        * when fabric has filled the incoming buffer.
+        */
+       transport_handle_cdb_direct(se_cmd);
+       return;
+}
+EXPORT_SYMBOL(target_submit_cmd);
+
+static void target_complete_tmr_failure(struct work_struct *work)
+{
+       struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
+
+       se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+       se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+}
+
+/**
+ * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
+ *                     for TMR CDBs
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @fabric_tmr_ptr: fabric context for TMR req
+ * @tm_type: Type of TM request
+ * @gfp: gfp type for caller
+ * @tag: referenced task tag for TMR_ABORT_TASK
+ * @flags: submit cmd flags
+ *
+ * Callable from all contexts.
+ **/
+
+int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
+               unsigned char *sense, u32 unpacked_lun,
+               void *fabric_tmr_ptr, unsigned char tm_type,
+               gfp_t gfp, unsigned int tag, int flags)
+{
+       struct se_portal_group *se_tpg;
+       int ret;
+
+       se_tpg = se_sess->se_tpg;
+       BUG_ON(!se_tpg);
+
+       transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+                             0, DMA_NONE, MSG_SIMPLE_TAG, sense);
+       /*
+        * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
+        * allocation failure.
+        */
+       ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
+       if (ret < 0)
+               return -ENOMEM;
+
+       if (tm_type == TMR_ABORT_TASK)
+               se_cmd->se_tmr_req->ref_task_tag = tag;
+
+       /* See target_submit_cmd for commentary */
+       target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+
+       ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
+       if (ret) {
+               /*
+                * For callback during failure handling, push this work off
+                * to process context with TMR_LUN_DOES_NOT_EXIST status.
+                */
+               INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
+               schedule_work(&se_cmd->work);
+               return 0;
+       }
+       transport_generic_handle_tmr(se_cmd);
+       return 0;
+}
+EXPORT_SYMBOL(target_submit_tmr);
+
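+
Taken together, target_submit_cmd() collapses what used to be several fabric-side steps (transport_init_se_cmd, sess_cmd registration, LUN lookup, CDB sequencing, dispatch) into one call. A sketch of a fabric receive path using it; the my_fabric_* type and its fields are hypothetical stand-ins for whatever the fabric actually carries:

struct my_fabric_cmd {
	struct se_cmd se_cmd;	/* embedded target-core descriptor */
	unsigned char cdb[16];
	unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
	u32 unpacked_lun;
	u32 data_length;
	int data_dir;		/* DMA_TO_DEVICE / DMA_FROM_DEVICE / DMA_NONE */
	struct se_session *se_sess;
};

static void my_fabric_handle_scsi_cmd(struct my_fabric_cmd *fcmd)
{
	/* One call: init, kref setup, LUN lookup, CDB sequencing and
	 * dispatch.  Exceptions come back via ->queue_status(). */
	target_submit_cmd(&fcmd->se_cmd, fcmd->se_sess, fcmd->cdb,
			  fcmd->sense_buf, fcmd->unpacked_lun,
			  fcmd->data_length, MSG_SIMPLE_TAG,
			  fcmd->data_dir, TARGET_SCF_ACK_KREF);
}
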
 /*
  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
@@ -1773,7 +1940,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
-static void transport_generic_request_failure(struct se_cmd *cmd)
+void transport_generic_request_failure(struct se_cmd *cmd)
 {
        int ret = 0;
 
@@ -1785,14 +1952,14 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
                cmd->t_state, cmd->scsi_sense_reason);
        pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
                " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
-               " t_transport_active: %d t_transport_stop: %d"
-               " t_transport_sent: %d\n", cmd->t_task_list_num,
+               " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
+               cmd->t_task_list_num,
                atomic_read(&cmd->t_task_cdbs_left),
                atomic_read(&cmd->t_task_cdbs_sent),
                atomic_read(&cmd->t_task_cdbs_ex_left),
-               atomic_read(&cmd->t_transport_active),
-               atomic_read(&cmd->t_transport_stop),
-               atomic_read(&cmd->t_transport_sent));
+               (cmd->transport_state & CMD_T_ACTIVE) != 0,
+               (cmd->transport_state & CMD_T_STOP) != 0,
+               (cmd->transport_state & CMD_T_SENT) != 0);
 
        /*
         * For SAM Task Attribute emulation for failed struct se_cmd
@@ -1808,6 +1975,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
        case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
        case TCM_UNKNOWN_MODE_PAGE:
        case TCM_WRITE_PROTECTED:
+       case TCM_ADDRESS_OUT_OF_RANGE:
        case TCM_CHECK_CONDITION_ABORT_CMD:
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
        case TCM_CHECK_CONDITION_NOT_READY:
@@ -1865,6 +2033,7 @@ queue_full:
        cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
        transport_handle_queue_full(cmd, cmd->se_dev);
 }
+EXPORT_SYMBOL(transport_generic_request_failure);
 
 static inline u32 transport_lba_21(unsigned char *cdb)
 {
@@ -1908,18 +2077,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
        spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 }
 
-static inline int transport_tcq_window_closed(struct se_device *dev)
-{
-       if (dev->dev_tcq_window_closed++ <
-                       PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
-               msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
-       } else
-               msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
-
-       wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
-       return 0;
-}
-
 /*
  * Called from Fabric Module context from transport_execute_tasks()
  *
@@ -2002,13 +2159,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 static int transport_execute_tasks(struct se_cmd *cmd)
 {
        int add_tasks;
-
-       if (se_dev_check_online(cmd->se_dev) != 0) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               transport_generic_request_failure(cmd);
-               return 0;
-       }
-
+       struct se_device *se_dev = cmd->se_dev;
        /*
         * Call transport_cmd_check_stop() to see if a fabric exception
         * has occurred that prevents execution.
@@ -2022,19 +2173,16 @@ static int transport_execute_tasks(struct se_cmd *cmd)
                if (!add_tasks)
                        goto execute_tasks;
                /*
-                * This calls transport_add_tasks_from_cmd() to handle
-                * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
-                * (if enabled) in __transport_add_task_to_execute_queue() and
-                * transport_add_task_check_sam_attr().
+                * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
+                * adds associated se_tasks while holding dev->execute_task_lock
+                * before I/O dispatch to avoid a double spinlock acquisition.
                 */
-               transport_add_tasks_from_cmd(cmd);
+               __transport_execute_tasks(se_dev, cmd);
+               return 0;
        }
-       /*
-        * Kick the execution queue for the cmd associated struct se_device
-        * storage object.
-        */
+
 execute_tasks:
-       __transport_execute_tasks(cmd->se_dev);
+       __transport_execute_tasks(se_dev, NULL);
        return 0;
 }
 
@@ -2044,24 +2192,18 @@ execute_tasks:
  *
  * Called from transport_processing_thread()
  */
-static int __transport_execute_tasks(struct se_device *dev)
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
 {
        int error;
        struct se_cmd *cmd = NULL;
        struct se_task *task = NULL;
        unsigned long flags;
 
-       /*
-        * Check if there is enough room in the device and HBA queue to send
-        * struct se_tasks to the selected transport.
-        */
 check_depth:
-       if (!atomic_read(&dev->depth_left))
-               return transport_tcq_window_closed(dev);
-
-       dev->dev_tcq_window_closed = 0;
-
        spin_lock_irq(&dev->execute_task_lock);
+       if (new_cmd != NULL)
+               __transport_add_tasks_from_cmd(new_cmd);
+
        if (list_empty(&dev->execute_task_list)) {
                spin_unlock_irq(&dev->execute_task_lock);
                return 0;
@@ -2071,17 +2213,14 @@ check_depth:
        __transport_remove_task_from_execute_queue(task, dev);
        spin_unlock_irq(&dev->execute_task_lock);
 
-       atomic_dec(&dev->depth_left);
-
        cmd = task->task_se_cmd;
-
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        task->task_flags |= (TF_ACTIVE | TF_SENT);
        atomic_inc(&cmd->t_task_cdbs_sent);
 
        if (atomic_read(&cmd->t_task_cdbs_sent) ==
            cmd->t_task_list_num)
-               atomic_set(&cmd->t_transport_sent, 1);
+               cmd->transport_state |= CMD_T_SENT;
 
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
@@ -2092,13 +2231,14 @@ check_depth:
        if (error != 0) {
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                task->task_flags &= ~TF_ACTIVE;
+               cmd->transport_state &= ~CMD_T_SENT;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               atomic_set(&cmd->t_transport_sent, 0);
+
                transport_stop_tasks_for_cmd(cmd);
-               atomic_inc(&dev->depth_left);
                transport_generic_request_failure(cmd);
        }
 
+       new_cmd = NULL;
        goto check_depth;
 
        return 0;
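
The new_cmd parameter lets __transport_execute_tasks() append the caller's tasks under the same execute_task_lock acquisition that dequeues the next task, instead of the old lock/add/unlock followed by lock/dequeue/unlock. Clearing new_cmd before looping back keeps the tasks from being re-added on later passes. A generic sketch of the fold-the-enqueue-into-the-dispatch-loop pattern (my_* names are illustrative):

struct my_item {
	struct list_head node;
};

struct my_queue {
	spinlock_t lock;
	struct list_head list;
};

static void my_process(struct my_item *it);	/* consumes the item, may sleep */

static int my_dispatch(struct my_queue *q, struct my_item *new_item)
{
	struct my_item *it;

again:
	spin_lock_irq(&q->lock);
	if (new_item) {
		list_add_tail(&new_item->node, &q->list);
		new_item = NULL;	/* enqueue only on the first pass */
	}
	if (list_empty(&q->list)) {
		spin_unlock_irq(&q->lock);
		return 0;
	}
	it = list_first_entry(&q->list, struct my_item, node);
	list_del_init(&it->node);
	spin_unlock_irq(&q->lock);

	my_process(it);		/* runs with the lock dropped */
	goto again;
}
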
@@ -2300,7 +2440,7 @@ static void transport_xor_callback(struct se_cmd *cmd)
 
        offset = 0;
        for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
-               addr = kmap_atomic(sg_page(sg), KM_USER0);
+               addr = kmap_atomic(sg_page(sg));
                if (!addr)
                        goto out;
 
@@ -2308,7 +2448,7 @@ static void transport_xor_callback(struct se_cmd *cmd)
                        *(addr + sg->offset + i) ^= *(buf + offset + i);
 
                offset += sg->length;
-               kunmap_atomic(addr, KM_USER0);
+               kunmap_atomic(addr);
        }
 
 out:
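
These two hunks track the kmap_atomic() interface change: with stack-based atomic kmaps, the KM_USER0 slot argument is gone from both kmap_atomic() and kunmap_atomic(), and the conversion is purely mechanical. For example:

/* XOR a source buffer into one scatterlist entry, new-style. */
static void xor_into_sg_entry(struct scatterlist *sg, const u8 *src, u32 len)
{
	u8 *addr = kmap_atomic(sg_page(sg));	/* no KM_USER0 slot */
	u32 i;

	for (i = 0; i < len && i < sg->length; i++)
		addr[sg->offset + i] ^= src[i];

	kunmap_atomic(addr);			/* slot argument gone here too */
}
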
@@ -2495,6 +2635,7 @@ static int transport_generic_cmd_sequencer(
                                        cmd, cdb, pr_reg_type) != 0) {
                        cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+                       cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
                        cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
                        return -EBUSY;
                }
@@ -2653,7 +2794,7 @@ static int transport_generic_cmd_sequencer(
                        cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 
                        if (target_check_write_same_discard(&cdb[10], dev) < 0)
-                               goto out_invalid_cdb_field;
+                               goto out_unsupported_cdb;
                        if (!passthrough)
                                cmd->execute_task = target_emulate_write_same;
                        break;
@@ -2802,7 +2943,7 @@ static int transport_generic_cmd_sequencer(
 
                        pr_err("Unsupported SA: 0x%02x\n",
                                cmd->t_task_cdb[1] & 0x1f);
-                       goto out_unsupported_cdb;
+                       goto out_invalid_cdb_field;
                }
                /*FALLTHROUGH*/
        case ACCESS_CONTROL_IN:
@@ -2884,7 +3025,7 @@ static int transport_generic_cmd_sequencer(
                cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
                break;
        case SYNCHRONIZE_CACHE:
-       case 0x91: /* SYNCHRONIZE_CACHE_16: */
+       case SYNCHRONIZE_CACHE_16:
                /*
                 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
                 */
@@ -2936,7 +3077,7 @@ static int transport_generic_cmd_sequencer(
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 
                if (target_check_write_same_discard(&cdb[1], dev) < 0)
-                       goto out_invalid_cdb_field;
+                       goto out_unsupported_cdb;
                if (!passthrough)
                        cmd->execute_task = target_emulate_write_same;
                break;
@@ -2959,7 +3100,7 @@ static int transport_generic_cmd_sequencer(
                 * of byte 1 bit 3 UNMAP instead of original reserved field
                 */
                if (target_check_write_same_discard(&cdb[1], dev) < 0)
-                       goto out_invalid_cdb_field;
+                       goto out_unsupported_cdb;
                if (!passthrough)
                        cmd->execute_task = target_emulate_write_same;
                break;
@@ -3025,15 +3166,27 @@ static int transport_generic_cmd_sequencer(
                        /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
                        goto out_invalid_cdb_field;
                }
-
+               /*
+                * For the overflow case keep the existing fabric provided
+                * ->data_length.  Otherwise for the underflow case, reset
+                * ->data_length to the smaller SCSI expected data transfer
+                * length.
+                */
                if (size > cmd->data_length) {
                        cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
                        cmd->residual_count = (size - cmd->data_length);
                } else {
                        cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
                        cmd->residual_count = (cmd->data_length - size);
+                       cmd->data_length = size;
                }
-               cmd->data_length = size;
+       }
+
+       if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
+           sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
+               printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
+                                  cdb[0], sectors);
+               goto out_invalid_cdb_field;
        }
 
        /* reject any command that we don't have a handler for */
@@ -3041,11 +3194,6 @@ static int transport_generic_cmd_sequencer(
             (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
                goto out_unsupported_cdb;
 
-       /* Let's limit control cdbs to a page, for simplicity's sake. */
-       if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
-           size > PAGE_SIZE)
-               goto out_invalid_cdb_field;
-
        transport_set_supported_SAM_opcode(cmd);
        return ret;
 
@@ -3344,7 +3492,7 @@ static void transport_release_cmd(struct se_cmd *cmd)
 {
        BUG_ON(!cmd->se_tfo);
 
-       if (cmd->se_tmr_req)
+       if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                core_tmr_release_req(cmd->se_tmr_req);
        if (cmd->t_task_cdb != cmd->__t_task_cdb)
                kfree(cmd->t_task_cdb);
@@ -3381,8 +3529,8 @@ static void transport_put_cmd(struct se_cmd *cmd)
                        goto out_busy;
        }
 
-       if (atomic_read(&cmd->transport_dev_active)) {
-               atomic_set(&cmd->transport_dev_active, 0);
+       if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
+               cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
                transport_all_task_dev_remove_state(cmd);
                free_tasks = 1;
        }
@@ -3449,9 +3597,11 @@ int transport_generic_map_mem_to_cmd(
 }
 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
 
-void *transport_kmap_first_data_page(struct se_cmd *cmd)
+void *transport_kmap_data_sg(struct se_cmd *cmd)
 {
        struct scatterlist *sg = cmd->t_data_sg;
+       struct page **pages;
+       int i;
 
        BUG_ON(!sg);
        /*
@@ -3459,15 +3609,43 @@ void *transport_kmap_first_data_page(struct se_cmd *cmd)
         * tcm_loop who may be using a contig buffer from the SCSI midlayer for
         * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
         */
-       return kmap(sg_page(sg)) + sg->offset;
+       if (!cmd->t_data_nents)
+               return NULL;
+       else if (cmd->t_data_nents == 1)
+               return kmap(sg_page(sg)) + sg->offset;
+
+       /* >1 page. use vmap */
+       pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
+       if (!pages)
+               return NULL;
+
+       /* convert sg[] to pages[] */
+       for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
+               pages[i] = sg_page(sg);
+       }
+
+       cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
+       kfree(pages);
+       if (!cmd->t_data_vmap)
+               return NULL;
+
+       return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
 }
-EXPORT_SYMBOL(transport_kmap_first_data_page);
+EXPORT_SYMBOL(transport_kmap_data_sg);
 
-void transport_kunmap_first_data_page(struct se_cmd *cmd)
+void transport_kunmap_data_sg(struct se_cmd *cmd)
 {
-       kunmap(sg_page(cmd->t_data_sg));
+       if (!cmd->t_data_nents) {
+               return;
+       } else if (cmd->t_data_nents == 1) {
+               kunmap(sg_page(cmd->t_data_sg));
+               return;
+       }
+
+       vunmap(cmd->t_data_vmap);
+       cmd->t_data_vmap = NULL;
 }
-EXPORT_SYMBOL(transport_kunmap_first_data_page);
+EXPORT_SYMBOL(transport_kunmap_data_sg);
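
transport_kmap_first_data_page() could only expose the first scatterlist entry, which silently broke callers whose payload spans pages. The replacement collects each entry's page into an array and vmap()s them so the caller sees one virtually contiguous buffer; vunmap() tears it down. The core of the idea in isolation (assuming, as in the allocation path here, that every entry except possibly the first starts page-aligned):

/* Map an n-entry scatterlist into one contiguous kernel VA range. */
static void *map_sg_contig(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	struct page **pages;
	void *vaddr;
	int i;

	pages = kmalloc(sizeof(*pages) * nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	for_each_sg(sgl, sg, nents, i)
		pages[i] = sg_page(sg);

	vaddr = vmap(pages, nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);	/* the array is only needed while building the map */

	return vaddr ? vaddr + sgl[0].offset : NULL;
}
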
 
 static int
 transport_generic_get_mem(struct se_cmd *cmd)
@@ -3475,6 +3653,7 @@ transport_generic_get_mem(struct se_cmd *cmd)
        u32 length = cmd->data_length;
        unsigned int nents;
        struct page *page;
+       gfp_t zero_flag;
        int i = 0;
 
        nents = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -3485,9 +3664,11 @@ transport_generic_get_mem(struct se_cmd *cmd)
        cmd->t_data_nents = nents;
        sg_init_table(cmd->t_data_sg, nents);
 
+       zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
+
        while (length) {
                u32 page_len = min_t(u32, length, PAGE_SIZE);
-               page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               page = alloc_page(GFP_KERNEL | zero_flag);
                if (!page)
                        goto out;
 
@@ -3498,9 +3679,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
        return 0;
 
 out:
-       while (i >= 0) {
-               __free_page(sg_page(&cmd->t_data_sg[i]));
+       while (i > 0) {
                i--;
+               __free_page(sg_page(&cmd->t_data_sg[i]));
        }
        kfree(cmd->t_data_sg);
        cmd->t_data_sg = NULL;
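
Besides the __GFP_ZERO optimization (only non-data CDB buffers still need zeroing), the error path gets an off-by-one fix: the old loop freed t_data_sg[i] before decrementing, touching the entry whose allocation had just failed and was never set. Decrementing first unwinds exactly the pages that were allocated. The same idiom in standalone form:

static int alloc_page_array(struct page **pages, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out;	/* pages[0..i-1] are valid, pages[i] is not */
	}
	return 0;

out:
	while (i > 0) {
		i--;			/* step back first... */
		__free_page(pages[i]);	/* ...then free a populated slot */
	}
	return -ENOMEM;
}
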
@@ -3715,6 +3896,11 @@ transport_allocate_control_task(struct se_cmd *cmd)
        struct se_task *task;
        unsigned long flags;
 
+       /* Workaround for handling zero-length control CDBs */
+       if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+           !cmd->data_length)
+               return 0;
+
        task = transport_generic_get_task(cmd, cmd->data_direction);
        if (!task)
                return -ENOMEM;
@@ -3784,8 +3970,18 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
        if (task_cdbs < 0)
                goto out_fail;
        else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+               spin_lock_irq(&cmd->t_state_lock);
                cmd->t_state = TRANSPORT_COMPLETE;
-               atomic_set(&cmd->t_transport_active, 1);
+               cmd->transport_state |= CMD_T_ACTIVE;
+               spin_unlock_irq(&cmd->t_state_lock);
+
+               if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
+                       u8 ua_asc = 0, ua_ascq = 0;
+
+                       core_scsi3_ua_clear_for_request_sense(cmd,
+                                       &ua_asc, &ua_ascq);
+               }
+
                INIT_WORK(&cmd->work, target_complete_ok_work);
                queue_work(target_completion_wq, &cmd->work);
                return 0;
@@ -3858,9 +4054,9 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 
        /*
         * Clear the se_cmd for WRITE_PENDING status in order to set
-        * cmd->t_transport_active=0 so that transport_generic_handle_data
-        * can be called from HW target mode interrupt code.  This is safe
-        * to be called with transport_off=1 before the cmd->se_tfo->write_pending
+        * CMD_T_ACTIVE to zero so that transport_generic_handle_data can be
+        * called from HW target mode interrupt code.  This is safe to be
+        * called with transport_off=1 before the cmd->se_tfo->write_pending
         * because the se_cmd->se_lun pointer is not being cleared.
         */
        transport_cmd_check_stop(cmd, 1, 0);
@@ -3887,7 +4083,7 @@ queue_full:
 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
-               if (wait_for_tasks && cmd->se_tmr_req)
+               if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                         transport_wait_for_tasks(cmd);
 
                transport_release_cmd(cmd);
@@ -3910,13 +4106,23 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
  * @se_sess:   session to reference
  * @se_cmd:    command descriptor to add
+ * @ack_kref:  Signal that fabric will perform an ack target_put_sess_cmd()
  */
-void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+                       bool ack_kref)
 {
        unsigned long flags;
 
        kref_init(&se_cmd->cmd_kref);
-       kref_get(&se_cmd->cmd_kref);
+       /*
+        * Add a second kref if the fabric caller is expecting to handle
+        * fabric acknowledgement that requires two target_put_sess_cmd()
+        * invocations before se_cmd descriptor release.
+        */
+       if (ack_kref == true) {
+               kref_get(&se_cmd->cmd_kref);
+               se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+       }
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
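
With ack_kref=true the descriptor now starts with two references instead of the old unconditional kref_init()+kref_get(): one dropped when target-core finishes with the command, one dropped from the fabric's acknowledgement path. A sketch of the second put in a hypothetical fabric ack handler:

/* Hypothetical fabric ack path for TARGET_SCF_ACK_KREF commands:
 * drop the extra reference once the initiator has acknowledged the
 * response.  The final put releases the descriptor via
 * target_release_cmd_kref() -> ->release_cmd(). */
static void my_fabric_handle_resp_ack(struct se_cmd *se_cmd)
{
	target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
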
@@ -3934,7 +4140,7 @@ static void target_release_cmd_kref(struct kref *kref)
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (list_empty(&se_cmd->se_cmd_list)) {
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-               WARN_ON(1);
+               se_cmd->se_tfo->release_cmd(se_cmd);
                return;
        }
        if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
@@ -4038,15 +4244,16 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
         * be stopped, we can safely ignore this struct se_cmd.
         */
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (atomic_read(&cmd->t_transport_stop)) {
-               atomic_set(&cmd->transport_lun_stop, 0);
-               pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
-                       " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
+       if (cmd->transport_state & CMD_T_STOP) {
+               cmd->transport_state &= ~CMD_T_LUN_STOP;
+
+               pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
+                        cmd->se_tfo->get_task_tag(cmd));
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                transport_cmd_check_stop(cmd, 1, 0);
                return -EPERM;
        }
-       atomic_set(&cmd->transport_lun_fe_stop, 1);
+       cmd->transport_state |= CMD_T_LUN_FE_STOP;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
@@ -4079,9 +4286,8 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
        while (!list_empty(&lun->lun_cmd_list)) {
                cmd = list_first_entry(&lun->lun_cmd_list,
                       struct se_cmd, se_lun_node);
-               list_del(&cmd->se_lun_node);
+               list_del_init(&cmd->se_lun_node);
 
-               atomic_set(&cmd->transport_lun_active, 0);
                /*
                 * This will notify iscsi_target_transport.c:
                 * transport_cmd_check_stop() that a LUN shutdown is in
@@ -4092,7 +4298,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
                        "_lun_stop for  ITT: 0x%08x\n",
                        cmd->se_lun->unpacked_lun,
                        cmd->se_tfo->get_task_tag(cmd));
-               atomic_set(&cmd->transport_lun_stop, 1);
+               cmd->transport_state |= CMD_T_LUN_STOP;
                spin_unlock(&cmd->t_state_lock);
 
                spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -4122,11 +4328,11 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
                        cmd->se_tfo->get_task_tag(cmd));
 
                spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-               if (!atomic_read(&cmd->transport_dev_active)) {
+               if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
                        spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
                        goto check_cond;
                }
-               atomic_set(&cmd->transport_dev_active, 0);
+               cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
                transport_all_task_dev_remove_state(cmd);
                spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
 
@@ -4146,7 +4352,7 @@ check_cond:
                 * finished accessing it.
                 */
                spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-               if (atomic_read(&cmd->transport_lun_fe_stop)) {
+               if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
                        pr_debug("SE_LUN[%d] - Detected FE stop for"
                                " struct se_cmd: %p ITT: 0x%08x\n",
                                lun->unpacked_lun,
@@ -4205,7 +4411,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
        unsigned long flags;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
+       if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+           !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return false;
        }
@@ -4213,7 +4420,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
         * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
         * has been set in transport_set_supported_SAM_opcode().
         */
-       if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
+       if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+           !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return false;
        }
@@ -4224,8 +4432,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
         * transport_clear_lun_from_sessions() once the ConfigFS context caller
         * has completed its operation on the struct se_cmd.
         */
-       if (atomic_read(&cmd->transport_lun_stop)) {
-
+       if (cmd->transport_state & CMD_T_LUN_STOP) {
                pr_debug("wait_for_tasks: Stopping"
                        " wait_for_completion(&cmd->t_tasktransport_lun_fe"
                        "_stop_comp); for ITT: 0x%08x\n",
@@ -4253,18 +4460,18 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
                        "stop_comp); for ITT: 0x%08x\n",
                        cmd->se_tfo->get_task_tag(cmd));
 
-               atomic_set(&cmd->transport_lun_stop, 0);
+               cmd->transport_state &= ~CMD_T_LUN_STOP;
        }
-       if (!atomic_read(&cmd->t_transport_active) ||
-            atomic_read(&cmd->t_transport_aborted)) {
+
+       if (!(cmd->transport_state & CMD_T_ACTIVE)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return false;
        }
 
-       atomic_set(&cmd->t_transport_stop, 1);
+       cmd->transport_state |= CMD_T_STOP;
 
        pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
-               " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
+               " i_state: %d, t_state: %d, CMD_T_STOP\n",
                cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
 
@@ -4275,8 +4482,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
        wait_for_completion(&cmd->t_transport_stop_comp);
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       atomic_set(&cmd->t_transport_active, 0);
-       atomic_set(&cmd->t_transport_stop, 0);
+       cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
 
        pr_debug("wait_for_tasks: Stopped wait_for_compltion("
                "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
@@ -4349,6 +4555,7 @@ int transport_send_check_condition_and_sense(
        case TCM_NON_EXISTENT_LUN:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* LOGICAL UNIT NOT SUPPORTED */
@@ -4358,6 +4565,7 @@ int transport_send_check_condition_and_sense(
        case TCM_SECTOR_COUNT_TOO_MANY:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID COMMAND OPERATION CODE */
@@ -4366,6 +4574,7 @@ int transport_send_check_condition_and_sense(
        case TCM_UNKNOWN_MODE_PAGE:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN CDB */
@@ -4374,6 +4583,7 @@ int transport_send_check_condition_and_sense(
        case TCM_CHECK_CONDITION_ABORT_CMD:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* BUS DEVICE RESET FUNCTION OCCURRED */
@@ -4383,6 +4593,7 @@ int transport_send_check_condition_and_sense(
        case TCM_INCORRECT_AMOUNT_OF_DATA:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* WRITE ERROR */
@@ -4393,22 +4604,25 @@ int transport_send_check_condition_and_sense(
        case TCM_INVALID_CDB_FIELD:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
-               /* ABORTED COMMAND */
-               buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* ILLEGAL REQUEST */
+               buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN CDB */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
                break;
        case TCM_INVALID_PARAMETER_LIST:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
-               /* ABORTED COMMAND */
-               buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* ILLEGAL REQUEST */
+               buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN PARAMETER LIST */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
                break;
        case TCM_UNEXPECTED_UNSOLICITED_DATA:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* WRITE ERROR */
@@ -4419,6 +4633,7 @@ int transport_send_check_condition_and_sense(
        case TCM_SERVICE_CRC_ERROR:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* PROTOCOL SERVICE CRC ERROR */
@@ -4429,6 +4644,7 @@ int transport_send_check_condition_and_sense(
        case TCM_SNACK_REJECTED:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* READ ERROR */
@@ -4439,14 +4655,25 @@ int transport_send_check_condition_and_sense(
        case TCM_WRITE_PROTECTED:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* DATA PROTECT */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
                /* WRITE PROTECTED */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
                break;
+       case TCM_ADDRESS_OUT_OF_RANGE:
+               /* CURRENT ERROR */
+               buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* ILLEGAL REQUEST */
+               buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+               /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+               buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
+               break;
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* UNIT ATTENTION */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
                core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
@@ -4456,6 +4683,7 @@ int transport_send_check_condition_and_sense(
        case TCM_CHECK_CONDITION_NOT_READY:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* Not Ready */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
                transport_get_sense_codes(cmd, &asc, &ascq);
@@ -4466,6 +4694,7 @@ int transport_send_check_condition_and_sense(
        default:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* LOGICAL UNIT COMMUNICATION FAILURE */
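
All the fixed-format sense paths above now fill in byte 7, the additional sense length, with 10: 8 fixed bytes plus 10 additional gives the 18 valid bytes needed to reach the ASC/ASCQ fields at offsets 12 and 13, which initiators check before trusting the rest of the buffer. The resulting layout, summarized as a sketch:

/* Fixed-format sense data as built above (offsets per SPC):
 *   [0]  0x70  current error, fixed format
 *   [2]  sense key                (SPC_SENSE_KEY_OFFSET)
 *   [7]  10    additional length  (SPC_ADD_SENSE_LEN_OFFSET)
 *   [12] ASC                      (SPC_ASC_KEY_OFFSET)
 *   [13] ASCQ                     (SPC_ASCQ_KEY_OFFSET)
 * => 8 + 10 = 18 valid bytes in total. */
static void build_fixed_sense(u8 *buf, u8 key, u8 asc, u8 ascq)
{
	memset(buf, 0, 18);
	buf[0] = 0x70;
	buf[2] = key;
	buf[7] = 10;
	buf[12] = asc;
	buf[13] = ascq;
}
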
@@ -4491,7 +4720,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 {
        int ret = 0;
 
-       if (atomic_read(&cmd->t_transport_aborted) != 0) {
+       if (cmd->transport_state & CMD_T_ABORTED) {
                if (!send_status ||
                     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
                        return 1;
@@ -4528,7 +4757,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
         */
        if (cmd->data_direction == DMA_TO_DEVICE) {
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-                       atomic_inc(&cmd->t_transport_aborted);
+                       cmd->transport_state |= CMD_T_ABORTED;
                        smp_mb__after_atomic_inc();
                }
        }
@@ -4549,7 +4778,7 @@ static int transport_generic_do_tmr(struct se_cmd *cmd)
 
        switch (tmr->function) {
        case TMR_ABORT_TASK:
-               tmr->response = TMR_FUNCTION_REJECTED;
+               core_tmr_abort_task(dev, tmr, cmd->se_sess);
                break;
        case TMR_ABORT_TASK_SET:
        case TMR_CLEAR_ACA:
@@ -4599,8 +4828,6 @@ static int transport_processing_thread(void *param)
                        goto out;
 
 get_cmd:
-               __transport_execute_tasks(dev);
-
                cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
                if (!cmd)
                        continue;