target: remove the transport_lun_active field in struct se_cmd
[linux-2.6.git] / drivers / target / target_core_transport.c
index 2869fb7..78ea638 100644 (file)
@@ -437,7 +437,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 
 /*     transport_cmd_check_stop():
  *
- *     'transport_off = 1' determines if t_transport_active should be cleared.
+ *     'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
  *     'transport_off = 2' determines if task_dev_state should be removed.
  *
  *     A non-zero u8 t_state sets cmd->t_state.
@@ -455,12 +455,11 @@ static int transport_cmd_check_stop(
         * Determine if IOCTL context caller in requesting the stopping of this
         * command for LUN shutdown purposes.
         */
-       if (atomic_read(&cmd->transport_lun_stop)) {
-               pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
-                       " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
-                       cmd->se_tfo->get_task_tag(cmd));
+       if (cmd->transport_state & CMD_T_LUN_STOP) {
+               pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
 
-               atomic_set(&cmd->t_transport_active, 0);
+               cmd->transport_state &= ~CMD_T_ACTIVE;
                if (transport_off == 2)
                        transport_all_task_dev_remove_state(cmd);
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -472,9 +471,9 @@ static int transport_cmd_check_stop(
         * Determine if frontend context caller is requesting the stopping of
         * this command for frontend exceptions.
         */
-       if (atomic_read(&cmd->t_transport_stop)) {
-               pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
-                       " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+       if (cmd->transport_state & CMD_T_STOP) {
+               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__,
                        cmd->se_tfo->get_task_tag(cmd));
 
                if (transport_off == 2)
@@ -492,7 +491,7 @@ static int transport_cmd_check_stop(
                return 1;
        }
        if (transport_off) {
-               atomic_set(&cmd->t_transport_active, 0);
+               cmd->transport_state &= ~CMD_T_ACTIVE;
                if (transport_off == 2) {
                        transport_all_task_dev_remove_state(cmd);
                        /*
@@ -540,25 +539,15 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
                return;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (!atomic_read(&cmd->transport_dev_active)) {
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               goto check_lun;
+       if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
+               cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
+               transport_all_task_dev_remove_state(cmd);
        }
-       atomic_set(&cmd->transport_dev_active, 0);
-       transport_all_task_dev_remove_state(cmd);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-
-check_lun:
        spin_lock_irqsave(&lun->lun_cmd_lock, flags);
-       if (atomic_read(&cmd->transport_lun_active)) {
-               list_del(&cmd->se_lun_node);
-               atomic_set(&cmd->transport_lun_active, 0);
-#if 0
-               pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n"
-                       cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
-#endif
-       }
+       if (!list_empty(&cmd->se_lun_node))
+               list_del_init(&cmd->se_lun_node);
        spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
 }
 
@@ -585,7 +574,7 @@ static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
        if (t_state) {
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                cmd->t_state = t_state;
-               atomic_set(&cmd->t_transport_active, 1);
+               cmd->transport_state |= CMD_T_ACTIVE;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
        }
 
@@ -601,7 +590,7 @@ static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
                list_add(&cmd->se_queue_node, &qobj->qobj_list);
        else
                list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
-       atomic_set(&cmd->t_transport_queue_active, 1);
+       cmd->transport_state |= CMD_T_QUEUED;
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
        wake_up_interruptible(&qobj->thread_wq);
@@ -620,8 +609,7 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
        }
        cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
 
-       atomic_set(&cmd->t_transport_queue_active, 0);
-
+       cmd->transport_state &= ~CMD_T_QUEUED;
        list_del_init(&cmd->se_queue_node);
        atomic_dec(&qobj->queue_cnt);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -635,20 +623,14 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
        unsigned long flags;
 
        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       if (!atomic_read(&cmd->t_transport_queue_active)) {
+       if (!(cmd->transport_state & CMD_T_QUEUED)) {
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
                return;
        }
-       atomic_set(&cmd->t_transport_queue_active, 0);
+       cmd->transport_state &= ~CMD_T_QUEUED;
        atomic_dec(&qobj->queue_cnt);
        list_del_init(&cmd->se_queue_node);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-       if (atomic_read(&cmd->t_transport_queue_active)) {
-               pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
-                       cmd->se_tfo->get_task_tag(cmd),
-                       atomic_read(&cmd->t_transport_queue_active));
-       }
 }
 
 /*
@@ -719,7 +701,7 @@ void transport_complete_task(struct se_task *task, int success)
        }
 
        if (!success)
-               cmd->t_tasks_failed = 1;
+               cmd->transport_state |= CMD_T_FAILED;
 
        /*
         * Decrement the outstanding t_task_cdbs_left count.  The last
@@ -731,16 +713,16 @@ void transport_complete_task(struct se_task *task, int success)
                return;
        }
 
-       if (cmd->t_tasks_failed) {
+       if (cmd->transport_state & CMD_T_FAILED) {
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
-               atomic_set(&cmd->t_transport_complete, 1);
+               cmd->transport_state |= CMD_T_COMPLETE;
                INIT_WORK(&cmd->work, target_complete_ok_work);
        }
 
        cmd->t_state = TRANSPORT_COMPLETE;
-       atomic_set(&cmd->t_transport_active, 1);
+       cmd->transport_state |= CMD_T_ACTIVE;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        queue_work(target_completion_wq, &cmd->work);
@@ -1255,32 +1237,34 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
 static void scsi_dump_inquiry(struct se_device *dev)
 {
        struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
+       char buf[17];
        int i, device_type;
        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
-       pr_debug("  Vendor: ");
        for (i = 0; i < 8; i++)
                if (wwn->vendor[i] >= 0x20)
-                       pr_debug("%c", wwn->vendor[i]);
+                       buf[i] = wwn->vendor[i];
                else
-                       pr_debug(" ");
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Vendor: %s\n", buf);
 
-       pr_debug("  Model: ");
        for (i = 0; i < 16; i++)
                if (wwn->model[i] >= 0x20)
-                       pr_debug("%c", wwn->model[i]);
+                       buf[i] = wwn->model[i];
                else
-                       pr_debug(" ");
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Model: %s\n", buf);
 
-       pr_debug("  Revision: ");
        for (i = 0; i < 4; i++)
                if (wwn->revision[i] >= 0x20)
-                       pr_debug("%c", wwn->revision[i]);
+                       buf[i] = wwn->revision[i];
                else
-                       pr_debug(" ");
-
-       pr_debug("\n");
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Revision: %s\n", buf);
 
        device_type = dev->transport->get_device_type(dev);
        pr_debug("  Type:   %s ", scsi_device_type(device_type));
@@ -1486,7 +1470,7 @@ void transport_init_se_cmd(
        init_completion(&cmd->t_transport_stop_comp);
        init_completion(&cmd->cmd_wait_comp);
        spin_lock_init(&cmd->t_state_lock);
-       atomic_set(&cmd->transport_dev_active, 1);
+       cmd->transport_state = CMD_T_DEV_ACTIVE;
 
        cmd->se_tfo = tfo;
        cmd->se_sess = se_sess;
@@ -1616,7 +1600,7 @@ int transport_handle_cdb_direct(
                return -EINVAL;
        }
        /*
-        * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
+        * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
         * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
         * in existing usage to ensure that outstanding descriptors are handled
         * correctly during shutdown via transport_wait_for_tasks()
@@ -1625,7 +1609,8 @@ int transport_handle_cdb_direct(
         * this to be called for initial descriptor submission.
         */
        cmd->t_state = TRANSPORT_NEW_CMD;
-       atomic_set(&cmd->t_transport_active, 1);
+       cmd->transport_state |= CMD_T_ACTIVE;
+
        /*
         * transport_generic_new_cmd() is already handling QUEUE_FULL,
         * so follow TRANSPORT_NEW_CMD processing thread context usage
@@ -1655,7 +1640,7 @@ EXPORT_SYMBOL(transport_handle_cdb_direct);
  * This may only be called from process context, and also currently
  * assumes internal allocation of fabric payload buffer by target-core.
  **/
-int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
                unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
                u32 data_length, int task_attr, int data_dir, int flags)
 {
@@ -1688,15 +1673,21 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
        /*
         * Locate se_lun pointer and attach it to struct se_cmd
         */
-       if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
-               goto out_check_cond;
+       if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
+               transport_send_check_condition_and_sense(se_cmd,
+                               se_cmd->scsi_sense_reason, 0);
+               target_put_sess_cmd(se_sess, se_cmd);
+               return;
+       }
        /*
         * Sanitize CDBs via transport_generic_cmd_sequencer() and
         * allocate the necessary tasks to complete the received CDB+data
         */
        rc = transport_generic_allocate_tasks(se_cmd, cdb);
-       if (rc != 0)
-               goto out_check_cond;
+       if (rc != 0) {
+               transport_generic_request_failure(se_cmd);
+               return;
+       }
        /*
         * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
         * for immediate execution of READs, otherwise wait for
@@ -1704,12 +1695,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
         * when fabric has filled the incoming buffer.
         */
        transport_handle_cdb_direct(se_cmd);
-       return 0;
-
-out_check_cond:
-       transport_send_check_condition_and_sense(se_cmd,
-                               se_cmd->scsi_sense_reason, 0);
-       return 0;
+       return;
 }
 EXPORT_SYMBOL(target_submit_cmd);
 
@@ -1856,14 +1842,14 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
                cmd->t_state, cmd->scsi_sense_reason);
        pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
                " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
-               " t_transport_active: %d t_transport_stop: %d"
-               " t_transport_sent: %d\n", cmd->t_task_list_num,
+               " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
+               cmd->t_task_list_num,
                atomic_read(&cmd->t_task_cdbs_left),
                atomic_read(&cmd->t_task_cdbs_sent),
                atomic_read(&cmd->t_task_cdbs_ex_left),
-               atomic_read(&cmd->t_transport_active),
-               atomic_read(&cmd->t_transport_stop),
-               atomic_read(&cmd->t_transport_sent));
+               (cmd->transport_state & CMD_T_ACTIVE) != 0,
+               (cmd->transport_state & CMD_T_STOP) != 0,
+               (cmd->transport_state & CMD_T_SENT) != 0);
 
        /*
         * For SAM Task Attribute emulation for failed struct se_cmd
@@ -2122,7 +2108,7 @@ check_depth:
 
        if (atomic_read(&cmd->t_task_cdbs_sent) ==
            cmd->t_task_list_num)
-               atomic_set(&cmd->t_transport_sent, 1);
+               cmd->transport_state |= CMD_T_SENT;
 
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
@@ -2133,8 +2119,9 @@ check_depth:
        if (error != 0) {
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                task->task_flags &= ~TF_ACTIVE;
+               cmd->transport_state &= ~CMD_T_SENT;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               atomic_set(&cmd->t_transport_sent, 0);
+
                transport_stop_tasks_for_cmd(cmd);
                transport_generic_request_failure(cmd);
        }
@@ -2694,7 +2681,7 @@ static int transport_generic_cmd_sequencer(
                        cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 
                        if (target_check_write_same_discard(&cdb[10], dev) < 0)
-                               goto out_invalid_cdb_field;
+                               goto out_unsupported_cdb;
                        if (!passthrough)
                                cmd->execute_task = target_emulate_write_same;
                        break;
@@ -2977,7 +2964,7 @@ static int transport_generic_cmd_sequencer(
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 
                if (target_check_write_same_discard(&cdb[1], dev) < 0)
-                       goto out_invalid_cdb_field;
+                       goto out_unsupported_cdb;
                if (!passthrough)
                        cmd->execute_task = target_emulate_write_same;
                break;
@@ -3000,7 +2987,7 @@ static int transport_generic_cmd_sequencer(
                 * of byte 1 bit 3 UNMAP instead of original reserved field
                 */
                if (target_check_write_same_discard(&cdb[1], dev) < 0)
-                       goto out_invalid_cdb_field;
+                       goto out_unsupported_cdb;
                if (!passthrough)
                        cmd->execute_task = target_emulate_write_same;
                break;
@@ -3082,11 +3069,6 @@ static int transport_generic_cmd_sequencer(
             (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
                goto out_unsupported_cdb;
 
-       /* Let's limit control cdbs to a page, for simplicity's sake. */
-       if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
-           size > PAGE_SIZE)
-               goto out_invalid_cdb_field;
-
        transport_set_supported_SAM_opcode(cmd);
        return ret;
 
@@ -3422,8 +3404,8 @@ static void transport_put_cmd(struct se_cmd *cmd)
                        goto out_busy;
        }
 
-       if (atomic_read(&cmd->transport_dev_active)) {
-               atomic_set(&cmd->transport_dev_active, 0);
+       if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
+               cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
                transport_all_task_dev_remove_state(cmd);
                free_tasks = 1;
        }
@@ -3490,9 +3472,11 @@ int transport_generic_map_mem_to_cmd(
 }
 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
 
-void *transport_kmap_first_data_page(struct se_cmd *cmd)
+void *transport_kmap_data_sg(struct se_cmd *cmd)
 {
        struct scatterlist *sg = cmd->t_data_sg;
+       struct page **pages;
+       int i;
 
        BUG_ON(!sg);
        /*
@@ -3500,15 +3484,43 @@ void *transport_kmap_first_data_page(struct se_cmd *cmd)
         * tcm_loop who may be using a contig buffer from the SCSI midlayer for
         * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
         */
-       return kmap(sg_page(sg)) + sg->offset;
+       if (!cmd->t_data_nents)
+               return NULL;
+       else if (cmd->t_data_nents == 1)
+               return kmap(sg_page(sg)) + sg->offset;
+
+       /* >1 page. use vmap */
+       pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
+       if (!pages)
+               return NULL;
+
+       /* convert sg[] to pages[] */
+       for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
+               pages[i] = sg_page(sg);
+       }
+
+       cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
+       kfree(pages);
+       if (!cmd->t_data_vmap)
+               return NULL;
+
+       return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
 }
-EXPORT_SYMBOL(transport_kmap_first_data_page);
+EXPORT_SYMBOL(transport_kmap_data_sg);
 
-void transport_kunmap_first_data_page(struct se_cmd *cmd)
+void transport_kunmap_data_sg(struct se_cmd *cmd)
 {
-       kunmap(sg_page(cmd->t_data_sg));
+       if (!cmd->t_data_nents)
+               return;
+       else if (cmd->t_data_nents == 1) {
+               /* single page was kmap()ed; must not fall through to vunmap */
+               kunmap(sg_page(cmd->t_data_sg));
+               return;
+       }
+
+       vunmap(cmd->t_data_vmap);
+       cmd->t_data_vmap = NULL;
 }
-EXPORT_SYMBOL(transport_kunmap_first_data_page);
+EXPORT_SYMBOL(transport_kunmap_data_sg);
 
 static int
 transport_generic_get_mem(struct se_cmd *cmd)
@@ -3759,6 +3769,11 @@ transport_allocate_control_task(struct se_cmd *cmd)
        struct se_task *task;
        unsigned long flags;
 
+       /* Workaround for handling zero-length control CDBs */
+       if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+           !cmd->data_length)
+               return 0;
+
        task = transport_generic_get_task(cmd, cmd->data_direction);
        if (!task)
                return -ENOMEM;
@@ -3828,8 +3843,18 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
        if (task_cdbs < 0)
                goto out_fail;
        else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+               spin_lock_irq(&cmd->t_state_lock);
                cmd->t_state = TRANSPORT_COMPLETE;
-               atomic_set(&cmd->t_transport_active, 1);
+               cmd->transport_state |= CMD_T_ACTIVE;
+               spin_unlock_irq(&cmd->t_state_lock);
+
+               if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
+                       u8 ua_asc = 0, ua_ascq = 0;
+
+                       core_scsi3_ua_clear_for_request_sense(cmd,
+                                       &ua_asc, &ua_ascq);
+               }
+
                INIT_WORK(&cmd->work, target_complete_ok_work);
                queue_work(target_completion_wq, &cmd->work);
                return 0;
@@ -3902,9 +3927,9 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 
        /*
         * Clear the se_cmd for WRITE_PENDING status in order to set
-        * cmd->t_transport_active=0 so that transport_generic_handle_data
-        * can be called from HW target mode interrupt code.  This is safe
-        * to be called with transport_off=1 before the cmd->se_tfo->write_pending
+        * CMD_T_ACTIVE so that transport_generic_handle_data can be called
+        * from HW target mode interrupt code.  This is safe to be called
+        * with transport_off=1 before the cmd->se_tfo->write_pending
         * because the se_cmd->se_lun pointer is not being cleared.
         */
        transport_cmd_check_stop(cmd, 1, 0);
@@ -4090,15 +4115,16 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
         * be stopped, we can safely ignore this struct se_cmd.
         */
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (atomic_read(&cmd->t_transport_stop)) {
-               atomic_set(&cmd->transport_lun_stop, 0);
-               pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
-                       " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
+       if (cmd->transport_state & CMD_T_STOP) {
+               cmd->transport_state &= ~CMD_T_LUN_STOP;
+
+               pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
+                        cmd->se_tfo->get_task_tag(cmd));
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                transport_cmd_check_stop(cmd, 1, 0);
                return -EPERM;
        }
-       atomic_set(&cmd->transport_lun_fe_stop, 1);
+       cmd->transport_state |= CMD_T_LUN_FE_STOP;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
@@ -4131,9 +4157,8 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
        while (!list_empty(&lun->lun_cmd_list)) {
                cmd = list_first_entry(&lun->lun_cmd_list,
                       struct se_cmd, se_lun_node);
-               list_del(&cmd->se_lun_node);
+               list_del_init(&cmd->se_lun_node);
 
-               atomic_set(&cmd->transport_lun_active, 0);
                /*
                 * This will notify iscsi_target_transport.c:
                 * transport_cmd_check_stop() that a LUN shutdown is in
@@ -4144,7 +4169,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
                        "_lun_stop for  ITT: 0x%08x\n",
                        cmd->se_lun->unpacked_lun,
                        cmd->se_tfo->get_task_tag(cmd));
-               atomic_set(&cmd->transport_lun_stop, 1);
+               cmd->transport_state |= CMD_T_LUN_STOP;
                spin_unlock(&cmd->t_state_lock);
 
                spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -4174,11 +4199,11 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
                        cmd->se_tfo->get_task_tag(cmd));
 
                spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-               if (!atomic_read(&cmd->transport_dev_active)) {
+               if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
                        spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
                        goto check_cond;
                }
-               atomic_set(&cmd->transport_dev_active, 0);
+               cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
                transport_all_task_dev_remove_state(cmd);
                spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
 
@@ -4198,7 +4223,7 @@ check_cond:
                 * finished accessing it.
                 */
                spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-               if (atomic_read(&cmd->transport_lun_fe_stop)) {
+               if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
                        pr_debug("SE_LUN[%d] - Detected FE stop for"
                                " struct se_cmd: %p ITT: 0x%08x\n",
                                lun->unpacked_lun,
@@ -4276,8 +4301,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
         * transport_clear_lun_from_sessions() once the ConfigFS context caller
         * has completed its operation on the struct se_cmd.
         */
-       if (atomic_read(&cmd->transport_lun_stop)) {
-
+       if (cmd->transport_state & CMD_T_LUN_STOP) {
                pr_debug("wait_for_tasks: Stopping"
                        " wait_for_completion(&cmd->t_tasktransport_lun_fe"
                        "_stop_comp); for ITT: 0x%08x\n",
@@ -4305,18 +4329,19 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
                        "stop_comp); for ITT: 0x%08x\n",
                        cmd->se_tfo->get_task_tag(cmd));
 
-               atomic_set(&cmd->transport_lun_stop, 0);
+               cmd->transport_state &= ~CMD_T_LUN_STOP;
        }
-       if (!atomic_read(&cmd->t_transport_active) ||
-            atomic_read(&cmd->t_transport_aborted)) {
+
+       if (!(cmd->transport_state & CMD_T_ACTIVE) ||
+            (cmd->transport_state & CMD_T_ABORTED)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return false;
        }
 
-       atomic_set(&cmd->t_transport_stop, 1);
+       cmd->transport_state |= CMD_T_STOP;
 
        pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
-               " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
+               " i_state: %d, t_state: %d, CMD_T_STOP\n",
                cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
 
@@ -4327,8 +4352,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
        wait_for_completion(&cmd->t_transport_stop_comp);
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       atomic_set(&cmd->t_transport_active, 0);
-       atomic_set(&cmd->t_transport_stop, 0);
+       cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
 
        pr_debug("wait_for_tasks: Stopped wait_for_compltion("
                "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
@@ -4557,7 +4581,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 {
        int ret = 0;
 
-       if (atomic_read(&cmd->t_transport_aborted) != 0) {
+       if (cmd->transport_state & CMD_T_ABORTED) {
                if (!send_status ||
                     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
                        return 1;
@@ -4594,7 +4618,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
         */
        if (cmd->data_direction == DMA_TO_DEVICE) {
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-                       atomic_inc(&cmd->t_transport_aborted);
+                       cmd->transport_state |= CMD_T_ABORTED;
                        smp_mb__after_atomic_inc();
                }
        }