target: Fix WRITE_SAME_16 lba assignment breakage
drivers/target/target_core_transport.c
/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_scdb.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

/* Used for transport_dev_get_map_*() */
typedef int (*map_func_t)(struct se_task *, u32);

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
static int transport_complete_qf(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
                struct se_device *dev, int (*qf_callback)(struct se_cmd *));
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static u32 transport_allocate_tasks(struct se_cmd *cmd,
                unsigned long long starting_lba,
                enum dma_data_direction data_direction,
                struct scatterlist *sgl, unsigned int nents);
static int transport_generic_get_mem(struct se_cmd *cmd);
static int transport_generic_remove(struct se_cmd *cmd,
                int session_reinstatement);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
                struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);

int init_se_kmem_caches(void)
{
        se_cmd_cache = kmem_cache_create("se_cmd_cache",
                        sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
        if (!se_cmd_cache) {
                pr_err("kmem_cache_create for struct se_cmd failed\n");
                goto out;
        }
        se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
                        sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
                        0, NULL);
        if (!se_tmr_req_cache) {
                pr_err("kmem_cache_create() for struct se_tmr_req"
                                " failed\n");
                goto out;
        }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
                        0, NULL);
        if (!se_sess_cache) {
                pr_err("kmem_cache_create() for struct se_session"
                                " failed\n");
                goto out;
        }
        se_ua_cache = kmem_cache_create("se_ua_cache",
                        sizeof(struct se_ua), __alignof__(struct se_ua),
                        0, NULL);
        if (!se_ua_cache) {
                pr_err("kmem_cache_create() for struct se_ua failed\n");
                goto out;
        }
        t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
                        sizeof(struct t10_pr_registration),
                        __alignof__(struct t10_pr_registration), 0, NULL);
        if (!t10_pr_reg_cache) {
                pr_err("kmem_cache_create() for struct t10_pr_registration"
                                " failed\n");
                goto out;
        }
        t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
                        sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
                        0, NULL);
        if (!t10_alua_lu_gp_cache) {
                pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
                                " failed\n");
                goto out;
        }
        t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
                        sizeof(struct t10_alua_lu_gp_member),
                        __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
        if (!t10_alua_lu_gp_mem_cache) {
                pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
                                "cache failed\n");
                goto out;
        }
        t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
                        sizeof(struct t10_alua_tg_pt_gp),
                        __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
        if (!t10_alua_tg_pt_gp_cache) {
                pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
                                "cache failed\n");
                goto out;
        }
        t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
                        "t10_alua_tg_pt_gp_mem_cache",
                        sizeof(struct t10_alua_tg_pt_gp_member),
                        __alignof__(struct t10_alua_tg_pt_gp_member),
                        0, NULL);
        if (!t10_alua_tg_pt_gp_mem_cache) {
                pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
                                "mem_cache failed\n");
                goto out;
        }

        return 0;
out:
        if (se_cmd_cache)
                kmem_cache_destroy(se_cmd_cache);
        if (se_tmr_req_cache)
                kmem_cache_destroy(se_tmr_req_cache);
        if (se_sess_cache)
                kmem_cache_destroy(se_sess_cache);
        if (se_ua_cache)
                kmem_cache_destroy(se_ua_cache);
        if (t10_pr_reg_cache)
                kmem_cache_destroy(t10_pr_reg_cache);
        if (t10_alua_lu_gp_cache)
                kmem_cache_destroy(t10_alua_lu_gp_cache);
        if (t10_alua_lu_gp_mem_cache)
                kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
        if (t10_alua_tg_pt_gp_cache)
                kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
        if (t10_alua_tg_pt_gp_mem_cache)
                kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
        return -ENOMEM;
}

void release_se_kmem_caches(void)
{
        kmem_cache_destroy(se_cmd_cache);
        kmem_cache_destroy(se_tmr_req_cache);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
        kmem_cache_destroy(t10_pr_reg_cache);
        kmem_cache_destroy(t10_alua_lu_gp_cache);
        kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
        kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
        kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
        u32 new_index;

        BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

        spin_lock(&scsi_mib_index_lock);
        new_index = ++scsi_mib_index[type];
        spin_unlock(&scsi_mib_index_lock);

        return new_index;
}
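
/*
 * Usage sketch: a caller passes one of the scsi_index_t values to obtain
 * a unique index, as transport_add_device_to_core_hba() does below:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */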

void transport_init_queue_obj(struct se_queue_obj *qobj)
{
        atomic_set(&qobj->queue_cnt, 0);
        INIT_LIST_HEAD(&qobj->qobj_list);
        init_waitqueue_head(&qobj->thread_wq);
        spin_lock_init(&qobj->cmd_queue_lock);
}
EXPORT_SYMBOL(transport_init_queue_obj);

static int transport_subsystem_reqmods(void)
{
        int ret;

        ret = request_module("target_core_iblock");
        if (ret != 0)
                pr_err("Unable to load target_core_iblock\n");

        ret = request_module("target_core_file");
        if (ret != 0)
                pr_err("Unable to load target_core_file\n");

        ret = request_module("target_core_pscsi");
        if (ret != 0)
                pr_err("Unable to load target_core_pscsi\n");

        ret = request_module("target_core_stgt");
        if (ret != 0)
                pr_err("Unable to load target_core_stgt\n");

        return 0;
}

int transport_subsystem_check_init(void)
{
        int ret;

        if (sub_api_initialized)
                return 0;
        /*
         * Request the loading of known TCM subsystem plugins..
         */
        ret = transport_subsystem_reqmods();
        if (ret < 0)
                return ret;

        sub_api_initialized = 1;
        return 0;
}
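
/*
 * Sketch (hypothetical caller): a fabric module would typically invoke
 * transport_subsystem_check_init() once before configuring its first
 * endpoint, so the IBLOCK/FILEIO/pSCSI/STGT plugins above get loaded
 * on demand:
 *
 *	if (transport_subsystem_check_init() < 0)
 *		return -EINVAL;
 */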

struct se_session *transport_init_session(void)
{
        struct se_session *se_sess;

        se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
        if (!se_sess) {
                pr_err("Unable to allocate struct se_session from"
                                " se_sess_cache\n");
                return ERR_PTR(-ENOMEM);
        }
        INIT_LIST_HEAD(&se_sess->sess_list);
        INIT_LIST_HEAD(&se_sess->sess_acl_list);

        return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
        struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct se_session *se_sess,
        void *fabric_sess_ptr)
{
        unsigned char buf[PR_REG_ISID_LEN];

        se_sess->se_tpg = se_tpg;
        se_sess->fabric_sess_ptr = fabric_sess_ptr;
        /*
         * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
         *
         * Only set for struct se_session's that will actually be moving I/O.
         * e.g.: *NOT* discovery sessions.
         */
        if (se_nacl) {
                /*
                 * If the fabric module supports an ISID based TransportID,
                 * save this value in binary from the fabric I_T Nexus now.
                 */
                if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
                        memset(&buf[0], 0, PR_REG_ISID_LEN);
                        se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
                                        &buf[0], PR_REG_ISID_LEN);
                        se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
                }
                spin_lock_irq(&se_nacl->nacl_sess_lock);
                /*
                 * The se_nacl->nacl_sess pointer will be set to the
                 * last active I_T Nexus for each struct se_node_acl.
                 */
                se_nacl->nacl_sess = se_sess;

                list_add_tail(&se_sess->sess_acl_list,
                              &se_nacl->acl_sess_list);
                spin_unlock_irq(&se_nacl->nacl_sess_lock);
        }
        list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

        pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
                se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
        struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct se_session *se_sess,
        void *fabric_sess_ptr)
{
        spin_lock_bh(&se_tpg->session_lock);
        __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
        spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);
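
/*
 * Sketch of the intended pairing (se_tpg, se_nacl and fabric_ptr are the
 * caller's own objects): a fabric module allocates the session and then
 * registers it against its portal group:
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_ptr);
 */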

void transport_deregister_session_configfs(struct se_session *se_sess)
{
        struct se_node_acl *se_nacl;
        unsigned long flags;
        /*
         * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
         */
        se_nacl = se_sess->se_node_acl;
        if (se_nacl) {
                spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
                list_del(&se_sess->sess_acl_list);
                /*
                 * If the session list is empty, then clear the pointer.
                 * Otherwise, set the struct se_session pointer from the tail
                 * element of the per struct se_node_acl active session list.
                 */
                if (list_empty(&se_nacl->acl_sess_list))
                        se_nacl->nacl_sess = NULL;
                else {
                        se_nacl->nacl_sess = container_of(
                                        se_nacl->acl_sess_list.prev,
                                        struct se_session, sess_acl_list);
                }
                spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
        }
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
        kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
        struct se_portal_group *se_tpg = se_sess->se_tpg;
        struct se_node_acl *se_nacl;

        if (!se_tpg) {
                transport_free_session(se_sess);
                return;
        }

        spin_lock_bh(&se_tpg->session_lock);
        list_del(&se_sess->sess_list);
        se_sess->se_tpg = NULL;
        se_sess->fabric_sess_ptr = NULL;
        spin_unlock_bh(&se_tpg->session_lock);

        /*
         * Determine if we need to do extra work for this initiator node's
         * struct se_node_acl if it had been previously dynamically generated.
         */
        se_nacl = se_sess->se_node_acl;
        if (se_nacl) {
                spin_lock_bh(&se_tpg->acl_node_lock);
                if (se_nacl->dynamic_node_acl) {
                        if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
                                        se_tpg)) {
                                list_del(&se_nacl->acl_list);
                                se_tpg->num_node_acls--;
                                spin_unlock_bh(&se_tpg->acl_node_lock);

                                core_tpg_wait_for_nacl_pr_ref(se_nacl);
                                core_free_device_list_for_node(se_nacl, se_tpg);
                                se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
                                                se_nacl);
                                spin_lock_bh(&se_tpg->acl_node_lock);
                        }
                }
                spin_unlock_bh(&se_tpg->acl_node_lock);
        }

        transport_free_session(se_sess);

        pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
                se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);
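
/*
 * Teardown sketch: the ConfigFS-visible session pointers are cleared
 * first, then the session itself is torn down (and freed via
 * transport_free_session()):
 *
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 */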

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
        struct se_device *dev;
        struct se_task *task;
        unsigned long flags;

        list_for_each_entry(task, &cmd->t_task_list, t_list) {
                dev = task->se_dev;
                if (!dev)
                        continue;

                if (atomic_read(&task->task_active))
                        continue;

                if (!atomic_read(&task->task_state_active))
                        continue;

                spin_lock_irqsave(&dev->execute_task_lock, flags);
                list_del(&task->t_state_list);
                pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
                        cmd->se_tfo->get_task_tag(cmd), dev, task);
                spin_unlock_irqrestore(&dev->execute_task_lock, flags);

                atomic_set(&task->task_state_active, 0);
                atomic_dec(&cmd->t_task_cdbs_ex_left);
        }
}

/*      transport_cmd_check_stop():
 *
 *      'transport_off = 1' determines if t_transport_active should be cleared.
 *      'transport_off = 2' determines if task_dev_state should be removed.
 *
 *      A non-zero u8 t_state sets cmd->t_state.
 *      Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
        struct se_cmd *cmd,
        int transport_off,
        u8 t_state)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        /*
         * Determine if the IOCTL context caller is requesting the stopping of
         * this command for LUN shutdown purposes.
         */
        if (atomic_read(&cmd->transport_lun_stop)) {
                pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
                        " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
                        cmd->se_tfo->get_task_tag(cmd));

                cmd->deferred_t_state = cmd->t_state;
                cmd->t_state = TRANSPORT_DEFERRED_CMD;
                atomic_set(&cmd->t_transport_active, 0);
                if (transport_off == 2)
                        transport_all_task_dev_remove_state(cmd);
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);

                complete(&cmd->transport_lun_stop_comp);
                return 1;
        }
        /*
         * Determine if frontend context caller is requesting the stopping of
         * this command for frontend exceptions.
         */
        if (atomic_read(&cmd->t_transport_stop)) {
                pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
                        " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
                        cmd->se_tfo->get_task_tag(cmd));

                cmd->deferred_t_state = cmd->t_state;
                cmd->t_state = TRANSPORT_DEFERRED_CMD;
                if (transport_off == 2)
                        transport_all_task_dev_remove_state(cmd);

                /*
                 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
                 * to FE.
                 */
                if (transport_off == 2)
                        cmd->se_lun = NULL;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);

                complete(&cmd->t_transport_stop_comp);
                return 1;
        }
        if (transport_off) {
                atomic_set(&cmd->t_transport_active, 0);
                if (transport_off == 2) {
                        transport_all_task_dev_remove_state(cmd);
                        /*
                         * Clear struct se_cmd->se_lun before the transport_off == 2
                         * handoff to fabric module.
                         */
                        cmd->se_lun = NULL;
                        /*
                         * Some fabric modules like tcm_loop can release their
                         * internally allocated I/O reference and struct se_cmd now.
                         */
                        if (cmd->se_tfo->check_stop_free != NULL) {
                                spin_unlock_irqrestore(
                                        &cmd->t_state_lock, flags);

                                cmd->se_tfo->check_stop_free(cmd);
                                return 1;
                        }
                }
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);

                return 0;
        } else if (t_state)
                cmd->t_state = t_state;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
        return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
        struct se_lun *lun = cmd->se_lun;
        unsigned long flags;

        if (!lun)
                return;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (!atomic_read(&cmd->transport_dev_active)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_lun;
        }
        atomic_set(&cmd->transport_dev_active, 0);
        transport_all_task_dev_remove_state(cmd);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);


check_lun:
        spin_lock_irqsave(&lun->lun_cmd_lock, flags);
        if (atomic_read(&cmd->transport_lun_active)) {
                list_del(&cmd->se_lun_node);
                atomic_set(&cmd->transport_lun_active, 0);
#if 0
                pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
                        cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
#endif
        }
        spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
        transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
        transport_lun_remove_cmd(cmd);

        if (transport_cmd_check_stop_to_fabric(cmd))
                return;
        if (remove)
                transport_generic_remove(cmd, 0);
}

void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
        transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);

        if (transport_cmd_check_stop_to_fabric(cmd))
                return;

        transport_generic_remove(cmd, 0);
}

static void transport_add_cmd_to_queue(
        struct se_cmd *cmd,
        int t_state)
{
        struct se_device *dev = cmd->se_dev;
        struct se_queue_obj *qobj = &dev->dev_queue_obj;
        unsigned long flags;

        INIT_LIST_HEAD(&cmd->se_queue_node);

        if (t_state) {
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                cmd->t_state = t_state;
                atomic_set(&cmd->t_transport_active, 1);
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
        }

        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
        if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
                cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
                list_add(&cmd->se_queue_node, &qobj->qobj_list);
        } else
                list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
        atomic_inc(&cmd->t_transport_queue_active);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

        atomic_inc(&qobj->queue_cnt);
        wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
        struct se_cmd *cmd;
        unsigned long flags;

        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
        if (list_empty(&qobj->qobj_list)) {
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
                return NULL;
        }
        cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

        atomic_dec(&cmd->t_transport_queue_active);

        list_del(&cmd->se_queue_node);
        atomic_dec(&qobj->queue_cnt);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

        return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
                struct se_queue_obj *qobj)
{
        struct se_cmd *t;
        unsigned long flags;

        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
        if (!atomic_read(&cmd->t_transport_queue_active)) {
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
                return;
        }

        list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
                if (t == cmd) {
                        atomic_dec(&cmd->t_transport_queue_active);
                        atomic_dec(&qobj->queue_cnt);
                        list_del(&cmd->se_queue_node);
                        break;
                }
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

        if (atomic_read(&cmd->t_transport_queue_active)) {
                pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
                        cmd->se_tfo->get_task_tag(cmd),
                        atomic_read(&cmd->t_transport_queue_active));
        }
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
        struct se_task *task = list_entry(cmd->t_task_list.next,
                                struct se_task, t_list);

        if (good) {
                cmd->scsi_status = SAM_STAT_GOOD;
                task->task_scsi_status = GOOD;
        } else {
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
                task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
                task->task_se_cmd->transport_error_status =
                                        PYX_TRANSPORT_ILLEGAL_REQUEST;
        }

        transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
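
/*
 * Sketch of a subsystem plugin completing an emulated SYNCHRONIZE_CACHE
 * through the export above, where `ret' is assumed to be the plugin's own
 * syncing return code (0 on success):
 *
 *	transport_complete_sync_cache(cmd, ret == 0);
 */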

/*      transport_complete_task():
 *
 *      Called from interrupt and non interrupt context depending
 *      on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = task->se_dev;
        int t_state;
        unsigned long flags;
#if 0
        pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
                        cmd->t_task_cdb[0], dev);
#endif
        if (dev)
                atomic_inc(&dev->depth_left);

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        atomic_set(&task->task_active, 0);

        /*
         * See if any sense data exists, if so set the TASK_SENSE flag.
         * Also check for any other post completion work that needs to be
         * done by the plugins.
         */
        if (dev && dev->transport->transport_complete) {
                if (dev->transport->transport_complete(task) != 0) {
                        cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
                        task->task_sense = 1;
                        success = 1;
                }
        }

        /*
         * See if we are waiting for outstanding struct se_task
         * to complete for an exception condition
         */
        if (atomic_read(&task->task_stop)) {
                /*
                 * Decrement cmd->t_se_count if this task had
                 * previously thrown its timeout exception handler.
                 */
                if (atomic_read(&task->task_timeout)) {
                        atomic_dec(&cmd->t_se_count);
                        atomic_set(&task->task_timeout, 0);
                }
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);

                complete(&task->task_stop_comp);
                return;
        }
        /*
         * If the task's timeout handler has fired, use the t_task_cdbs_timeout
         * left counter to determine when the struct se_cmd is ready to be queued to
         * the processing thread.
         */
        if (atomic_read(&task->task_timeout)) {
                if (!atomic_dec_and_test(
                                &cmd->t_task_cdbs_timeout_left)) {
                        spin_unlock_irqrestore(&cmd->t_state_lock,
                                flags);
                        return;
                }
                t_state = TRANSPORT_COMPLETE_TIMEOUT;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);

                transport_add_cmd_to_queue(cmd, t_state);
                return;
        }
        atomic_dec(&cmd->t_task_cdbs_timeout_left);

        /*
         * Decrement the outstanding t_task_cdbs_left count.  The last
         * struct se_task from struct se_cmd will complete itself into the
         * device queue depending upon int success.
         */
        if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
                if (!success)
                        cmd->t_tasks_failed = 1;

                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }

        if (!success || cmd->t_tasks_failed) {
                t_state = TRANSPORT_COMPLETE_FAILURE;
                if (!task->task_error_status) {
                        task->task_error_status =
                                PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
                        cmd->transport_error_status =
                                PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
                }
        } else {
                atomic_set(&cmd->t_transport_complete, 1);
                t_state = TRANSPORT_COMPLETE_OK;
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        transport_add_cmd_to_queue(cmd, t_state);
}
EXPORT_SYMBOL(transport_complete_task);

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of a struct se_device.
 *
 * Called with struct se_device->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
        struct se_task *task,
        struct se_task *task_prev,
        struct se_device *dev)
{
        /*
         * No SAM Task attribute emulation enabled, add to tail of
         * execution queue
         */
        if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
                list_add_tail(&task->t_execute_list, &dev->execute_task_list);
                return 0;
        }
        /*
         * HEAD_OF_QUEUE attribute for received CDB, which means
         * the first task that is associated with a struct se_cmd goes to
         * head of the struct se_device->execute_task_list, and task_prev
         * after that for each subsequent task
         */
        if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
                list_add(&task->t_execute_list,
                                (task_prev != NULL) ?
                                &task_prev->t_execute_list :
                                &dev->execute_task_list);

                pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
                                " in execution queue\n",
                                task->task_se_cmd->t_task_cdb[0]);
                return 1;
        }
        /*
         * ORDERED, SIMPLE or UNTAGGED attribute tasks are added to the end
         * of the struct se_device->execute_task_list once they have been
         * transitioned from Dormant -> Active state.
         */
        list_add_tail(&task->t_execute_list, &dev->execute_task_list);
        return 0;
}

/*      __transport_add_task_to_execute_queue():
 *
 *      Called with struct se_device->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
        struct se_task *task,
        struct se_task *task_prev,
        struct se_device *dev)
{
        int head_of_queue;

        head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
        atomic_inc(&dev->execute_tasks);

        if (atomic_read(&task->task_state_active))
                return;
        /*
         * Determine if this task needs to go to HEAD_OF_QUEUE for the
         * state list as well.  Running with SAM Task Attribute emulation
         * will always return head_of_queue == 0 here
         */
        if (head_of_queue)
                list_add(&task->t_state_list, (task_prev) ?
                                &task_prev->t_state_list :
                                &dev->state_task_list);
        else
                list_add_tail(&task->t_state_list, &dev->state_task_list);

        atomic_set(&task->task_state_active, 1);

        pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
                task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
                task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
        struct se_device *dev;
        struct se_task *task;
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        list_for_each_entry(task, &cmd->t_task_list, t_list) {
                dev = task->se_dev;

                if (atomic_read(&task->task_state_active))
                        continue;

                spin_lock(&dev->execute_task_lock);
                list_add_tail(&task->t_state_list, &dev->state_task_list);
                atomic_set(&task->task_state_active, 1);

                pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
                        task->task_se_cmd->se_tfo->get_task_tag(
                        task->task_se_cmd), task, dev);

                spin_unlock(&dev->execute_task_lock);
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_task *task, *task_prev = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->execute_task_lock, flags);
        list_for_each_entry(task, &cmd->t_task_list, t_list) {
                if (atomic_read(&task->task_execute_queue))
                        continue;
                /*
                 * __transport_add_task_to_execute_queue() handles the
                 * SAM Task Attribute emulation if enabled
                 */
                __transport_add_task_to_execute_queue(task, task_prev, dev);
                atomic_set(&task->task_execute_queue, 1);
                task_prev = task;
        }
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*      transport_remove_task_from_execute_queue():
 *
 *
 */
void transport_remove_task_from_execute_queue(
        struct se_task *task,
        struct se_device *dev)
{
        unsigned long flags;

        if (atomic_read(&task->task_execute_queue) == 0) {
                dump_stack();
                return;
        }

        spin_lock_irqsave(&dev->execute_task_lock, flags);
        list_del(&task->t_execute_list);
        atomic_set(&task->task_execute_queue, 0);
        atomic_dec(&dev->execute_tasks);
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN status
 */

static void target_qf_do_work(struct work_struct *work)
{
        struct se_device *dev = container_of(work, struct se_device,
                                        qf_work_queue);
        struct se_cmd *cmd, *cmd_tmp;

        spin_lock_irq(&dev->qf_cmd_lock);
        list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {

                list_del(&cmd->se_qf_node);
                atomic_dec(&dev->dev_qf_count);
                smp_mb__after_atomic_dec();
                spin_unlock_irq(&dev->qf_cmd_lock);

                pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
                        " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
                        (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
                        (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
                        : "UNKNOWN");
                /*
                 * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
                 * has been added to head of queue
                 */
                transport_add_cmd_to_queue(cmd, cmd->t_state);

                spin_lock_irq(&dev->qf_cmd_lock);
        }
        spin_unlock_irq(&dev->qf_cmd_lock);
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
        switch (cmd->data_direction) {
        case DMA_NONE:
                return "NONE";
        case DMA_FROM_DEVICE:
                return "READ";
        case DMA_TO_DEVICE:
                return "WRITE";
        case DMA_BIDIRECTIONAL:
                return "BIDI";
        default:
                break;
        }

        return "UNKNOWN";
}

void transport_dump_dev_state(
        struct se_device *dev,
        char *b,
        int *bl)
{
        *bl += sprintf(b + *bl, "Status: ");
        switch (dev->dev_status) {
        case TRANSPORT_DEVICE_ACTIVATED:
                *bl += sprintf(b + *bl, "ACTIVATED");
                break;
        case TRANSPORT_DEVICE_DEACTIVATED:
                *bl += sprintf(b + *bl, "DEACTIVATED");
                break;
        case TRANSPORT_DEVICE_SHUTDOWN:
                *bl += sprintf(b + *bl, "SHUTDOWN");
                break;
        case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
        case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
                *bl += sprintf(b + *bl, "OFFLINE");
                break;
        default:
                *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
                break;
        }

        *bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
                atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
                dev->queue_depth);
        *bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
                dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
        *bl += sprintf(b + *bl, "        ");
}

/*      transport_release_all_cmds():
 *
 *
 */
static void transport_release_all_cmds(struct se_device *dev)
{
        struct se_cmd *cmd, *tcmd;
        int bug_out = 0, t_state;
        unsigned long flags;

        spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
        list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
                                se_queue_node) {
                t_state = cmd->t_state;
                list_del(&cmd->se_queue_node);
                spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
                                flags);

                pr_err("Releasing ITT: 0x%08x, i_state: %u,"
                        " t_state: %u directly\n",
                        cmd->se_tfo->get_task_tag(cmd),
                        cmd->se_tfo->get_cmd_state(cmd), t_state);

                transport_release_fe_cmd(cmd);
                bug_out = 1;

                spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
        }
        spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
#if 0
        if (bug_out)
                BUG();
#endif
}

void transport_dump_vpd_proto_id(
        struct t10_vpd *vpd,
        unsigned char *p_buf,
        int p_buf_len)
{
        unsigned char buf[VPD_TMP_BUF_SIZE];
        int len;

        memset(buf, 0, VPD_TMP_BUF_SIZE);
        len = sprintf(buf, "T10 VPD Protocol Identifier: ");

        switch (vpd->protocol_identifier) {
        case 0x00:
                sprintf(buf+len, "Fibre Channel\n");
                break;
        case 0x10:
                sprintf(buf+len, "Parallel SCSI\n");
                break;
        case 0x20:
                sprintf(buf+len, "SSA\n");
                break;
        case 0x30:
                sprintf(buf+len, "IEEE 1394\n");
                break;
        case 0x40:
                sprintf(buf+len, "SCSI Remote Direct Memory Access"
                                " Protocol\n");
                break;
        case 0x50:
                sprintf(buf+len, "Internet SCSI (iSCSI)\n");
                break;
        case 0x60:
                sprintf(buf+len, "SAS Serial SCSI Protocol\n");
                break;
        case 0x70:
                sprintf(buf+len, "Automation/Drive Interface Transport"
                                " Protocol\n");
                break;
        case 0x80:
                sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
                break;
        default:
                sprintf(buf+len, "Unknown 0x%02x\n",
                                vpd->protocol_identifier);
                break;
        }

        if (p_buf)
                strncpy(p_buf, buf, p_buf_len);
        else
                pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
        /*
         * Check if the Protocol Identifier Valid (PIV) bit is set..
         *
         * from spc3r23.pdf section 7.5.1
         */
        if (page_83[1] & 0x80) {
                vpd->protocol_identifier = (page_83[0] & 0xf0);
                vpd->protocol_identifier_set = 1;
                transport_dump_vpd_proto_id(vpd, NULL, 0);
        }
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
        struct t10_vpd *vpd,
        unsigned char *p_buf,
        int p_buf_len)
{
        unsigned char buf[VPD_TMP_BUF_SIZE];
        int ret = 0;
        int len;

        memset(buf, 0, VPD_TMP_BUF_SIZE);
        len = sprintf(buf, "T10 VPD Identifier Association: ");

        switch (vpd->association) {
        case 0x00:
                sprintf(buf+len, "addressed logical unit\n");
                break;
        case 0x10:
                sprintf(buf+len, "target port\n");
                break;
        case 0x20:
                sprintf(buf+len, "SCSI target device\n");
                break;
        default:
                sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
                ret = -EINVAL;
                break;
        }

        if (p_buf)
                strncpy(p_buf, buf, p_buf_len);
        else
                pr_debug("%s", buf);

        return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
        /*
         * The VPD identification association..
         *
         * from spc3r23.pdf Section 7.6.3.1 Table 297
         */
        vpd->association = (page_83[1] & 0x30);
        return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
        struct t10_vpd *vpd,
        unsigned char *p_buf,
        int p_buf_len)
{
        unsigned char buf[VPD_TMP_BUF_SIZE];
        int ret = 0;
        int len;

        memset(buf, 0, VPD_TMP_BUF_SIZE);
        len = sprintf(buf, "T10 VPD Identifier Type: ");

        switch (vpd->device_identifier_type) {
        case 0x00:
                sprintf(buf+len, "Vendor specific\n");
                break;
        case 0x01:
                sprintf(buf+len, "T10 Vendor ID based\n");
                break;
        case 0x02:
                sprintf(buf+len, "EUI-64 based\n");
                break;
        case 0x03:
                sprintf(buf+len, "NAA\n");
                break;
        case 0x04:
                sprintf(buf+len, "Relative target port identifier\n");
                break;
        case 0x08:
                sprintf(buf+len, "SCSI name string\n");
                break;
        default:
                sprintf(buf+len, "Unsupported: 0x%02x\n",
                                vpd->device_identifier_type);
                ret = -EINVAL;
                break;
        }

        if (p_buf) {
                if (p_buf_len < strlen(buf)+1)
                        return -EINVAL;
                strncpy(p_buf, buf, p_buf_len);
        } else {
                pr_debug("%s", buf);
        }

        return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
        /*
         * The VPD identifier type..
         *
         * from spc3r23.pdf Section 7.6.3.1 Table 298
         */
        vpd->device_identifier_type = (page_83[1] & 0x0f);
        return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
        struct t10_vpd *vpd,
        unsigned char *p_buf,
        int p_buf_len)
{
        unsigned char buf[VPD_TMP_BUF_SIZE];
        int ret = 0;

        memset(buf, 0, VPD_TMP_BUF_SIZE);

        switch (vpd->device_identifier_code_set) {
        case 0x01: /* Binary */
                sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
                        &vpd->device_identifier[0]);
                break;
        case 0x02: /* ASCII */
                sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
                        &vpd->device_identifier[0]);
                break;
        case 0x03: /* UTF-8 */
                sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
                        &vpd->device_identifier[0]);
                break;
        default:
                sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
                        " 0x%02x", vpd->device_identifier_code_set);
                ret = -EINVAL;
                break;
        }

        if (p_buf)
                strncpy(p_buf, buf, p_buf_len);
        else
                pr_debug("%s", buf);

        return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
        static const char hex_str[] = "0123456789abcdef";
        int j = 0, i = 4; /* offset to start of the identifier */

        /*
         * The VPD Code Set (encoding)
         *
         * from spc3r23.pdf Section 7.6.3.1 Table 296
         */
        vpd->device_identifier_code_set = (page_83[0] & 0x0f);
        switch (vpd->device_identifier_code_set) {
        case 0x01: /* Binary */
                vpd->device_identifier[j++] =
                                hex_str[vpd->device_identifier_type];
                while (i < (4 + page_83[3])) {
                        vpd->device_identifier[j++] =
                                hex_str[(page_83[i] & 0xf0) >> 4];
                        vpd->device_identifier[j++] =
                                hex_str[page_83[i] & 0x0f];
                        i++;
                }
                break;
        case 0x02: /* ASCII */
        case 0x03: /* UTF-8 */
                while (i < (4 + page_83[3]))
                        vpd->device_identifier[j++] = page_83[i++];
                break;
        default:
                break;
        }

        return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
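
/*
 * The transport_set_vpd_*() exports above are meant to be called in
 * sequence for each INQUIRY EVPD=1 page 0x83 identification descriptor.
 * A sketch, with page_83 assumed to point at one descriptor:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */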

static void core_setup_task_attr_emulation(struct se_device *dev)
{
        /*
         * If this device is from Target_Core_Mod/pSCSI, disable the
         * SAM Task Attribute emulation.
         *
         * This is currently not available in upstream Linux/SCSI Target
         * mode code, and is assumed to be disabled while using TCM/pSCSI.
         */
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
                return;
        }

        dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
        pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
                " device\n", dev->transport->name,
                dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
        struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
        int i, device_type;
        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
        pr_debug("  Vendor: ");
        for (i = 0; i < 8; i++)
                if (wwn->vendor[i] >= 0x20)
                        pr_debug("%c", wwn->vendor[i]);
                else
                        pr_debug(" ");

        pr_debug("  Model: ");
        for (i = 0; i < 16; i++)
                if (wwn->model[i] >= 0x20)
                        pr_debug("%c", wwn->model[i]);
                else
                        pr_debug(" ");

        pr_debug("  Revision: ");
        for (i = 0; i < 4; i++)
                if (wwn->revision[i] >= 0x20)
                        pr_debug("%c", wwn->revision[i]);
                else
                        pr_debug(" ");

        pr_debug("\n");

        device_type = dev->transport->get_device_type(dev);
        pr_debug("  Type:   %s ", scsi_device_type(device_type));
        pr_debug("                 ANSI SCSI revision: %02x\n",
                                dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
        struct se_hba *hba,
        struct se_subsystem_api *transport,
        struct se_subsystem_dev *se_dev,
        u32 device_flags,
        void *transport_dev,
        struct se_dev_limits *dev_limits,
        const char *inquiry_prod,
        const char *inquiry_rev)
{
        int force_pt;
        struct se_device  *dev;

        dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
        if (!dev) {
                pr_err("Unable to allocate memory for se_dev_t\n");
                return NULL;
        }

        transport_init_queue_obj(&dev->dev_queue_obj);
        dev->dev_flags          = device_flags;
        dev->dev_status         |= TRANSPORT_DEVICE_DEACTIVATED;
        dev->dev_ptr            = transport_dev;
        dev->se_hba             = hba;
        dev->se_sub_dev         = se_dev;
        dev->transport          = transport;
        atomic_set(&dev->active_cmds, 0);
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->execute_task_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
        INIT_LIST_HEAD(&dev->ordered_cmd_list);
        INIT_LIST_HEAD(&dev->state_task_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
        spin_lock_init(&dev->ordered_cmd_lock);
        spin_lock_init(&dev->state_task_lock);
        spin_lock_init(&dev->dev_alua_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->dev_status_lock);
        spin_lock_init(&dev->dev_status_thr_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);

        dev->queue_depth        = dev_limits->queue_depth;
        atomic_set(&dev->depth_left, dev->queue_depth);
        atomic_set(&dev->dev_ordered_id, 0);

        se_dev_set_default_attribs(dev, dev_limits);

        dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
        dev->creation_time = get_jiffies_64();
        spin_lock_init(&dev->stats_lock);

        spin_lock(&hba->device_lock);
        list_add_tail(&dev->dev_list, &hba->hba_dev_list);
        hba->dev_count++;
        spin_unlock(&hba->device_lock);
        /*
         * Setup the SAM Task Attribute emulation for struct se_device
         */
        core_setup_task_attr_emulation(dev);
        /*
         * Force PR and ALUA passthrough emulation with internal object use.
         */
        force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
        /*
         * Setup the Reservations infrastructure for struct se_device
         */
        core_setup_reservations(dev, force_pt);
        /*
         * Setup the Asymmetric Logical Unit Assignment for struct se_device
         */
        if (core_setup_alua(dev, force_pt) < 0)
                goto out;

        /*
         * Startup the struct se_device processing thread
         */
        dev->process_thread = kthread_run(transport_processing_thread, dev,
                                          "LIO_%s", dev->transport->name);
        if (IS_ERR(dev->process_thread)) {
                pr_err("Unable to create kthread: LIO_%s\n",
                        dev->transport->name);
                goto out;
        }
        /*
         * Setup work_queue for QUEUE_FULL
         */
        INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
        /*
         * Preload the initial INQUIRY const values if we are doing
         * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
         * passthrough because this is being provided by the backend LLD.
         * This is required so that transport_get_inquiry() copies these
         * originals once back into DEV_T10_WWN(dev) for the virtual device
         * setup.
         */
        if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
                if (!inquiry_prod || !inquiry_rev) {
                        pr_err("All non TCM/pSCSI plugins require"
                                " INQUIRY consts\n");
                        goto out;
                }

                strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
                strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
                strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
        }
        scsi_dump_inquiry(dev);

        return dev;
1515 out:
1516         if (!IS_ERR_OR_NULL(dev->process_thread))
1517                 kthread_stop(dev->process_thread);
1518         spin_lock(&hba->device_lock);
1519         list_del(&dev->dev_list);
1520         hba->dev_count--;
1521         spin_unlock(&hba->device_lock);
1522
1523         se_release_vpd_for_dev(dev);
1524
1525         kfree(dev);
1526
1527         return NULL;
1528 }
1529 EXPORT_SYMBOL(transport_add_device_to_core_hba);
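
/*
 * Illustrative sketch (not part of this file): roughly how a virtual
 * backend subsystem plugin might register a new device with the core.
 * example_backend_attach(), my_hba, my_api, my_su_dev and my_priv are
 * hypothetical, as are the chosen queue depth and INQUIRY constants.
 */
#if 0
static struct se_device *example_backend_attach(struct se_hba *my_hba,
                struct se_subsystem_api *my_api,
                struct se_subsystem_dev *my_su_dev, void *my_priv)
{
        struct se_dev_limits dev_limits;

        memset(&dev_limits, 0, sizeof(struct se_dev_limits));
        dev_limits.queue_depth = 32;

        /* Non TCM/pSCSI plugins must pass INQUIRY product/revision consts */
        return transport_add_device_to_core_hba(my_hba, my_api, my_su_dev,
                        0, my_priv, &dev_limits, "EXAMPLE", "v0");
}
#endif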
1530
1531 /*      transport_generic_prepare_cdb():
1532  *
1533  *      Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1534  *      contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1535  *      Because we are mapping iSCSI LUNs to SCSI Target IDs, leaving a
1536  *      non-zero LUN in the CDB would throw the underlying devices and
1537  *      HBAs for a loop, so clear it for opcodes that do not use byte 1.
1538  */
1539 static inline void transport_generic_prepare_cdb(
1540         unsigned char *cdb)
1541 {
1542         switch (cdb[0]) {
1543         case READ_10: /* SBC - RDProtect */
1544         case READ_12: /* SBC - RDProtect */
1545         case READ_16: /* SBC - RDProtect */
1546         case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1547         case VERIFY: /* SBC - VRProtect */
1548         case VERIFY_16: /* SBC - VRProtect */
1549         case WRITE_VERIFY: /* SBC - VRProtect */
1550         case WRITE_VERIFY_12: /* SBC - VRProtect */
1551                 break;
1552         default:
1553                 cdb[1] &= 0x1f; /* clear logical unit number */
1554                 break;
1555         }
1556 }
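
/*
 * Illustrative example (not part of this file): what the masking above
 * does to CDB byte 1.  An initiator encoding LUN 3 in bits 7-5 per
 * SAM-2 would send e.g. 0x65; the 0x1f mask leaves only 0x05.
 */
#if 0
static void example_lun_mask(void)
{
        unsigned char cdb1 = 0x65;      /* (3 << 5) | 0x05 */

        cdb1 &= 0x1f;                   /* now 0x05, LUN bits cleared */
}
#endif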
1557
1558 static struct se_task *
1559 transport_generic_get_task(struct se_cmd *cmd,
1560                 enum dma_data_direction data_direction)
1561 {
1562         struct se_task *task;
1563         struct se_device *dev = cmd->se_dev;
1564
1565         task = dev->transport->alloc_task(cmd->t_task_cdb);
1566         if (!task) {
1567                 pr_err("Unable to allocate struct se_task\n");
1568                 return NULL;
1569         }
1570
1571         INIT_LIST_HEAD(&task->t_list);
1572         INIT_LIST_HEAD(&task->t_execute_list);
1573         INIT_LIST_HEAD(&task->t_state_list);
1574         init_completion(&task->task_stop_comp);
1575         task->task_se_cmd = cmd;
1576         task->se_dev = dev;
1577         task->task_data_direction = data_direction;
1578
1579         return task;
1580 }
1581
1582 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1583
1584 /*
1585  * Used by fabric modules containing a local struct se_cmd within their
1586  * fabric dependent per I/O descriptor.
1587  */
1588 void transport_init_se_cmd(
1589         struct se_cmd *cmd,
1590         struct target_core_fabric_ops *tfo,
1591         struct se_session *se_sess,
1592         u32 data_length,
1593         int data_direction,
1594         int task_attr,
1595         unsigned char *sense_buffer)
1596 {
1597         INIT_LIST_HEAD(&cmd->se_lun_node);
1598         INIT_LIST_HEAD(&cmd->se_delayed_node);
1599         INIT_LIST_HEAD(&cmd->se_ordered_node);
1600         INIT_LIST_HEAD(&cmd->se_qf_node);
1601
1602         INIT_LIST_HEAD(&cmd->t_task_list);
1603         init_completion(&cmd->transport_lun_fe_stop_comp);
1604         init_completion(&cmd->transport_lun_stop_comp);
1605         init_completion(&cmd->t_transport_stop_comp);
1606         spin_lock_init(&cmd->t_state_lock);
1607         atomic_set(&cmd->transport_dev_active, 1);
1608
1609         cmd->se_tfo = tfo;
1610         cmd->se_sess = se_sess;
1611         cmd->data_length = data_length;
1612         cmd->data_direction = data_direction;
1613         cmd->sam_task_attr = task_attr;
1614         cmd->sense_buffer = sense_buffer;
1615 }
1616 EXPORT_SYMBOL(transport_init_se_cmd);
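
/*
 * Illustrative sketch (not part of this file): a fabric module that
 * embeds an se_cmd inside its per I/O descriptor would initialize it
 * roughly like this before CDB submission.  struct my_ioreq, my_tfo
 * and my_sess are hypothetical; the 512-byte READ is arbitrary.
 */
#if 0
struct my_ioreq {
        struct se_cmd se_cmd;
        unsigned char sense[TRANSPORT_SENSE_BUFFER];
};

static void example_init_cmd(struct my_ioreq *req,
                struct target_core_fabric_ops *my_tfo,
                struct se_session *my_sess)
{
        transport_init_se_cmd(&req->se_cmd, my_tfo, my_sess, 512,
                        DMA_FROM_DEVICE, MSG_SIMPLE_TAG, &req->sense[0]);
}
#endif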
1617
1618 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1619 {
1620         /*
1621          * Check if SAM Task Attribute emulation is enabled for this
1622          * struct se_device storage object
1623          */
1624         if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1625                 return 0;
1626
1627         if (cmd->sam_task_attr == MSG_ACA_TAG) {
1628                 pr_debug("SAM Task Attribute ACA"
1629                         " emulation is not supported\n");
1630                 return -EINVAL;
1631         }
1632         /*
1633          * Used to determine when ORDERED commands should go from
1634          * Dormant to Active status.
1635          */
1636         cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1637         smp_mb__after_atomic_inc();
1638         pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1639                         cmd->se_ordered_id, cmd->sam_task_attr,
1640                         cmd->se_dev->transport->name);
1641         return 0;
1642 }
1643
1644 void transport_free_se_cmd(
1645         struct se_cmd *se_cmd)
1646 {
1647         if (se_cmd->se_tmr_req)
1648                 core_tmr_release_req(se_cmd->se_tmr_req);
1649         /*
1650          * Check and free any extended CDB buffer that was allocated
1651          */
1652         if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
1653                 kfree(se_cmd->t_task_cdb);
1654 }
1655 EXPORT_SYMBOL(transport_free_se_cmd);
1656
1657 static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
1658
1659 /*      transport_generic_allocate_tasks():
1660  *
1661  *      Called from fabric RX Thread.
1662  */
1663 int transport_generic_allocate_tasks(
1664         struct se_cmd *cmd,
1665         unsigned char *cdb)
1666 {
1667         int ret;
1668
1669         transport_generic_prepare_cdb(cdb);
1670
1671         /*
1672          * This is needed for early exceptions.
1673          */
1674         cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
1675
1676         /*
1677          * Ensure that the received CDB is less than the max (252 + 8) bytes
1678          * for VARIABLE_LENGTH_CMD
1679          */
1680         if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1681                 pr_err("Received SCSI CDB with command_size: %d that"
1682                         " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1683                         scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1684                 return -EINVAL;
1685         }
1686         /*
1687          * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1688          * allocate the additional extended CDB buffer now.  Otherwise
1689          * setup the pointer from __t_task_cdb to t_task_cdb.
1690          */
1691         if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1692                 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1693                                                 GFP_KERNEL);
1694                 if (!cmd->t_task_cdb) {
1695                         pr_err("Unable to allocate cmd->t_task_cdb"
1696                                 " %u > sizeof(cmd->__t_task_cdb): %lu\n",
1697                                 scsi_command_size(cdb),
1698                                 (unsigned long)sizeof(cmd->__t_task_cdb));
1699                         return -ENOMEM;
1700                 }
1701         } else
1702                 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1703         /*
1704          * Copy the original CDB into cmd->t_task_cdb.
1705          */
1706         memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1707         /*
1708          * Setup the received CDB based on SCSI defined opcodes and
1709          * perform unit attention, persistent reservations and ALUA
1710          * checks for virtual device backends.  The cmd->t_task_cdb
1711          * pointer is expected to be setup before we reach this point.
1712          */
1713         ret = transport_generic_cmd_sequencer(cmd, cdb);
1714         if (ret < 0)
1715                 return ret;
1716         /*
1717          * Check for SAM Task Attribute Emulation
1718          */
1719         if (transport_check_alloc_task_attr(cmd) < 0) {
1720                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1721                 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1722                 return -EINVAL;
1723         }
1724         spin_lock(&cmd->se_lun->lun_sep_lock);
1725         if (cmd->se_lun->lun_sep)
1726                 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1727         spin_unlock(&cmd->se_lun->lun_sep_lock);
1728         return 0;
1729 }
1730 EXPORT_SYMBOL(transport_generic_allocate_tasks);
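
/*
 * Illustrative sketch (not part of this file): the typical fabric RX
 * thread sequence runs the received CDB through the sequencer via
 * transport_generic_allocate_tasks() and, on success, queues the
 * command for process context.  example_rx() and my_cdb are
 * hypothetical.
 */
#if 0
static int example_rx(struct se_cmd *cmd, unsigned char *my_cdb)
{
        int ret;

        ret = transport_generic_allocate_tasks(cmd, my_cdb);
        if (ret < 0)
                return ret;     /* exception state already set on cmd */

        return transport_generic_handle_cdb(cmd);
}
#endif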
1731
1732 /*
1733  * Used by fabric module frontends not defining a TFO->new_cmd_map()
1734  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status
1735  */
1736 int transport_generic_handle_cdb(
1737         struct se_cmd *cmd)
1738 {
1739         if (!cmd->se_lun) {
1740                 dump_stack();
1741                 pr_err("cmd->se_lun is NULL\n");
1742                 return -EINVAL;
1743         }
1744
1745         transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
1746         return 0;
1747 }
1748 EXPORT_SYMBOL(transport_generic_handle_cdb);
1749
1750 /*
1751  * Used by fabric module frontends to queue tasks directly.
1752  * May only be used from process context.
1753  */
1754 int transport_handle_cdb_direct(
1755         struct se_cmd *cmd)
1756 {
1757         if (!cmd->se_lun) {
1758                 dump_stack();
1759                 pr_err("cmd->se_lun is NULL\n");
1760                 return -EINVAL;
1761         }
1762         if (in_interrupt()) {
1763                 dump_stack();
1764                 pr_err("transport_handle_cdb_direct cannot be called"
1765                                 " from interrupt context\n");
1766                 return -EINVAL;
1767         }
1768
1769         return transport_generic_new_cmd(cmd);
1770 }
1771 EXPORT_SYMBOL(transport_handle_cdb_direct);
1772
1773 /*
1774  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1775  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1776  * complete setup in TCM process context w/ TFO->new_cmd_map().
1777  */
1778 int transport_generic_handle_cdb_map(
1779         struct se_cmd *cmd)
1780 {
1781         if (!cmd->se_lun) {
1782                 dump_stack();
1783                 pr_err("cmd->se_lun is NULL\n");
1784                 return -EINVAL;
1785         }
1786
1787         transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
1788         return 0;
1789 }
1790 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1791
1792 /*      transport_generic_handle_data():
1793  *
1794  *
1795  */
1796 int transport_generic_handle_data(
1797         struct se_cmd *cmd)
1798 {
1799         /*
1800          * For the software fabric case, we assume the nexus is being
1801          * failed/shutdown when signals are pending from the kthread context
1802          * caller, so we return a failure.  For the HW target mode case running
1803          * in interrupt code, the signal_pending() check is skipped.
1804          */
1805         if (!in_interrupt() && signal_pending(current))
1806                 return -EPERM;
1807         /*
1808          * If the received CDB has already been ABORTED by the generic
1809          * target engine, we now call transport_check_aborted_status()
1810          * to queue any delayed TASK_ABORTED status for the received CDB to the
1811          * fabric module as we are expecting no further incoming DATA OUT
1812          * sequences at this point.
1813          */
1814         if (transport_check_aborted_status(cmd, 1) != 0)
1815                 return 0;
1816
1817         transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
1818         return 0;
1819 }
1820 EXPORT_SYMBOL(transport_generic_handle_data);
1821
1822 /*      transport_generic_handle_tmr():
1823  *
1824  *
1825  */
1826 int transport_generic_handle_tmr(
1827         struct se_cmd *cmd)
1828 {
1829         /*
1830          * This is needed for early exceptions.
1831          */
1832         cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
1833
1834         transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
1835         return 0;
1836 }
1837 EXPORT_SYMBOL(transport_generic_handle_tmr);
1838
1839 void transport_generic_free_cmd_intr(
1840         struct se_cmd *cmd)
1841 {
1842         transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
1843 }
1844 EXPORT_SYMBOL(transport_generic_free_cmd_intr);
1845
1846 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1847 {
1848         struct se_task *task, *task_tmp;
1849         unsigned long flags;
1850         int ret = 0;
1851
1852         pr_debug("ITT[0x%08x] - Stopping tasks\n",
1853                 cmd->se_tfo->get_task_tag(cmd));
1854
1855         /*
1856          * No tasks remain in the execution queue
1857          */
1858         spin_lock_irqsave(&cmd->t_state_lock, flags);
1859         list_for_each_entry_safe(task, task_tmp,
1860                                 &cmd->t_task_list, t_list) {
1861                 pr_debug("task_no[%d] - Processing task %p\n",
1862                                 task->task_no, task);
1863                 /*
1864                  * If the struct se_task has not been sent and is not active,
1865                  * remove the struct se_task from the execution queue.
1866                  */
1867                 if (!atomic_read(&task->task_sent) &&
1868                     !atomic_read(&task->task_active)) {
1869                         spin_unlock_irqrestore(&cmd->t_state_lock,
1870                                         flags);
1871                         transport_remove_task_from_execute_queue(task,
1872                                         task->se_dev);
1873
1874                         pr_debug("task_no[%d] - Removed from execute queue\n",
1875                                 task->task_no);
1876                         spin_lock_irqsave(&cmd->t_state_lock, flags);
1877                         continue;
1878                 }
1879
1880                 /*
1881                  * If the struct se_task is active, sleep until it is returned
1882                  * from the plugin.
1883                  */
1884                 if (atomic_read(&task->task_active)) {
1885                         atomic_set(&task->task_stop, 1);
1886                         spin_unlock_irqrestore(&cmd->t_state_lock,
1887                                         flags);
1888
1889                         pr_debug("task_no[%d] - Waiting to complete\n",
1890                                 task->task_no);
1891                         wait_for_completion(&task->task_stop_comp);
1892                         pr_debug("task_no[%d] - Stopped successfully\n",
1893                                 task->task_no);
1894
1895                         spin_lock_irqsave(&cmd->t_state_lock, flags);
1896                         atomic_dec(&cmd->t_task_cdbs_left);
1897
1898                         atomic_set(&task->task_active, 0);
1899                         atomic_set(&task->task_stop, 0);
1900                 } else {
1901                         pr_debug("task_no[%d] - Did nothing\n", task->task_no);
1902                         ret++;
1903                 }
1904
1905                 __transport_stop_task_timer(task, &flags);
1906         }
1907         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1908
1909         return ret;
1910 }
1911
1912 /*
1913  * Handle SAM-esque emulation for generic transport request failures.
1914  */
1915 static void transport_generic_request_failure(
1916         struct se_cmd *cmd,
1917         struct se_device *dev,
1918         int complete,
1919         int sc)
1920 {
1921         int ret = 0;
1922
1923         pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1924                 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1925                 cmd->t_task_cdb[0]);
1926         pr_debug("-----[ i_state: %d t_state/def_t_state:"
1927                 " %d/%d transport_error_status: %d\n",
1928                 cmd->se_tfo->get_cmd_state(cmd),
1929                 cmd->t_state, cmd->deferred_t_state,
1930                 cmd->transport_error_status);
1931         pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1932                 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1933                 " t_transport_active: %d t_transport_stop: %d"
1934                 " t_transport_sent: %d\n", cmd->t_task_list_num,
1935                 atomic_read(&cmd->t_task_cdbs_left),
1936                 atomic_read(&cmd->t_task_cdbs_sent),
1937                 atomic_read(&cmd->t_task_cdbs_ex_left),
1938                 atomic_read(&cmd->t_transport_active),
1939                 atomic_read(&cmd->t_transport_stop),
1940                 atomic_read(&cmd->t_transport_sent));
1941
1942         transport_stop_all_task_timers(cmd);
1943
1944         if (dev)
1945                 atomic_inc(&dev->depth_left);
1946         /*
1947          * For SAM Task Attribute emulation for failed struct se_cmd
1948          */
1949         if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1950                 transport_complete_task_attr(cmd);
1951
1952         if (complete) {
1953                 transport_direct_request_timeout(cmd);
1954                 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
1955         }
1956
1957         switch (cmd->transport_error_status) {
1958         case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
1959                 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1960                 break;
1961         case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
1962                 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
1963                 break;
1964         case PYX_TRANSPORT_INVALID_CDB_FIELD:
1965                 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1966                 break;
1967         case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
1968                 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1969                 break;
1970         case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
1971                 if (!sc)
1972                         transport_new_cmd_failure(cmd);
1973                 /*
1974                  * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
1975                  * we force this session to fall back to session
1976                  * recovery.
1977                  */
1978                 cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
1979                 cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
1980
1981                 goto check_stop;
1982         case PYX_TRANSPORT_LU_COMM_FAILURE:
1983         case PYX_TRANSPORT_ILLEGAL_REQUEST:
1984                 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1985                 break;
1986         case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
1987                 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
1988                 break;
1989         case PYX_TRANSPORT_WRITE_PROTECTED:
1990                 cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
1991                 break;
1992         case PYX_TRANSPORT_RESERVATION_CONFLICT:
1993                 /*
1994                  * No SENSE Data payload for this case, set SCSI Status
1995                  * and queue the response to $FABRIC_MOD.
1996                  *
1997                  * Uses linux/include/scsi/scsi.h SAM status codes defs
1998                  */
1999                 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2000                 /*
2001                  * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2002                  * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2003                  * CONFLICT STATUS.
2004                  *
2005                  * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2006                  */
2007                 if (cmd->se_sess &&
2008                     cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2009                         core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
2010                                 cmd->orig_fe_lun, 0x2C,
2011                                 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2012
2013                 ret = cmd->se_tfo->queue_status(cmd);
2014                 if (ret == -EAGAIN)
2015                         goto queue_full;
2016                 goto check_stop;
2017         case PYX_TRANSPORT_USE_SENSE_REASON:
2018                 /*
2019                  * struct se_cmd->scsi_sense_reason already set
2020                  */
2021                 break;
2022         default:
2023                 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
2024                         cmd->t_task_cdb[0],
2025                         cmd->transport_error_status);
2026                 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2027                 break;
2028         }
2029
2030         if (!sc)
2031                 transport_new_cmd_failure(cmd);
2032         else {
2033                 ret = transport_send_check_condition_and_sense(cmd,
2034                                 cmd->scsi_sense_reason, 0);
2035                 if (ret == -EAGAIN)
2036                         goto queue_full;
2037         }
2038
2039 check_stop:
2040         transport_lun_remove_cmd(cmd);
2041         transport_cmd_check_stop_to_fabric(cmd);
2043         return;
2044
2045 queue_full:
2046         cmd->t_state = TRANSPORT_COMPLETE_OK;
2047         transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
2048 }
2049
2050 static void transport_direct_request_timeout(struct se_cmd *cmd)
2051 {
2052         unsigned long flags;
2053
2054         spin_lock_irqsave(&cmd->t_state_lock, flags);
2055         if (!atomic_read(&cmd->t_transport_timeout)) {
2056                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2057                 return;
2058         }
2059         if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
2060                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2061                 return;
2062         }
2063
2064         atomic_sub(atomic_read(&cmd->t_transport_timeout),
2065                    &cmd->t_se_count);
2066         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2067 }
2068
2069 static void transport_generic_request_timeout(struct se_cmd *cmd)
2070 {
2071         unsigned long flags;
2072
2073         /*
2074          * Reset cmd->t_se_count so that the final call to
2075          * transport_generic_remove() below can free memory resources.
2076          */
2077         spin_lock_irqsave(&cmd->t_state_lock, flags);
2078         if (atomic_read(&cmd->t_transport_timeout) > 1) {
2079                 int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
2080
2081                 atomic_sub(tmp, &cmd->t_se_count);
2082         }
2083         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2084
2085         transport_generic_remove(cmd, 0);
2086 }
2087
2088 static inline u32 transport_lba_21(unsigned char *cdb)
2089 {
2090         return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
2091 }
2092
2093 static inline u32 transport_lba_32(unsigned char *cdb)
2094 {
2095         return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2096 }
2097
2098 static inline unsigned long long transport_lba_64(unsigned char *cdb)
2099 {
2100         unsigned int __v1, __v2;
2101
2102         __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2103         __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2104
2105         return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2106 }
2107
2108 /*
2109  * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
2110  */
2111 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
2112 {
2113         unsigned int __v1, __v2;
2114
2115         __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
2116         __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
2117
2118         return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2119 }
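
/*
 * Illustrative check (not part of this file): transport_lba_64() above
 * hand-assembles the big-endian LBA from bytes 2-9 of a 16-byte CDB,
 * which is equivalent to get_unaligned_be64() from <asm/unaligned.h>.
 */
#if 0
static void example_lba_64_check(unsigned char *cdb)
{
        BUG_ON(transport_lba_64(cdb) != get_unaligned_be64(&cdb[2]));
}
#endif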
2120
2121 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
2122 {
2123         unsigned long flags;
2124
2125         spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2126         se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
2127         spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2128 }
2129
2130 /*
2131  * Called from interrupt context.
2132  */
2133 static void transport_task_timeout_handler(unsigned long data)
2134 {
2135         struct se_task *task = (struct se_task *)data;
2136         struct se_cmd *cmd = task->task_se_cmd;
2137         unsigned long flags;
2138
2139         pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
2140
2141         spin_lock_irqsave(&cmd->t_state_lock, flags);
2142         if (task->task_flags & TF_STOP) {
2143                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2144                 return;
2145         }
2146         task->task_flags &= ~TF_RUNNING;
2147
2148         /*
2149          * Determine if transport_complete_task() has already been called.
2150          */
2151         if (!atomic_read(&task->task_active)) {
2152                 pr_debug("transport task: %p cmd: %p timeout task_active"
2153                                 " == 0\n", task, cmd);
2154                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2155                 return;
2156         }
2157
2158         atomic_inc(&cmd->t_se_count);
2159         atomic_inc(&cmd->t_transport_timeout);
2160         cmd->t_tasks_failed = 1;
2161
2162         atomic_set(&task->task_timeout, 1);
2163         task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
2164         task->task_scsi_status = 1;
2165
2166         if (atomic_read(&task->task_stop)) {
2167                 pr_debug("transport task: %p cmd: %p timeout task_stop"
2168                                 " == 1\n", task, cmd);
2169                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2170                 complete(&task->task_stop_comp);
2171                 return;
2172         }
2173
2174         if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
2175                 pr_debug("transport task: %p cmd: %p timeout non zero"
2176                                 " t_task_cdbs_left\n", task, cmd);
2177                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2178                 return;
2179         }
2180         pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
2181                         task, cmd);
2182
2183         cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
2184         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2185
2186         transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
2187 }
2188
2189 /*
2190  * Called with cmd->t_state_lock held.
2191  */
2192 static void transport_start_task_timer(struct se_task *task)
2193 {
2194         struct se_device *dev = task->se_dev;
2195         int timeout;
2196
2197         if (task->task_flags & TF_RUNNING)
2198                 return;
2199         /*
2200          * If the task_timeout is disabled, exit now.
2201          */
2202         timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
2203         if (!timeout)
2204                 return;
2205
2206         init_timer(&task->task_timer);
2207         task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
2208         task->task_timer.data = (unsigned long) task;
2209         task->task_timer.function = transport_task_timeout_handler;
2210
2211         task->task_flags |= TF_RUNNING;
2212         add_timer(&task->task_timer);
2213 #if 0
2214         pr_debug("Starting task timer for cmd: %p task: %p seconds:"
2215                 " %d\n", task->task_se_cmd, task, timeout);
2216 #endif
2217 }
2218
2219 /*
2220  * Called with spin_lock_irq(&cmd->t_state_lock) held.
2221  */
2222 void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2223 {
2224         struct se_cmd *cmd = task->task_se_cmd;
2225
2226         if (!(task->task_flags & TF_RUNNING))
2227                 return;
2228
2229         task->task_flags |= TF_STOP;
2230         spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2231
2232         del_timer_sync(&task->task_timer);
2233
2234         spin_lock_irqsave(&cmd->t_state_lock, *flags);
2235         task->task_flags &= ~TF_RUNNING;
2236         task->task_flags &= ~TF_STOP;
2237 }
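
/*
 * Note on the locking above: cmd->t_state_lock must be dropped around
 * del_timer_sync(), because transport_task_timeout_handler() takes the
 * same lock.  Calling del_timer_sync() with the lock held could
 * deadlock waiting for a handler that is itself spinning on the lock.
 */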
2238
2239 static void transport_stop_all_task_timers(struct se_cmd *cmd)
2240 {
2241         struct se_task *task = NULL, *task_tmp;
2242         unsigned long flags;
2243
2244         spin_lock_irqsave(&cmd->t_state_lock, flags);
2245         list_for_each_entry_safe(task, task_tmp,
2246                                 &cmd->t_task_list, t_list)
2247                 __transport_stop_task_timer(task, &flags);
2248         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2249 }
2250
2251 static inline int transport_tcq_window_closed(struct se_device *dev)
2252 {
2253         if (dev->dev_tcq_window_closed++ <
2254                         PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
2255                 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
2256         } else
2257                 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
2258
2259         wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
2260         return 0;
2261 }
2262
2263 /*
2264  * Called from Fabric Module context from transport_execute_tasks()
2265  *
2266  * The return of this function determines if the tasks from struct se_cmd
2267  * get added to the execution queue in transport_execute_tasks(),
2268  * or are added to the delayed or ordered lists here.
2269  */
2270 static inline int transport_execute_task_attr(struct se_cmd *cmd)
2271 {
2272         if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
2273                 return 1;
2274         /*
2275          * Check for HEAD_OF_QUEUE, and if set return 1 so the tasks for
2276          * the passed struct se_cmd go to the front of the execution queue.
2277          */
2278         if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2279                 atomic_inc(&cmd->se_dev->dev_hoq_count);
2280                 smp_mb__after_atomic_inc();
2281                 pr_debug("Added HEAD_OF_QUEUE for CDB:"
2282                         " 0x%02x, se_ordered_id: %u\n",
2283                         cmd->t_task_cdb[0],
2284                         cmd->se_ordered_id);
2285                 return 1;
2286         } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2287                 spin_lock(&cmd->se_dev->ordered_cmd_lock);
2288                 list_add_tail(&cmd->se_ordered_node,
2289                                 &cmd->se_dev->ordered_cmd_list);
2290                 spin_unlock(&cmd->se_dev->ordered_cmd_lock);
2291
2292                 atomic_inc(&cmd->se_dev->dev_ordered_sync);
2293                 smp_mb__after_atomic_inc();
2294
2295                 pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
2296                                 " list, se_ordered_id: %u\n",
2297                                 cmd->t_task_cdb[0],
2298                                 cmd->se_ordered_id);
2299                 /*
2300                  * Add ORDERED command to tail of execution queue if
2301                  * no other older commands exist that need to be
2302                  * completed first.
2303                  */
2304                 if (!atomic_read(&cmd->se_dev->simple_cmds))
2305                         return 1;
2306         } else {
2307                 /*
2308                  * For SIMPLE and UNTAGGED Task Attribute commands
2309                  */
2310                 atomic_inc(&cmd->se_dev->simple_cmds);
2311                 smp_mb__after_atomic_inc();
2312         }
2313         /*
2314          * Otherwise, if one or more outstanding ORDERED task attributes
2315          * exist, the task(s) built for the passed struct se_cmd must stay
2316          * dormant until those older commands complete on this struct se_device.
2317          */
2318         if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
2319                 /*
2320                  * Otherwise, add cmd w/ tasks to delayed cmd queue that
2321                  * will be drained upon completion of HEAD_OF_QUEUE task.
2322                  */
2323                 spin_lock(&cmd->se_dev->delayed_cmd_lock);
2324                 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
2325                 list_add_tail(&cmd->se_delayed_node,
2326                                 &cmd->se_dev->delayed_cmd_list);
2327                 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
2328
2329                 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
2330                         " delayed CMD list, se_ordered_id: %u\n",
2331                         cmd->t_task_cdb[0], cmd->sam_task_attr,
2332                         cmd->se_ordered_id);
2333                 /*
2334                  * Return zero to let transport_execute_tasks() know
2335                  * not to add the delayed tasks to the execution list.
2336                  */
2337                 return 0;
2338         }
2339         /*
2340          * Otherwise, no ORDERED task attributes exist.
2341          */
2342         return 1;
2343 }
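
/*
 * Summary of the returns above: HEAD_OF_QUEUE always yields 1 (execute
 * now, at the head of the queue); ORDERED yields 1 only when no simple
 * commands are outstanding; everything else arriving while ORDERED
 * commands are in flight yields 0 and is parked on delayed_cmd_list
 * until the ordered sync count drains.
 */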
2344
2345 /*
2346  * Called from fabric module context in transport_generic_new_cmd() and
2347  * transport_generic_process_write()
2348  */
2349 static int transport_execute_tasks(struct se_cmd *cmd)
2350 {
2351         int add_tasks;
2352
2353         if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
2354                 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
2355                 transport_generic_request_failure(cmd, NULL, 0, 1);
2356                 return 0;
2357         }
2358
2359         /*
2360          * Call transport_cmd_check_stop() to see if a fabric exception
2361          * has occurred that prevents execution.
2362          */
2363         if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
2364                 /*
2365                  * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2366                  * attribute for the tasks of the received struct se_cmd CDB
2367                  */
2368                 add_tasks = transport_execute_task_attr(cmd);
2369                 if (!add_tasks)
2370                         goto execute_tasks;
2371                 /*
2372                  * This calls transport_add_tasks_from_cmd() to handle
2373                  * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2374                  * (if enabled) in __transport_add_task_to_execute_queue() and
2375                  * transport_add_task_check_sam_attr().
2376                  */
2377                 transport_add_tasks_from_cmd(cmd);
2378         }
2379         /*
2380          * Kick the execution queue for the cmd associated struct se_device
2381          * storage object.
2382          */
2383 execute_tasks:
2384         __transport_execute_tasks(cmd->se_dev);
2385         return 0;
2386 }
2387
2388 /*
2389  * Called to check the struct se_device tcq depth window; once open, pull
2390  * struct se_task from struct se_device->execute_task_list and dispatch it.
2391  *
2392  * Called from transport_processing_thread()
2393  */
2394 static int __transport_execute_tasks(struct se_device *dev)
2395 {
2396         int error;
2397         struct se_cmd *cmd = NULL;
2398         struct se_task *task = NULL;
2399         unsigned long flags;
2400
2401         /*
2402          * Check if there is enough room in the device and HBA queue to send
2403          * struct se_tasks to the selected transport.
2404          */
2405 check_depth:
2406         if (!atomic_read(&dev->depth_left))
2407                 return transport_tcq_window_closed(dev);
2408
2409         dev->dev_tcq_window_closed = 0;
2410
2411         spin_lock_irq(&dev->execute_task_lock);
2412         if (list_empty(&dev->execute_task_list)) {
2413                 spin_unlock_irq(&dev->execute_task_lock);
2414                 return 0;
2415         }
2416         task = list_first_entry(&dev->execute_task_list,
2417                                 struct se_task, t_execute_list);
2418         list_del(&task->t_execute_list);
2419         atomic_set(&task->task_execute_queue, 0);
2420         atomic_dec(&dev->execute_tasks);
2421         spin_unlock_irq(&dev->execute_task_lock);
2422
2423         atomic_dec(&dev->depth_left);
2424
2425         cmd = task->task_se_cmd;
2426
2427         spin_lock_irqsave(&cmd->t_state_lock, flags);
2428         atomic_set(&task->task_active, 1);
2429         atomic_set(&task->task_sent, 1);
2430         atomic_inc(&cmd->t_task_cdbs_sent);
2431
2432         if (atomic_read(&cmd->t_task_cdbs_sent) ==
2433             cmd->t_task_list_num)
2434                 atomic_set(&cmd->transport_sent, 1);
2435
2436         transport_start_task_timer(task);
2437         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2438         /*
2439          * The struct se_cmd->transport_emulate_cdb() function pointer is used
2440          * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
2441          * struct se_subsystem_api->do_task() caller below.
2442          */
2443         if (cmd->transport_emulate_cdb) {
2444                 error = cmd->transport_emulate_cdb(cmd);
2445                 if (error != 0) {
2446                         cmd->transport_error_status = error;
2447                         atomic_set(&task->task_active, 0);
2448                         atomic_set(&cmd->transport_sent, 0);
2449                         transport_stop_tasks_for_cmd(cmd);
2450                         transport_generic_request_failure(cmd, dev, 0, 1);
2451                         goto check_depth;
2452                 }
2453                 /*
2454                  * Handle the successful completion for transport_emulate_cdb()
2455                  * for synchronous operation, unless SCF_EMULATE_CDB_ASYNC is set.
2456                  * Otherwise the caller is expected to complete the task with
2457                  * proper status.
2458                  */
2459                 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
2460                         cmd->scsi_status = SAM_STAT_GOOD;
2461                         task->task_scsi_status = GOOD;
2462                         transport_complete_task(task, 1);
2463                 }
2464         } else {
2465                 /*
2466                  * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
2467                  * RAMDISK we use the internal transport_emulate_control_cdb() logic
2468                  * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
2469                  * LUN emulation code.
2470                  *
2471                  * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
2472                  * call ->do_task() directly and let the underlying TCM subsystem plugin
2473                  * code handle the CDB emulation.
2474                  */
2475                 if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
2476                     (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
2477                         error = transport_emulate_control_cdb(task);
2478                 else
2479                         error = dev->transport->do_task(task);
2480
2481                 if (error != 0) {
2482                         cmd->transport_error_status = error;
2483                         atomic_set(&task->task_active, 0);
2484                         atomic_set(&cmd->transport_sent, 0);
2485                         transport_stop_tasks_for_cmd(cmd);
2486                         transport_generic_request_failure(cmd, dev, 0, 1);
2487                 }
2488         }
2489
2490         goto check_depth;
2491
2492         return 0;
2493 }
2494
2495 void transport_new_cmd_failure(struct se_cmd *se_cmd)
2496 {
2497         unsigned long flags;
2498         /*
2499          * Any unsolicited data will get dumped for failed command inside of
2500          * the fabric plugin
2501          */
2502         spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2503         se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2504         se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2505         spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2506 }
2507
2508 static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
2509
2510 static inline u32 transport_get_sectors_6(
2511         unsigned char *cdb,
2512         struct se_cmd *cmd,
2513         int *ret)
2514 {
2515         struct se_device *dev = cmd->se_dev;
2516
2517         /*
2518          * Assume TYPE_DISK for non struct se_device objects.
2519          * Use 8-bit sector value.
2520          */
2521         if (!dev)
2522                 goto type_disk;
2523
2524         /*
2525          * Use 24-bit allocation length for TYPE_TAPE.
2526          */
2527         if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2528                 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2529
2530         /*
2531          * Everything else assume TYPE_DISK Sector CDB location.
2532          * Use 8-bit sector value.
2533          */
2534 type_disk:
2535         return (u32)cdb[4];
2536 }
2537
2538 static inline u32 transport_get_sectors_10(
2539         unsigned char *cdb,
2540         struct se_cmd *cmd,
2541         int *ret)
2542 {
2543         struct se_device *dev = cmd->se_dev;
2544
2545         /*
2546          * Assume TYPE_DISK for non struct se_device objects.
2547          * Use 16-bit sector value.
2548          */
2549         if (!dev)
2550                 goto type_disk;
2551
2552         /*
2553          * XXX_10 is not defined in SSC, throw an exception
2554          */
2555         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2556                 *ret = -EINVAL;
2557                 return 0;
2558         }
2559
2560         /*
2561          * Everything else assume TYPE_DISK Sector CDB location.
2562          * Use 16-bit sector value.
2563          */
2564 type_disk:
2565         return (u32)(cdb[7] << 8) + cdb[8];
2566 }
2567
2568 static inline u32 transport_get_sectors_12(
2569         unsigned char *cdb,
2570         struct se_cmd *cmd,
2571         int *ret)
2572 {
2573         struct se_device *dev = cmd->se_dev;
2574
2575         /*
2576          * Assume TYPE_DISK for non struct se_device objects.
2577          * Use 32-bit sector value.
2578          */
2579         if (!dev)
2580                 goto type_disk;
2581
2582         /*
2583          * XXX_12 is not defined in SSC, throw an exception
2584          */
2585         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2586                 *ret = -EINVAL;
2587                 return 0;
2588         }
2589
2590         /*
2591          * Everything else assume TYPE_DISK Sector CDB location.
2592          * Use 32-bit sector value.
2593          */
2594 type_disk:
2595         return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2596 }
2597
2598 static inline u32 transport_get_sectors_16(
2599         unsigned char *cdb,
2600         struct se_cmd *cmd,
2601         int *ret)
2602 {
2603         struct se_device *dev = cmd->se_dev;
2604
2605         /*
2606          * Assume TYPE_DISK for non struct se_device objects.
2607          * Use 32-bit sector value.
2608          */
2609         if (!dev)
2610                 goto type_disk;
2611
2612         /*
2613          * Use 24-bit allocation length for TYPE_TAPE.
2614          */
2615         if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2616                 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2617
2618 type_disk:
2619         return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2620                     (cdb[12] << 8) + cdb[13];
2621 }
2622
2623 /*
2624  * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2625  */
2626 static inline u32 transport_get_sectors_32(
2627         unsigned char *cdb,
2628         struct se_cmd *cmd,
2629         int *ret)
2630 {
2631         /*
2632          * WRITE_32/READ_32 always carry the transfer length as a 32-bit
2633          * value in bytes 28-31 of the extended CDB.
2634          */
2635         return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2636                     (cdb[30] << 8) + cdb[31];
2637
2638 }
2639
2640 static inline u32 transport_get_size(
2641         u32 sectors,
2642         unsigned char *cdb,
2643         struct se_cmd *cmd)
2644 {
2645         struct se_device *dev = cmd->se_dev;
2646
2647         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2648                 if (cdb[1] & 1) { /* sectors */
2649                         return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2650                 } else /* bytes */
2651                         return sectors;
2652         }
2653 #if 0
2654         pr_debug("Returning block_size: %u, sectors: %u == %u for"
2655                         " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2656                         dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2657                         dev->transport->name);
2658 #endif
2659         return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2660 }
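
/*
 * Worked example for the TYPE_DISK case above: with a 512-byte
 * block_size attribute, an 8-sector READ_10 yields
 * transport_get_size() == 8 * 512 == 4096 bytes, which is expected
 * to match the fabric-reported cmd->data_length.
 */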
2661
2662 static void transport_xor_callback(struct se_cmd *cmd)
2663 {
2664         unsigned char *buf, *addr;
2665         struct scatterlist *sg;
2666         unsigned int offset;
2667         int i;
2668         int count;
2669         /*
2670          * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2671          *
2672          * 1) read the specified logical block(s);
2673          * 2) transfer logical blocks from the data-out buffer;
2674          * 3) XOR the logical blocks transferred from the data-out buffer with
2675          *    the logical blocks read, storing the resulting XOR data in a buffer;
2676          * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2677          *    blocks transferred from the data-out buffer; and
2678          * 5) transfer the resulting XOR data to the data-in buffer.
2679          */
2680         buf = kmalloc(cmd->data_length, GFP_KERNEL);
2681         if (!buf) {
2682                 pr_err("Unable to allocate xor_callback buf\n");
2683                 return;
2684         }
2685         /*
2686          * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
2687          * into the locally allocated *buf
2688          */
2689         sg_copy_to_buffer(cmd->t_data_sg,
2690                           cmd->t_data_nents,
2691                           buf,
2692                           cmd->data_length);
2693
2694         /*
2695          * Now perform the XOR against the BIDI read memory located at
2696          * cmd->t_bidi_data_sg
2697          */
2698
2699         offset = 0;
2700         for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2701                 addr = kmap_atomic(sg_page(sg), KM_USER0);
2702                 if (!addr)
2703                         goto out;
2704
2705                 for (i = 0; i < sg->length; i++)
2706                         *(addr + sg->offset + i) ^= *(buf + offset + i);
2707
2708                 offset += sg->length;
2709                 kunmap_atomic(addr, KM_USER0);
2710         }
2711
2712 out:
2713         kfree(buf);
2714 }
2715
2716 /*
2717  * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2718  */
2719 static int transport_get_sense_data(struct se_cmd *cmd)
2720 {
2721         unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2722         struct se_device *dev;
2723         struct se_task *task = NULL, *task_tmp;
2724         unsigned long flags;
2725         u32 offset = 0;
2726
2727         WARN_ON(!cmd->se_lun);
2728
2729         spin_lock_irqsave(&cmd->t_state_lock, flags);
2730         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2731                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2732                 return 0;
2733         }
2734
2735         list_for_each_entry_safe(task, task_tmp,
2736                                 &cmd->t_task_list, t_list) {
2737
2738                 if (!task->task_sense)
2739                         continue;
2740
2741                 dev = task->se_dev;
2742                 if (!dev)
2743                         continue;
2744
2745                 if (!dev->transport->get_sense_buffer) {
2746                         pr_err("dev->transport->get_sense_buffer"
2747                                         " is NULL\n");
2748                         continue;
2749                 }
2750
2751                 sense_buffer = dev->transport->get_sense_buffer(task);
2752                 if (!sense_buffer) {
2753                         pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
2754                                 " sense buffer for task with sense\n",
2755                                 cmd->se_tfo->get_task_tag(cmd), task->task_no);
2756                         continue;
2757                 }
2758                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2759
2760                 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2761                                 TRANSPORT_SENSE_BUFFER);
2762
2763                 memcpy(&buffer[offset], sense_buffer,
2764                                 TRANSPORT_SENSE_BUFFER);
2765                 cmd->scsi_status = task->task_scsi_status;
2766                 /* Automatically padded */
2767                 cmd->scsi_sense_length =
2768                                 (TRANSPORT_SENSE_BUFFER + offset);
2769
2770                 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2771                                 " and sense\n",
2772                         dev->se_hba->hba_id, dev->transport->name,
2773                                 cmd->scsi_status);
2774                 return 0;
2775         }
2776         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2777
2778         return -1;
2779 }
2780
2781 static int
2782 transport_handle_reservation_conflict(struct se_cmd *cmd)
2783 {
2784         cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
2785         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2786         cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2787         cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2788         /*
2789          * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2790          * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2791          * CONFLICT STATUS.
2792          *
2793          * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2794          */
2795         if (cmd->se_sess &&
2796             cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2797                 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
2798                         cmd->orig_fe_lun, 0x2C,
2799                         ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2800         return -EINVAL;
2801 }
2802
2803 static inline long long transport_dev_end_lba(struct se_device *dev)
2804 {
2805         return dev->transport->get_blocks(dev) + 1;
2806 }
2807
2808 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2809 {
2810         struct se_device *dev = cmd->se_dev;
2811         u32 sectors;
2812
2813         if (dev->transport->get_device_type(dev) != TYPE_DISK)
2814                 return 0;
2815
2816         sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2817
2818         if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2819                 pr_err("LBA: %llu Sectors: %u exceeds"
2820                         " transport_dev_end_lba(): %llu\n",
2821                         cmd->t_task_lba, sectors,
2822                         transport_dev_end_lba(dev));
2823                 pr_err("  We should return CHECK_CONDITION"
2824                        " but we don't yet\n");
2825                 return 0;
2826         }
2827
2828         return sectors;
2829 }
2830
2831 /*      transport_generic_cmd_sequencer():
2832  *
2833  *      Generic Command Sequencer that should work for most DAS transport
2834  *      drivers.
2835  *
2836  *      Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2837  *      RX Thread.
2838  *
2839  *      FIXME: Need to support other SCSI OPCODES as well.
2840  */
2841 static int transport_generic_cmd_sequencer(
2842         struct se_cmd *cmd,
2843         unsigned char *cdb)
2844 {
2845         struct se_device *dev = cmd->se_dev;
2846         struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2847         int ret = 0, sector_ret = 0, passthrough;
2848         u32 sectors = 0, size = 0, pr_reg_type = 0;
2849         u16 service_action;
2850         u8 alua_ascq = 0;
2851         /*
2852          * Check for an existing UNIT ATTENTION condition
2853          */
2854         if (core_scsi3_ua_check(cmd, cdb) < 0) {
2855                 cmd->transport_wait_for_tasks =
2856                                 &transport_nop_wait_for_tasks;
2857                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2858                 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2859                 return -EINVAL;
2860         }
2861         /*
2862          * Check status of Asymmetric Logical Unit Assignment port
2863          */
2864         ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
2865         if (ret != 0) {
2866                 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
2867                 /*
2868                  * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
2869                  * The ALUA additional sense code qualifier (ASCQ) is determined
2870                  * by the ALUA primary or secondary access state.
2871                  */
2872                 if (ret > 0) {
2873 #if 0
2874                         pr_debug("[%s]: ALUA TG Port not available,"
2875                                 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2876                                 cmd->se_tfo->get_fabric_name(), alua_ascq);
2877 #endif
2878                         transport_set_sense_codes(cmd, 0x04, alua_ascq);
2879                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2880                         cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
2881                         return -EINVAL;
2882                 }
2883                 goto out_invalid_cdb_field;
2884         }
2885         /*
2886          * Check status for SPC-3 Persistent Reservations
2887          */
2888         if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2889                 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2890                                         cmd, cdb, pr_reg_type) != 0)
2891                         return transport_handle_reservation_conflict(cmd);
2892                 /*
2893                  * This means the CDB is allowed for the SCSI Initiator port
2894                  * when said port is *NOT* holding the legacy SPC-2 or
2895                  * SPC-3 Persistent Reservation.
2896                  */
2897         }
2898
2899         switch (cdb[0]) {
2900         case READ_6:
2901                 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2902                 if (sector_ret)
2903                         goto out_unsupported_cdb;
2904                 size = transport_get_size(sectors, cdb, cmd);
2905                 cmd->transport_split_cdb = &split_cdb_XX_6;
2906                 cmd->t_task_lba = transport_lba_21(cdb);
2907                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2908                 break;
2909         case READ_10:
2910                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2911                 if (sector_ret)
2912                         goto out_unsupported_cdb;
2913                 size = transport_get_size(sectors, cdb, cmd);
2914                 cmd->transport_split_cdb = &split_cdb_XX_10;
2915                 cmd->t_task_lba = transport_lba_32(cdb);
2916                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2917                 break;
2918         case READ_12:
2919                 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2920                 if (sector_ret)
2921                         goto out_unsupported_cdb;
2922                 size = transport_get_size(sectors, cdb, cmd);
2923                 cmd->transport_split_cdb = &split_cdb_XX_12;
2924                 cmd->t_task_lba = transport_lba_32(cdb);
2925                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2926                 break;
2927         case READ_16:
2928                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2929                 if (sector_ret)
2930                         goto out_unsupported_cdb;
2931                 size = transport_get_size(sectors, cdb, cmd);
2932                 cmd->transport_split_cdb = &split_cdb_XX_16;
2933                 cmd->t_task_lba = transport_lba_64(cdb);
2934                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2935                 break;
2936         case WRITE_6:
2937                 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2938                 if (sector_ret)
2939                         goto out_unsupported_cdb;
2940                 size = transport_get_size(sectors, cdb, cmd);
2941                 cmd->transport_split_cdb = &split_cdb_XX_6;
2942                 cmd->t_task_lba = transport_lba_21(cdb);
2943                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2944                 break;
2945         case WRITE_10:
2946                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2947                 if (sector_ret)
2948                         goto out_unsupported_cdb;
2949                 size = transport_get_size(sectors, cdb, cmd);
2950                 cmd->transport_split_cdb = &split_cdb_XX_10;
2951                 cmd->t_task_lba = transport_lba_32(cdb);
2952                 cmd->t_tasks_fua = (cdb[1] & 0x8);
2953                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2954                 break;
2955         case WRITE_12:
2956                 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2957                 if (sector_ret)
2958                         goto out_unsupported_cdb;
2959                 size = transport_get_size(sectors, cdb, cmd);
2960                 cmd->transport_split_cdb = &split_cdb_XX_12;
2961                 cmd->t_task_lba = transport_lba_32(cdb);
2962                 cmd->t_tasks_fua = (cdb[1] & 0x8);
2963                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2964                 break;
2965         case WRITE_16:
2966                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2967                 if (sector_ret)
2968                         goto out_unsupported_cdb;
2969                 size = transport_get_size(sectors, cdb, cmd);
2970                 cmd->transport_split_cdb = &split_cdb_XX_16;
2971                 cmd->t_task_lba = transport_lba_64(cdb);
2972                 cmd->t_tasks_fua = (cdb[1] & 0x8);
2973                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2974                 break;
2975         case XDWRITEREAD_10:
2976                 if ((cmd->data_direction != DMA_TO_DEVICE) ||
2977                     !(cmd->t_tasks_bidi))
2978                         goto out_invalid_cdb_field;
2979                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2980                 if (sector_ret)
2981                         goto out_unsupported_cdb;
2982                 size = transport_get_size(sectors, cdb, cmd);
2983                 cmd->transport_split_cdb = &split_cdb_XX_10;
2984                 cmd->t_task_lba = transport_lba_32(cdb);
2985                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2986                 passthrough = (dev->transport->transport_type ==
2987                                 TRANSPORT_PLUGIN_PHBA_PDEV);
2988                 /*
2989                  * Skip the remaining assignments for TCM/PSCSI passthrough
2990                  */
2991                 if (passthrough)
2992                         break;
2993                 /*
2994                  * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
2995                  */
2996                 cmd->transport_complete_callback = &transport_xor_callback;
2997                 cmd->t_tasks_fua = (cdb[1] & 0x8);
2998                 break;
2999         case VARIABLE_LENGTH_CMD:
3000                 service_action = get_unaligned_be16(&cdb[8]);
3001                 /*
3002                  * Determine if this is TCM/PSCSI device and we should disable
3003                  * internal emulation for this CDB.
3004                  */
3005                 passthrough = (dev->transport->transport_type ==
3006                                         TRANSPORT_PLUGIN_PHBA_PDEV);
3007
3008                 switch (service_action) {
3009                 case XDWRITEREAD_32:
3010                         sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3011                         if (sector_ret)
3012                                 goto out_unsupported_cdb;
3013                         size = transport_get_size(sectors, cdb, cmd);
3014                         /*
3015                          * Use WRITE_32 and READ_32 opcodes for the emulated
3016                          * XDWRITE_READ_32 logic.
3017                          */
3018                         cmd->transport_split_cdb = &split_cdb_XX_32;
3019                         cmd->t_task_lba = transport_lba_64_ext(cdb);
3020                         cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3021
3022                         /*
3023                          * Skip the remaining assignments for TCM/PSCSI passthrough
3024                          */
3025                         if (passthrough)
3026                                 break;
3027
3028                         /*
3029                          * Setup BIDI XOR callback to be run during
3030                          * transport_generic_complete_ok()
3031                          */
3032                         cmd->transport_complete_callback = &transport_xor_callback;
3033                         cmd->t_tasks_fua = (cdb[10] & 0x8);
3034                         break;
3035                 case WRITE_SAME_32:
3036                         sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3037                         if (sector_ret)
3038                                 goto out_unsupported_cdb;
3039
3040                         if (sectors) {
3041                                 size = transport_get_size(sectors, cdb, cmd);
3042                         } else {
3043                                 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
3044                                        " supported\n");
3045                                 goto out_invalid_cdb_field;
3046                         }
3047
3048                         cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
3049                         cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3050
3051                         /*
3052                          * Skip the remaining assignments for TCM/PSCSI passthrough
3053                          */
3054                         if (passthrough)
3055                                 break;
3056
3057                         if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3058                                 pr_err("WRITE_SAME PBDATA and LBDATA"
3059                                         " bits not supported for Block Discard"
3060                                         " Emulation\n");
3061                                 goto out_invalid_cdb_field;
3062                         }
3063                         /*
3064                          * Currently for the emulated case we only accept
3065                          * tpws with the UNMAP=1 bit set.
3066                          */
3067                         if (!(cdb[10] & 0x08)) {
3068                                 pr_err("WRITE_SAME w/o UNMAP bit not"
3069                                         " supported for Block Discard Emulation\n");
3070                                 goto out_invalid_cdb_field;
3071                         }
3072                         break;
3073                 default:
3074                         pr_err("VARIABLE_LENGTH_CMD service action"
3075                                 " 0x%04x not supported\n", service_action);
3076                         goto out_unsupported_cdb;
3077                 }
3078                 break;
3079         case MAINTENANCE_IN:
3080                 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
3081                         /* MAINTENANCE_IN from SCC-2 */
3082                         /*
3083                          * Check for emulated MI_REPORT_TARGET_PGS.
3084                          */
3085                         if (cdb[1] == MI_REPORT_TARGET_PGS) {
3086                                 cmd->transport_emulate_cdb =
3087                                 (su_dev->t10_alua.alua_type ==
3088                                  SPC3_ALUA_EMULATED) ?
3089                                 core_emulate_report_target_port_groups :
3090                                 NULL;
3091                         }
3092                         size = (cdb[6] << 24) | (cdb[7] << 16) |
3093                                (cdb[8] << 8) | cdb[9];
3094                 } else {
3095                         /* GPCMD_SEND_KEY from multi media commands */
3096                         size = (cdb[8] << 8) + cdb[9];
3097                 }
3098                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3099                 break;
3100         case MODE_SELECT:
3101                 size = cdb[4];
3102                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3103                 break;
3104         case MODE_SELECT_10:
3105                 size = (cdb[7] << 8) + cdb[8];
3106                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3107                 break;
3108         case MODE_SENSE:
3109                 size = cdb[4];
3110                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3111                 break;
3112         case MODE_SENSE_10:
3113         case GPCMD_READ_BUFFER_CAPACITY:
3114         case GPCMD_SEND_OPC:
3115         case LOG_SELECT:
3116         case LOG_SENSE:
3117                 size = (cdb[7] << 8) + cdb[8];
3118                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3119                 break;
3120         case READ_BLOCK_LIMITS:
3121                 size = READ_BLOCK_LEN;
3122                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3123                 break;
3124         case GPCMD_GET_CONFIGURATION:
3125         case GPCMD_READ_FORMAT_CAPACITIES:
3126         case GPCMD_READ_DISC_INFO:
3127         case GPCMD_READ_TRACK_RZONE_INFO:
3128                 size = (cdb[7] << 8) + cdb[8];
3129                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3130                 break;
3131         case PERSISTENT_RESERVE_IN:
3132         case PERSISTENT_RESERVE_OUT:
3133                 cmd->transport_emulate_cdb =
3134                         (su_dev->t10_pr.res_type ==
3135                          SPC3_PERSISTENT_RESERVATIONS) ?
3136                         core_scsi3_emulate_pr : NULL;
3137                 size = (cdb[7] << 8) + cdb[8];
3138                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3139                 break;
3140         case GPCMD_MECHANISM_STATUS:
3141         case GPCMD_READ_DVD_STRUCTURE:
3142                 size = (cdb[8] << 8) + cdb[9];
3143                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3144                 break;
3145         case READ_POSITION:
3146                 size = READ_POSITION_LEN;
3147                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3148                 break;
3149         case MAINTENANCE_OUT:
3150                 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
3151                         /* MAINTENANCE_OUT from SCC-2
3152                          *
3153                          * Check for emulated MO_SET_TARGET_PGS.
3154                          */
3155                         if (cdb[1] == MO_SET_TARGET_PGS) {
3156                                 cmd->transport_emulate_cdb =
3157                                 (su_dev->t10_alua.alua_type ==
3158                                         SPC3_ALUA_EMULATED) ?
3159                                 core_emulate_set_target_port_groups :
3160                                 NULL;
3161                         }
3162
3163                         size = (cdb[6] << 24) | (cdb[7] << 16) |
3164                                (cdb[8] << 8) | cdb[9];
3165                 } else {
3166                         /* GPCMD_REPORT_KEY from multi media commands */
3167                         size = (cdb[8] << 8) + cdb[9];
3168                 }
3169                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3170                 break;
3171         case INQUIRY:
3172                 size = (cdb[3] << 8) + cdb[4];
3173                 /*
3174          * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
3175                  * See spc4r17 section 5.3
3176                  */
3177                 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3178                         cmd->sam_task_attr = MSG_HEAD_TAG;
3179                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3180                 break;
3181         case READ_BUFFER:
3182                 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3183                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3184                 break;
3185         case READ_CAPACITY:
3186                 size = READ_CAP_LEN;
3187                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3188                 break;
3189         case READ_MEDIA_SERIAL_NUMBER:
3190         case SECURITY_PROTOCOL_IN:
3191         case SECURITY_PROTOCOL_OUT:
3192                 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3193                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3194                 break;
3195         case SERVICE_ACTION_IN:
3196         case ACCESS_CONTROL_IN:
3197         case ACCESS_CONTROL_OUT:
3198         case EXTENDED_COPY:
3199         case READ_ATTRIBUTE:
3200         case RECEIVE_COPY_RESULTS:
3201         case WRITE_ATTRIBUTE:
3202                 size = (cdb[10] << 24) | (cdb[11] << 16) |
3203                        (cdb[12] << 8) | cdb[13];
3204                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3205                 break;
3206         case RECEIVE_DIAGNOSTIC:
3207         case SEND_DIAGNOSTIC:
3208                 size = (cdb[3] << 8) | cdb[4];
3209                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3210                 break;
3211 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
3212 #if 0
3213         case GPCMD_READ_CD:
3214                 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3215                 size = (2336 * sectors);
3216                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3217                 break;
3218 #endif
3219         case READ_TOC:
3220                 size = cdb[8];
3221                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3222                 break;
3223         case REQUEST_SENSE:
3224                 size = cdb[4];
3225                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3226                 break;
3227         case READ_ELEMENT_STATUS:
3228                 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
3229                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3230                 break;
3231         case WRITE_BUFFER:
3232                 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3233                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3234                 break;
3235         case RESERVE:
3236         case RESERVE_10:
3237                 /*
3238                  * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
3239                  * Assume the passthrough or $FABRIC_MOD will tell us about it.
3240                  */
3241                 if (cdb[0] == RESERVE_10)
3242                         size = (cdb[7] << 8) | cdb[8];
3243                 else
3244                         size = cmd->data_length;
3245
3246                 /*
3247                  * Setup the legacy emulated handler for SPC-2 and
3248                  * >= SPC-3 compatible reservation handling (CRH=1)
3249                  * Otherwise, we assume the underlying SCSI logic is
3250                  * running in SPC_PASSTHROUGH, and wants reservations
3251                  * emulation disabled.
3252                  */
3253                 cmd->transport_emulate_cdb =
3254                                 (su_dev->t10_pr.res_type !=
3255                                  SPC_PASSTHROUGH) ?
3256                                 core_scsi2_emulate_crh : NULL;
3257                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3258                 break;
3259         case RELEASE:
3260         case RELEASE_10:
3261                 /*
3262                  * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
3263                  * Assume the passthrough or $FABRIC_MOD will tell us about it.
3264                  */
3265                 if (cdb[0] == RELEASE_10)
3266                         size = (cdb[7] << 8) | cdb[8];
3267                 else
3268                         size = cmd->data_length;
3269
3270                 cmd->transport_emulate_cdb =
3271                                 (su_dev->t10_pr.res_type !=
3272                                  SPC_PASSTHROUGH) ?
3273                                 core_scsi2_emulate_crh : NULL;
3274                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3275                 break;
3276         case SYNCHRONIZE_CACHE:
3277         case 0x91: /* SYNCHRONIZE_CACHE_16: */
3278                 /*
3279                  * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
3280                  */
3281                 if (cdb[0] == SYNCHRONIZE_CACHE) {
3282                         sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3283                         cmd->t_task_lba = transport_lba_32(cdb);
3284                 } else {
3285                         sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3286                         cmd->t_task_lba = transport_lba_64(cdb);
3287                 }
3288                 if (sector_ret)
3289                         goto out_unsupported_cdb;
3290
3291                 size = transport_get_size(sectors, cdb, cmd);
3292                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3293
3294                 /*
3295                  * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
3296                  */
3297                 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
3298                         break;
3299                 /*
3300                  * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
3301                  * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
3302                  */
3303                 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3304                 /*
3305                  * Check to ensure that LBA + Range does not extend past
3306                  * the end of the device.
3307                  */
3308                 if (!transport_cmd_get_valid_sectors(cmd))
3309                         goto out_invalid_cdb_field;
3310                 break;
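	/*
	 * Illustrative sketch (not part of the build): the two CDB layouts
	 * handled above. SYNCHRONIZE_CACHE(10) carries a 32-bit LBA in bytes
	 * 2..5, while SYNCHRONIZE_CACHE_16 carries a 64-bit LBA in bytes 2..9,
	 * which is why the LBA extraction differs per opcode.
	 */
#if 0
	{
		/* hypothetical SYNCHRONIZE_CACHE(10): LBA 4096, 32 blocks */
		unsigned char sc10[10] = { SYNCHRONIZE_CACHE, 0x00,
			0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x20, 0x00 };

		u32 lba = get_unaligned_be32(&sc10[2]);		/* 4096 */
		u16 blocks = get_unaligned_be16(&sc10[7]);	/* 32 */
	}
#endif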
3311         case UNMAP:
3312                 size = get_unaligned_be16(&cdb[7]);
3313                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3314                 break;
3315         case WRITE_SAME_16:
3316                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3317                 if (sector_ret)
3318                         goto out_unsupported_cdb;
3319
3320                 if (sectors) {
3321                         size = transport_get_size(sectors, cdb, cmd);
3322                 } else {
3323                         pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3324                         goto out_invalid_cdb_field;
3325                 }
3326
3327                 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
3328                 passthrough = (dev->transport->transport_type ==
3329                                 TRANSPORT_PLUGIN_PHBA_PDEV);
3330                 /*
3331                  * Determine if the received WRITE_SAME_16 is used for direct
3332                  * passthrough into Linux/SCSI with struct request via TCM/pSCSI
3333                  * or we are signaling the use of internal WRITE_SAME + UNMAP=1
3334                  * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and
3335                  * TCM/FILEIO subsystem plugin backstores.
3336                  */
3337                 if (!passthrough) {
3338                         if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
3339                                 pr_err("WRITE_SAME PBDATA and LBDATA"
3340                                         " bits not supported for Block Discard"
3341                                         " Emulation\n");
3342                                 goto out_invalid_cdb_field;
3343                         }
3344                         /*
3345                          * Currently for the emulated case we only accept
3346                          * tpws with the UNMAP=1 bit set.
3347                          */
3348                         if (!(cdb[1] & 0x08)) {
3349                                 pr_err("WRITE_SAME w/o UNMAP bit not"
3350                                         " supported for Block Discard Emulation\n");
3351                                 goto out_invalid_cdb_field;
3352                         }
3353                 }
3354                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3355                 break;
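	/*
	 * Illustrative sketch (not part of the build): WRITE_SAME_16 carries a
	 * 64-bit LBA in bytes 2..9 and a 32-bit block count in bytes 10..13,
	 * so the LBA above must be pulled with get_unaligned_be64(&cdb[2]);
	 * any narrower load silently truncates LBAs beyond the low bytes.
	 */
#if 0
	{
		/* hypothetical WRITE_SAME_16: UNMAP=1, LBA 2^32, 8 blocks */
		unsigned char ws16[16] = { WRITE_SAME_16, 0x08,
			0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x08, 0x00, 0x00 };

		u64 lba = get_unaligned_be64(&ws16[2]);		/* 4294967296 */
		u32 blocks = get_unaligned_be32(&ws16[10]);	/* 8 */
	}
#endif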
3356         case ALLOW_MEDIUM_REMOVAL:
3357         case GPCMD_CLOSE_TRACK:
3358         case ERASE:
3359         case INITIALIZE_ELEMENT_STATUS:
3360         case GPCMD_LOAD_UNLOAD:
3361         case REZERO_UNIT:
3362         case SEEK_10:
3363         case GPCMD_SET_SPEED:
3364         case SPACE:
3365         case START_STOP:
3366         case TEST_UNIT_READY:
3367         case VERIFY:
3368         case WRITE_FILEMARKS:
3369         case MOVE_MEDIUM:
3370                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3371                 break;
3372         case REPORT_LUNS:
3373                 cmd->transport_emulate_cdb =
3374                                 transport_core_report_lun_response;
3375                 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3376                 /*
3377          * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3378                  * See spc4r17 section 5.3
3379                  */
3380                 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3381                         cmd->sam_task_attr = MSG_HEAD_TAG;
3382                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3383                 break;
3384         default:
3385                 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3386                         " 0x%02x, sending CHECK_CONDITION.\n",
3387                         cmd->se_tfo->get_fabric_name(), cdb[0]);
3388                 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3389                 goto out_unsupported_cdb;
3390         }
3391
3392         if (size != cmd->data_length) {
3393                 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3394                         " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3395                         " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
3396                                 cmd->data_length, size, cdb[0]);
3397
3398                 cmd->cmd_spdtl = size;
3399
3400                 if (cmd->data_direction == DMA_TO_DEVICE) {
3401                         pr_err("Rejecting underflow/overflow"
3402                                         " WRITE data\n");
3403                         goto out_invalid_cdb_field;
3404                 }
3405                 /*
3406                  * Reject READ_* or WRITE_* with overflow/underflow for
3407                  * type SCF_SCSI_DATA_SG_IO_CDB.
3408                  */
3409                 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
3410                         pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
3411                                 " CDB on non 512-byte sector setup subsystem"
3412                                 " plugin: %s\n", dev->transport->name);
3413                         /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3414                         goto out_invalid_cdb_field;
3415                 }
3416
3417                 if (size > cmd->data_length) {
3418                         cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3419                         cmd->residual_count = (size - cmd->data_length);
3420                 } else {
3421                         cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3422                         cmd->residual_count = (cmd->data_length - size);
3423                 }
3424                 cmd->data_length = size;
3425         }
3426
3427         /* Let's limit control cdbs to a page, for simplicity's sake. */
3428         if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3429             size > PAGE_SIZE)
3430                 goto out_invalid_cdb_field;
3431
3432         transport_set_supported_SAM_opcode(cmd);
3433         return ret;
3434
3435 out_unsupported_cdb:
3436         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3437         cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3438         return -EINVAL;
3439 out_invalid_cdb_field:
3440         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3441         cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3442         return -EINVAL;
3443 }
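/*
 * Illustrative sketch (not part of the build): the residual bookkeeping at
 * the tail of transport_generic_cmd_sequencer() with hypothetical numbers.
 */
#if 0
static void example_residual_accounting(struct se_cmd *cmd)
{
	u32 size = 4096;		/* transfer length decoded from the CDB */

	cmd->data_length = 2048;	/* what the fabric expects to move */

	if (size > cmd->data_length) {
		/* CDB wants more than the fabric will transfer: OVERFLOW */
		cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
		cmd->residual_count = size - cmd->data_length;	/* 2048 */
	} else {
		cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
		cmd->residual_count = cmd->data_length - size;
	}
	cmd->data_length = size;
}
#endif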
3444
3445 /*
3446  * Called from transport_generic_complete_ok() and
3447  * transport_generic_request_failure() to determine which dormant/delayed
3448  * and ordered cmds need to have their tasks added to the execution queue.
3449  */
3450 static void transport_complete_task_attr(struct se_cmd *cmd)
3451 {
3452         struct se_device *dev = cmd->se_dev;
3453         struct se_cmd *cmd_p, *cmd_tmp;
3454         int new_active_tasks = 0;
3455
3456         if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3457                 atomic_dec(&dev->simple_cmds);
3458                 smp_mb__after_atomic_dec();
3459                 dev->dev_cur_ordered_id++;
3460                 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3461                         " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3462                         cmd->se_ordered_id);
3463         } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3464                 atomic_dec(&dev->dev_hoq_count);
3465                 smp_mb__after_atomic_dec();
3466                 dev->dev_cur_ordered_id++;
3467                 pr_debug("Incremented dev_cur_ordered_id: %u for"
3468                         " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3469                         cmd->se_ordered_id);
3470         } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3471                 spin_lock(&dev->ordered_cmd_lock);
3472                 list_del(&cmd->se_ordered_node);
3473                 atomic_dec(&dev->dev_ordered_sync);
3474                 smp_mb__after_atomic_dec();
3475                 spin_unlock(&dev->ordered_cmd_lock);
3476
3477                 dev->dev_cur_ordered_id++;
3478                 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3479                         " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3480         }
3481         /*
3482          * Process all commands up to the last received
3483          * ORDERED task attribute which requires another blocking
3484          * boundary
3485          */
3486         spin_lock(&dev->delayed_cmd_lock);
3487         list_for_each_entry_safe(cmd_p, cmd_tmp,
3488                         &dev->delayed_cmd_list, se_delayed_node) {
3489
3490                 list_del(&cmd_p->se_delayed_node);
3491                 spin_unlock(&dev->delayed_cmd_lock);
3492
3493                 pr_debug("Calling add_tasks() for"
3494                         " cmd_p: 0x%02x Task Attr: 0x%02x"
3495                         " Dormant -> Active, se_ordered_id: %u\n",
3496                         cmd_p->t_task_cdb[0],
3497                         cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3498
3499                 transport_add_tasks_from_cmd(cmd_p);
3500                 new_active_tasks++;
3501
3502                 spin_lock(&dev->delayed_cmd_lock);
3503                 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3504                         break;
3505         }
3506         spin_unlock(&dev->delayed_cmd_lock);
3507         /*
3508          * If new tasks have become active, wake up the transport thread
3509          * to do the processing of the Active tasks.
3510          */
3511         if (new_active_tasks != 0)
3512                 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
3513 }
3514
3515 static int transport_complete_qf(struct se_cmd *cmd)
3516 {
3517         int ret = 0;
3518
3519         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
3520                 return cmd->se_tfo->queue_status(cmd);
3521
3522         switch (cmd->data_direction) {
3523         case DMA_FROM_DEVICE:
3524                 ret = cmd->se_tfo->queue_data_in(cmd);
3525                 break;
3526         case DMA_TO_DEVICE:
3527                 if (cmd->t_bidi_data_sg) {
3528                         ret = cmd->se_tfo->queue_data_in(cmd);
3529                         if (ret < 0)
3530                                 return ret;
3531                 }
3532                 /* Fall through for DMA_TO_DEVICE */
3533         case DMA_NONE:
3534                 ret = cmd->se_tfo->queue_status(cmd);
3535                 break;
3536         default:
3537                 break;
3538         }
3539
3540         return ret;
3541 }
3542
3543 static void transport_handle_queue_full(
3544         struct se_cmd *cmd,
3545         struct se_device *dev,
3546         int (*qf_callback)(struct se_cmd *))
3547 {
3548         spin_lock_irq(&dev->qf_cmd_lock);
3549         cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
3550         cmd->transport_qf_callback = qf_callback;
3551         list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3552         atomic_inc(&dev->dev_qf_count);
3553         smp_mb__after_atomic_inc();
3554         spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3555
3556         schedule_work(&cmd->se_dev->qf_work_queue);
3557 }
3558
3559 static void transport_generic_complete_ok(struct se_cmd *cmd)
3560 {
3561         int reason = 0, ret;
3562         /*
3563          * Check if we need to move delayed/dormant tasks from cmds on the
3564          * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3565          * Attribute.
3566          */
3567         if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3568                 transport_complete_task_attr(cmd);
3569         /*
3570          * Check to schedule QUEUE_FULL work, or execute an existing
3571          * cmd->transport_qf_callback()
3572          */
3573         if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3574                 schedule_work(&cmd->se_dev->qf_work_queue);
3575
3576         if (cmd->transport_qf_callback) {
3577                 ret = cmd->transport_qf_callback(cmd);
3578                 if (ret < 0)
3579                         goto queue_full;
3580
3581                 cmd->transport_qf_callback = NULL;
3582                 goto done;
3583         }
3584         /*
3585          * Check if we need to retrieve a sense buffer from
3586          * the struct se_cmd in question.
3587          */
3588         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3589                 if (transport_get_sense_data(cmd) < 0)
3590                         reason = TCM_NON_EXISTENT_LUN;
3591
3592                 /*
3593                  * Only set when a struct se_task->task_scsi_status returned
3594                  * a non GOOD status.
3595                  */
3596                 if (cmd->scsi_status) {
3597                         ret = transport_send_check_condition_and_sense(
3598                                         cmd, reason, 1);
3599                         if (ret == -EAGAIN)
3600                                 goto queue_full;
3601
3602                         transport_lun_remove_cmd(cmd);
3603                         transport_cmd_check_stop_to_fabric(cmd);
3604                         return;
3605                 }
3606         }
3607         /*
3608          * Check for a callback, used amongst other things by
3609          * XDWRITE_READ_10 emulation.
3610          */
3611         if (cmd->transport_complete_callback)
3612                 cmd->transport_complete_callback(cmd);
3613
3614         switch (cmd->data_direction) {
3615         case DMA_FROM_DEVICE:
3616                 spin_lock(&cmd->se_lun->lun_sep_lock);
3617                 if (cmd->se_lun->lun_sep) {
3618                         cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3619                                         cmd->data_length;
3620                 }
3621                 spin_unlock(&cmd->se_lun->lun_sep_lock);
3622
3623                 ret = cmd->se_tfo->queue_data_in(cmd);
3624                 if (ret == -EAGAIN)
3625                         goto queue_full;
3626                 break;
3627         case DMA_TO_DEVICE:
3628                 spin_lock(&cmd->se_lun->lun_sep_lock);
3629                 if (cmd->se_lun->lun_sep) {
3630                         cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
3631                                 cmd->data_length;
3632                 }
3633                 spin_unlock(&cmd->se_lun->lun_sep_lock);
3634                 /*
3635                  * Check if we need to send READ payload for BIDI-COMMAND
3636                  */
3637                 if (cmd->t_bidi_data_sg) {
3638                         spin_lock(&cmd->se_lun->lun_sep_lock);
3639                         if (cmd->se_lun->lun_sep) {
3640                                 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3641                                         cmd->data_length;
3642                         }
3643                         spin_unlock(&cmd->se_lun->lun_sep_lock);
3644                         ret = cmd->se_tfo->queue_data_in(cmd);
3645                         if (ret == -EAGAIN)
3646                                 goto queue_full;
3647                         break;
3648                 }
3649                 /* Fall through for DMA_TO_DEVICE */
3650         case DMA_NONE:
3651                 ret = cmd->se_tfo->queue_status(cmd);
3652                 if (ret == -EAGAIN)
3653                         goto queue_full;
3654                 break;
3655         default:
3656                 break;
3657         }
3658
3659 done:
3660         transport_lun_remove_cmd(cmd);
3661         transport_cmd_check_stop_to_fabric(cmd);
3662         return;
3663
3664 queue_full:
3665         pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3666                 " data_direction: %d\n", cmd, cmd->data_direction);
3667         transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
3668 }
3669
3670 static void transport_free_dev_tasks(struct se_cmd *cmd)
3671 {
3672         struct se_task *task, *task_tmp;
3673         unsigned long flags;
3674
3675         spin_lock_irqsave(&cmd->t_state_lock, flags);
3676         list_for_each_entry_safe(task, task_tmp,
3677                                 &cmd->t_task_list, t_list) {
3678                 if (atomic_read(&task->task_active))
3679                         continue;
3680
3681                 kfree(task->task_sg_bidi);
3682                 kfree(task->task_sg);
3683
3684                 list_del(&task->t_list);
3685
3686                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3687                 if (task->se_dev)
3688                         task->se_dev->transport->free_task(task);
3689                 else
3690                         pr_err("task[%u] - task->se_dev is NULL\n",
3691                                 task->task_no);
3692                 spin_lock_irqsave(&cmd->t_state_lock, flags);
3693         }
3694         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3695 }
3696
3697 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3698 {
3699         struct scatterlist *sg;
3700         int count;
3701
3702         for_each_sg(sgl, sg, nents, count)
3703                 __free_page(sg_page(sg));
3704
3705         kfree(sgl);
3706 }
3707
3708 static inline void transport_free_pages(struct se_cmd *cmd)
3709 {
3710         if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3711                 return;
3712
3713         transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3714         cmd->t_data_sg = NULL;
3715         cmd->t_data_nents = 0;
3716
3717         transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3718         cmd->t_bidi_data_sg = NULL;
3719         cmd->t_bidi_data_nents = 0;
3720 }
3721
3722 static inline void transport_release_tasks(struct se_cmd *cmd)
3723 {
3724         transport_free_dev_tasks(cmd);
3725 }
3726
3727 static inline int transport_dec_and_check(struct se_cmd *cmd)
3728 {
3729         unsigned long flags;
3730
3731         spin_lock_irqsave(&cmd->t_state_lock, flags);
3732         if (atomic_read(&cmd->t_fe_count)) {
3733                 if (!atomic_dec_and_test(&cmd->t_fe_count)) {
3734                         spin_unlock_irqrestore(&cmd->t_state_lock,
3735                                         flags);
3736                         return 1;
3737                 }
3738         }
3739
3740         if (atomic_read(&cmd->t_se_count)) {
3741                 if (!atomic_dec_and_test(&cmd->t_se_count)) {
3742                         spin_unlock_irqrestore(&cmd->t_state_lock,
3743                                         flags);
3744                         return 1;
3745                 }
3746         }
3747         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3748
3749         return 0;
3750 }
3751
3752 static void transport_release_fe_cmd(struct se_cmd *cmd)
3753 {
3754         unsigned long flags;
3755
3756         if (transport_dec_and_check(cmd))
3757                 return;
3758
3759         spin_lock_irqsave(&cmd->t_state_lock, flags);
3760         if (!atomic_read(&cmd->transport_dev_active)) {
3761                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3762                 goto free_pages;
3763         }
3764         atomic_set(&cmd->transport_dev_active, 0);
3765         transport_all_task_dev_remove_state(cmd);
3766         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3767
3768         transport_release_tasks(cmd);
3769 free_pages:
3770         transport_free_pages(cmd);
3771         transport_free_se_cmd(cmd);
3772         cmd->se_tfo->release_cmd(cmd);
3773 }
3774
3775 static int
3776 transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
3777 {
3778         unsigned long flags;
3779
3780         if (transport_dec_and_check(cmd)) {
3781                 if (session_reinstatement) {
3782                         spin_lock_irqsave(&cmd->t_state_lock, flags);
3783                         transport_all_task_dev_remove_state(cmd);
3784                         spin_unlock_irqrestore(&cmd->t_state_lock,
3785                                         flags);
3786                 }
3787                 return 1;
3788         }
3789
3790         spin_lock_irqsave(&cmd->t_state_lock, flags);
3791         if (!atomic_read(&cmd->transport_dev_active)) {
3792                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3793                 goto free_pages;
3794         }
3795         atomic_set(&cmd->transport_dev_active, 0);
3796         transport_all_task_dev_remove_state(cmd);
3797         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3798
3799         transport_release_tasks(cmd);
3800
3801 free_pages:
3802         transport_free_pages(cmd);
3803         transport_release_cmd(cmd);
3804         return 0;
3805 }
3806
3807 /*
3808  * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3809  * allocating in the core.
3810  * @cmd:  Associated se_cmd descriptor
3811  * @sgl: SGL style memory for TCM WRITE / READ
3812  * @sgl_count: Number of SGL elements
3813  * @sgl_bidi: SGL style memory for TCM BIDI READ
3814  * @sgl_bidi_count: Number of BIDI READ SGL elements
3815  *
3816  * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
3817  * of parameters.
3818  */
3819 int transport_generic_map_mem_to_cmd(
3820         struct se_cmd *cmd,
3821         struct scatterlist *sgl,
3822         u32 sgl_count,
3823         struct scatterlist *sgl_bidi,
3824         u32 sgl_bidi_count)
3825 {
3826         if (!sgl || !sgl_count)
3827                 return 0;
3828
3829         if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3830             (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3831
3832                 cmd->t_data_sg = sgl;
3833                 cmd->t_data_nents = sgl_count;
3834
3835                 if (sgl_bidi && sgl_bidi_count) {
3836                         cmd->t_bidi_data_sg = sgl_bidi;
3837                         cmd->t_bidi_data_nents = sgl_bidi_count;
3838                 }
3839                 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
3840         }
3841
3842         return 0;
3843 }
3844 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
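/*
 * Illustrative sketch (not part of the build): a fabric module handing its
 * own pre-built SGL to the core instead of letting TCM allocate pages.
 * fabric_build_sgl() is a hypothetical helper.
 */
#if 0
static int example_fabric_map_mem(struct se_cmd *se_cmd)
{
	struct scatterlist *sgl;
	u32 sgl_count;

	sgl = fabric_build_sgl(&sgl_count);	/* hypothetical */
	if (!sgl)
		return -ENOMEM;

	/* No BIDI READ payload in this example, so pass NULL/0 */
	return transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
						NULL, 0);
}
#endif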
3845
3846 static int transport_new_cmd_obj(struct se_cmd *cmd)
3847 {
3848         struct se_device *dev = cmd->se_dev;
3849         u32 task_cdbs;
3850         u32 rc;
3851         int set_counts = 1;
3852
3853         /*
3854          * Setup any BIDI READ tasks and memory from
3855          * cmd->t_bidi_data_sg so the READ struct se_tasks
3856          * are queued first for the non pSCSI passthrough case.
3857          */
3858         if (cmd->t_bidi_data_sg &&
3859             (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
3860                 rc = transport_allocate_tasks(cmd,
3861                                               cmd->t_task_lba,
3862                                               DMA_FROM_DEVICE,
3863                                               cmd->t_bidi_data_sg,
3864                                               cmd->t_bidi_data_nents);
3865                 if (rc <= 0) {
3866                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3867                         cmd->scsi_sense_reason =
3868                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3869                         return PYX_TRANSPORT_LU_COMM_FAILURE;
3870                 }
3871                 atomic_inc(&cmd->t_fe_count);
3872                 atomic_inc(&cmd->t_se_count);
3873                 set_counts = 0;
3874         }
3875         /*
3876          * Setup the tasks and memory from cmd->t_data_sg.
3877          * Note for BIDI transfers this will contain the WRITE payload
3878          */
3879         task_cdbs = transport_allocate_tasks(cmd,
3880                                              cmd->t_task_lba,
3881                                              cmd->data_direction,
3882                                              cmd->t_data_sg,
3883                                              cmd->t_data_nents);
3884         if (task_cdbs <= 0) {
3885                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3886                 cmd->scsi_sense_reason =
3887                         TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3888                 return PYX_TRANSPORT_LU_COMM_FAILURE;
3889         }
3890
3891         if (set_counts) {
3892                 atomic_inc(&cmd->t_fe_count);
3893                 atomic_inc(&cmd->t_se_count);
3894         }
3895
3896         cmd->t_task_list_num = task_cdbs;
3897
3898         atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
3899         atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
3900         atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
3901         return 0;
3902 }
3903
3904 void *transport_kmap_first_data_page(struct se_cmd *cmd)
3905 {
3906         struct scatterlist *sg = cmd->t_data_sg;
3907
3908         BUG_ON(!sg);
3909         /*
3910          * We need to take into account a possible offset here for fabrics like
3911          * tcm_loop that may be using a contig buffer from the SCSI midlayer for
3912          * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
3913          */
3914         return kmap(sg_page(sg)) + sg->offset;
3915 }
3916 EXPORT_SYMBOL(transport_kmap_first_data_page);
3917
3918 void transport_kunmap_first_data_page(struct se_cmd *cmd)
3919 {
3920         kunmap(sg_page(cmd->t_data_sg));
3921 }
3922 EXPORT_SYMBOL(transport_kunmap_first_data_page);
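/*
 * Illustrative sketch (not part of the build): the usual pairing of the two
 * helpers above when emulation code needs CPU access to the first page of
 * a payload, offset included.
 */
#if 0
static void example_peek_first_data_page(struct se_cmd *cmd)
{
	unsigned char *buf;

	buf = transport_kmap_first_data_page(cmd);
	pr_debug("first payload byte: 0x%02x\n", buf[0]);
	transport_kunmap_first_data_page(cmd);
}
#endif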
3923
3924 static int
3925 transport_generic_get_mem(struct se_cmd *cmd)
3926 {
3927         u32 length = cmd->data_length;
3928         unsigned int nents;
3929         struct page *page;
3930         int i = 0;
3931
3932         nents = DIV_ROUND_UP(length, PAGE_SIZE);
3933         cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3934         if (!cmd->t_data_sg)
3935                 return -ENOMEM;
3936
3937         cmd->t_data_nents = nents;
3938         sg_init_table(cmd->t_data_sg, nents);
3939
3940         while (length) {
3941                 u32 page_len = min_t(u32, length, PAGE_SIZE);
3942                 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3943                 if (!page)
3944                         goto out;
3945
3946                 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3947                 length -= page_len;
3948                 i++;
3949         }
3950         return 0;
3951
3952 out:
3953         while (i > 0) {
3954                 i--;
3955                 __free_page(sg_page(&cmd->t_data_sg[i]));
3956         }
3957         kfree(cmd->t_data_sg);
3958         cmd->t_data_sg = NULL;
3959         return -ENOMEM;
3960 }
3961
3962 /* Reduce sectors if they are too long for the device */
3963 static inline sector_t transport_limit_task_sectors(
3964         struct se_device *dev,
3965         unsigned long long lba,
3966         sector_t sectors)
3967 {
3968         sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
3969
3970         if (dev->transport->get_device_type(dev) == TYPE_DISK)
3971                 if ((lba + sectors) > transport_dev_end_lba(dev))
3972                         sectors = ((transport_dev_end_lba(dev) - lba) + 1);
3973
3974         return sectors;
3975 }
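/*
 * Illustrative sketch (not part of the build): the clamping above with
 * hypothetical numbers: max_sectors = 1024 on a 10000-block disk.
 */
#if 0
static sector_t example_limit_sectors(void)
{
	unsigned long long end_lba = 10000;	/* transport_dev_end_lba() */
	unsigned long long lba = 9500;
	sector_t sectors = 2048;

	sectors = min_t(sector_t, sectors, 1024);	/* 2048 -> 1024 */
	if ((lba + sectors) > end_lba)			/* 10524 > 10000 */
		sectors = ((end_lba - lba) + 1);	/* 501 */
	return sectors;
}
#endif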
3976
3978 /*
3979  * This function can be used by HW target mode drivers to create a linked
3980  * scatterlist from all contiguously allocated struct se_task->task_sg[].
3981  * This is intended to be called during the completion path by TCM Core
3982  * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
3983  */
3984 void transport_do_task_sg_chain(struct se_cmd *cmd)
3985 {
3986         struct scatterlist *sg_first = NULL;
3987         struct scatterlist *sg_prev = NULL;
3988         int sg_prev_nents = 0;
3989         struct scatterlist *sg;
3990         struct se_task *task;
3991         u32 chained_nents = 0;
3992         int i;
3993
3994         BUG_ON(!cmd->se_tfo->task_sg_chaining);
3995
3996         /*
3997          * Walk the struct se_task list and setup scatterlist chains
3998          * for each contiguously allocated struct se_task->task_sg[].
3999          */
4000         list_for_each_entry(task, &cmd->t_task_list, t_list) {
4001                 if (!task->task_sg)
4002                         continue;
4003
4004                 BUG_ON(!task->task_padded_sg);
4005
4006                 if (!sg_first) {
4007                         sg_first = task->task_sg;
4008                         chained_nents = task->task_sg_nents;
4009                 } else {
4010                         sg_chain(sg_prev, sg_prev_nents, task->task_sg);
4011                         chained_nents += task->task_sg_nents;
4012                 }
4013
4014                 sg_prev = task->task_sg;
4015                 sg_prev_nents = task->task_sg_nents;
4016         }
4017         /*
4018          * Setup the starting pointer and total t_tasks_sg_chained_no including
4019          * padding SGs for linking and to mark the end.
4020          */
4021         cmd->t_tasks_sg_chained = sg_first;
4022         cmd->t_tasks_sg_chained_no = chained_nents;
4023
4024         pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
4025                 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
4026                 cmd->t_tasks_sg_chained_no);
4027
4028         for_each_sg(cmd->t_tasks_sg_chained, sg,
4029                         cmd->t_tasks_sg_chained_no, i) {
4030
4031                 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
4032                         i, sg, sg_page(sg), sg->length, sg->offset);
4033                 if (sg_is_chain(sg))
4034                         pr_debug("SG: %p sg_is_chain=1\n", sg);
4035                 if (sg_is_last(sg))
4036                         pr_debug("SG: %p sg_is_last=1\n", sg);
4037         }
4038 }
4039 EXPORT_SYMBOL(transport_do_task_sg_chain);
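/*
 * Illustrative sketch (not part of the build): once the chain is built, a
 * HW fabric can walk every task's SG entries through the single head
 * pointer, much like the debug loop above.
 */
#if 0
static u32 example_walk_chained_sgl(struct se_cmd *cmd)
{
	struct scatterlist *sg;
	u32 total = 0;
	int i;

	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i)
		total += sg->length;

	return total;	/* bytes covered by the chained SGL */
}
#endif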
4040
4041 /*
4042  * Break up cmd into chunks transport can handle
4043  */
4044 static int transport_allocate_data_tasks(
4045         struct se_cmd *cmd,
4046         unsigned long long lba,
4047         enum dma_data_direction data_direction,
4048         struct scatterlist *sgl,
4049         unsigned int sgl_nents)
4050 {
4051         unsigned char *cdb = NULL;
4052         struct se_task *task;
4053         struct se_device *dev = cmd->se_dev;
4054         unsigned long flags;
4055         int task_count, i, ret;
4056         sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
4057         u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
4058         struct scatterlist *sg;
4059         struct scatterlist *cmd_sg;
4060
4061         WARN_ON(cmd->data_length % sector_size);
4062         sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
4063         task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
4064
4065         cmd_sg = sgl;
4066         for (i = 0; i < task_count; i++) {
4067                 unsigned int task_size;
4068                 int count;
4069
4070                 task = transport_generic_get_task(cmd, data_direction);
4071                 if (!task)
4072                         return -ENOMEM;
4073
4074                 task->task_lba = lba;
4075                 task->task_sectors = min(sectors, dev_max_sectors);
4076                 task->task_size = task->task_sectors * sector_size;
4077
4078                 cdb = dev->transport->get_cdb(task);
4079                 BUG_ON(!cdb);
4080
4081                 memcpy(cdb, cmd->t_task_cdb,
4082                        scsi_command_size(cmd->t_task_cdb));
4083
4084                 /* Update new cdb with updated lba/sectors */
4085                 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
4086
4087                 /*
4088                  * Check if the fabric module driver is requesting that all
4089                  * struct se_task->task_sg[] be chained together. If so,
4090                  * then allocate an extra padding SG entry for linking and
4091                  * marking the end of the chained SGL.
4092                  * Possibly over-allocate task sgl size by using cmd sgl size.
4093                  * It's so much easier and only a waste when task_count > 1.
4094                  * That is extremely rare.
4095                  */
4096                 task->task_sg_nents = sgl_nents;
4097                 if (cmd->se_tfo->task_sg_chaining) {
4098                         task->task_sg_nents++;
4099                         task->task_padded_sg = 1;
4100                 }
4101
4102                 task->task_sg = kmalloc(sizeof(struct scatterlist) *
4103                                         task->task_sg_nents, GFP_KERNEL);
4104                 if (!task->task_sg) {
4105                         cmd->se_dev->transport->free_task(task);
4106                         return -ENOMEM;
4107                 }
4108
4109                 sg_init_table(task->task_sg, task->task_sg_nents);
4110
4111                 task_size = task->task_size;
4112
4113                 /* Build new sgl, only up to task_size */
4114                 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
4115                         if (cmd_sg->length > task_size)
4116                                 break;
4117
4118                         *sg = *cmd_sg;
4119                         task_size -= cmd_sg->length;
4120                         cmd_sg = sg_next(cmd_sg);
4121                 }
4122
4123                 lba += task->task_sectors;
4124                 sectors -= task->task_sectors;
4125
4126                 spin_lock_irqsave(&cmd->t_state_lock, flags);
4127                 list_add_tail(&task->t_list, &cmd->t_task_list);
4128                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4129         }
4130         /*
4131          * Now perform the memory map of task->task_sg[] into backend
4132          * subsystem memory.
4133          */
4134         list_for_each_entry(task, &cmd->t_task_list, t_list) {
4135                 if (atomic_read(&task->task_sent))
4136                         continue;
4137                 if (!dev->transport->map_data_SG)
4138                         continue;
4139
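                /*
                 * Note: a map_data_SG() failure below is signalled to the
                 * caller as a task count of zero, not as a negative errno.
                 */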
4140                 ret = dev->transport->map_data_SG(task);
4141                 if (ret < 0)
4142                         return 0;
4143         }
4144
4145         return task_count;
4146 }
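
/*
 * Illustration only, not used by the driver: the per-task LBA/sector
 * arithmetic performed by transport_allocate_data_tasks() above, reduced
 * to a standalone sketch.  A hypothetical 2048 sector command against a
 * device with dev_max_sectors = 1024 yields two tasks:
 *
 *   task 0: task_lba = lba +    0, task_sectors = 1024
 *   task 1: task_lba = lba + 1024, task_sectors = 1024
 */
#if 0
static void example_show_task_split(unsigned long long lba, u32 sectors,
                                    u32 dev_max_sectors)
{
        int i = 0;

        while (sectors) {
                u32 cur = min(sectors, dev_max_sectors);

                pr_debug("task %d: lba: %llu sectors: %u\n", i++, lba, cur);
                lba += cur;
                sectors -= cur;
        }
}
#endif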
4147
4148 static int
4149 transport_allocate_control_task(struct se_cmd *cmd)
4150 {
4151         struct se_device *dev = cmd->se_dev;
4152         unsigned char *cdb;
4153         struct se_task *task;
4154         unsigned long flags;
4155         int ret = 0;
4156
4157         task = transport_generic_get_task(cmd, cmd->data_direction);
4158         if (!task)
4159                 return -ENOMEM;
4160
4161         cdb = dev->transport->get_cdb(task);
4162         BUG_ON(!cdb);
4163         memcpy(cdb, cmd->t_task_cdb,
4164                scsi_command_size(cmd->t_task_cdb));
4165
4166         task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
4167                                 GFP_KERNEL);
4168         if (!task->task_sg) {
4169                 cmd->se_dev->transport->free_task(task);
4170                 return -ENOMEM;
4171         }
4172
4173         memcpy(task->task_sg, cmd->t_data_sg,
4174                sizeof(struct scatterlist) * cmd->t_data_nents);
4175         task->task_size = cmd->data_length;
4176         task->task_sg_nents = cmd->t_data_nents;
4177
4178         spin_lock_irqsave(&cmd->t_state_lock, flags);
4179         list_add_tail(&task->t_list, &cmd->t_task_list);
4180         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4181
4182         if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
4183                 if (dev->transport->map_control_SG)
4184                         ret = dev->transport->map_control_SG(task);
4185         } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
4186                 if (dev->transport->cdb_none)
4187                         ret = dev->transport->cdb_none(task);
4188         } else {
4189                 pr_err("target: Unknown control cmd type!\n");
4190                 BUG();
4191         }
4192
4193         /* Success! Return number of tasks allocated */
4194         if (ret == 0)
4195                 return 1;
4196         return ret;
4197 }
4198
4199 static u32 transport_allocate_tasks(
4200         struct se_cmd *cmd,
4201         unsigned long long lba,
4202         enum dma_data_direction data_direction,
4203         struct scatterlist *sgl,
4204         unsigned int sgl_nents)
4205 {
4206         if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
4207                 return transport_allocate_data_tasks(cmd, lba, data_direction,
4208                                                      sgl, sgl_nents);
4209         else
4210                 return transport_allocate_control_task(cmd);
4211
4212 }
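
/*
 * For example, a READ_10 is flagged SCF_SCSI_DATA_SG_IO_CDB by
 * transport_generic_cmd_sequencer() and is split across struct se_task(s)
 * above, while an INQUIRY carries SCF_SCSI_CONTROL_SG_IO_CDB and takes
 * the single-task transport_allocate_control_task() path.
 */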
4213
4214
4215 /*       transport_generic_new_cmd(): Called from transport_processing_thread()
4216  *
4217  *       Allocate storage transport resources from a set of values predefined
4218  *       by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
4219  *       Any non-zero return here is treated as an "out of resource" failure.
4220  */
4221         /*
4222          * Generate struct se_task(s) and/or their payloads for this CDB.
4223          */
4224 int transport_generic_new_cmd(struct se_cmd *cmd)
4225 {
4226         int ret = 0;
4227
4228         /*
4229          * Determine if the TCM fabric module has already allocated physical
4230          * memory, and is directly calling transport_generic_map_mem_to_cmd()
4231          * beforehand.
4232          */
4233         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
4234             cmd->data_length) {
4235                 ret = transport_generic_get_mem(cmd);
4236                 if (ret < 0)
4237                         return ret;
4238         }
4239         /*
4240          * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
4241          * control or data CDB types, and perform the map to backend subsystem
4242          * code from SGL memory allocated here by transport_generic_get_mem(), or
4243          * via pre-existing SGL memory setup explicitly by fabric module code with
4244          * transport_generic_map_mem_to_cmd().
4245          */
4246         ret = transport_new_cmd_obj(cmd);
4247         if (ret < 0)
4248                 return ret;
4249         /*
4250          * For WRITEs, let the fabric know its buffer is ready..
4251          * This WRITE struct se_cmd (and all of its associated struct se_task's)
4252          * will be added to the struct se_device execution queue after its WRITE
4253          * data has arrived. (i.e. it gets handled by the transport processing
4254          * thread a second time)
4255          */
4256         if (cmd->data_direction == DMA_TO_DEVICE) {
4257                 transport_add_tasks_to_state_queue(cmd);
4258                 return transport_generic_write_pending(cmd);
4259         }
4260         /*
4261          * Everything else but a WRITE, add the struct se_cmd's struct se_task's
4262          * to the execution queue.
4263          */
4264         transport_execute_tasks(cmd);
4265         return 0;
4266 }
4267 EXPORT_SYMBOL(transport_generic_new_cmd);
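
/*
 * Sketch of the resulting flows; the callbacks below are the fabric ops
 * from struct target_core_fabric_ops, and example_fabric_write_pending()
 * is a hypothetical fabric implementation:
 *
 *   READ:  transport_generic_new_cmd() -> transport_execute_tasks()
 *          -> backend completes -> fabric ->queue_data_in()
 *   WRITE: transport_generic_new_cmd() -> fabric ->write_pending()
 *          -> WRITE data arrives -> transport_generic_handle_data()
 *          -> transport_generic_process_write()
 */
#if 0
static int example_fabric_write_pending(struct se_cmd *cmd)
{
        /*
         * Post the transfer (R2T, RDMA READ, etc.) for cmd->data_length
         * bytes here, and call transport_generic_handle_data() once the
         * WRITE payload has landed in cmd->t_data_sg.
         */
        return 0;
}
#endif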
4268
4269 /*      transport_generic_process_write():
4270  *
4271  *
4272  */
4273 void transport_generic_process_write(struct se_cmd *cmd)
4274 {
4275         transport_execute_tasks(cmd);
4276 }
4277 EXPORT_SYMBOL(transport_generic_process_write);
4278
4279 static int transport_write_pending_qf(struct se_cmd *cmd)
4280 {
4281         return cmd->se_tfo->write_pending(cmd);
4282 }
4283
4284 /*      transport_generic_write_pending():
4285  *
4286  *
4287  */
4288 static int transport_generic_write_pending(struct se_cmd *cmd)
4289 {
4290         unsigned long flags;
4291         int ret;
4292
4293         spin_lock_irqsave(&cmd->t_state_lock, flags);
4294         cmd->t_state = TRANSPORT_WRITE_PENDING;
4295         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4296
4297         if (cmd->transport_qf_callback) {
4298                 ret = cmd->transport_qf_callback(cmd);
4299                 if (ret == -EAGAIN)
4300                         goto queue_full;
4301                 else if (ret < 0)
4302                         return ret;
4303
4304                 cmd->transport_qf_callback = NULL;
4305                 return 0;
4306         }
4307
4308         /*
4309          * Clear the se_cmd for WRITE_PENDING status in order to set
4310          * cmd->t_transport_active=0 so that transport_generic_handle_data
4311          * can be called from HW target mode interrupt code.  This is safe
4312          * to call with transport_off=1 before cmd->se_tfo->write_pending()
4313          * because the se_cmd->se_lun pointer is not being cleared.
4314          */
4315         transport_cmd_check_stop(cmd, 1, 0);
4316
4317         /*
4318          * Call the fabric write_pending function here to let the
4319          * frontend know that WRITE buffers are ready.
4320          */
4321         ret = cmd->se_tfo->write_pending(cmd);
4322         if (ret == -EAGAIN)
4323                 goto queue_full;
4324         else if (ret < 0)
4325                 return ret;
4326
4327         return PYX_TRANSPORT_WRITE_PENDING;
4328
4329 queue_full:
4330         pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
4331         cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
4332         transport_handle_queue_full(cmd, cmd->se_dev,
4333                         transport_write_pending_qf);
4334         return ret;
4335 }
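
/*
 * On -EAGAIN from the fabric, the command is parked with
 * t_state = TRANSPORT_COMPLETE_QF_WP and transport_processing_thread()
 * re-invokes this function later (see the TRANSPORT_COMPLETE_QF_WP case
 * below).
 */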
4336
4337 void transport_release_cmd(struct se_cmd *cmd)
4338 {
4339         BUG_ON(!cmd->se_tfo);
4340
4341         transport_free_se_cmd(cmd);
4342         cmd->se_tfo->release_cmd(cmd);
4343 }
4344 EXPORT_SYMBOL(transport_release_cmd);
4345
4346 /*      transport_generic_free_cmd():
4347  *
4348  *      Called from processing frontend to release storage engine resources
4349  */
4350 void transport_generic_free_cmd(
4351         struct se_cmd *cmd,
4352         int wait_for_tasks,
4353         int session_reinstatement)
4354 {
4355         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
4356                 transport_release_cmd(cmd);
4357         else {
4358                 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
4359
4360                 if (cmd->se_lun) {
4361 #if 0
4362                         pr_debug("cmd: %p ITT: 0x%08x contains"
4363                                 " cmd->se_lun\n", cmd,
4364                                 cmd->se_tfo->get_task_tag(cmd));
4365 #endif
4366                         transport_lun_remove_cmd(cmd);
4367                 }
4368
4369                 if (wait_for_tasks && cmd->transport_wait_for_tasks)
4370                         cmd->transport_wait_for_tasks(cmd, 0, 0);
4371
4372                 transport_free_dev_tasks(cmd);
4373
4374                 transport_generic_remove(cmd, session_reinstatement);
4375         }
4376 }
4377 EXPORT_SYMBOL(transport_generic_free_cmd);
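
/*
 * Typical fabric-side release once its response has been sent (a sketch;
 * whether to wait for outstanding tasks and whether to request session
 * reinstatement are fabric policy decisions):
 */
#if 0
        transport_generic_free_cmd(cmd, 1 /* wait_for_tasks */,
                        0 /* session_reinstatement */);
#endif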
4378
4379 static void transport_nop_wait_for_tasks(
4380         struct se_cmd *cmd,
4381         int remove_cmd,
4382         int session_reinstatement)
4383 {
4384         return;
4385 }
4386
4387 /*      transport_lun_wait_for_tasks():
4388  *
4389  *      Called from ConfigFS context to stop the passed struct se_cmd to allow
4390  *      a struct se_lun to be successfully shut down.
4391  */
4392 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4393 {
4394         unsigned long flags;
4395         int ret;
4396         /*
4397          * If the frontend has already requested this struct se_cmd to
4398          * be stopped, we can safely ignore this struct se_cmd.
4399          */
4400         spin_lock_irqsave(&cmd->t_state_lock, flags);
4401         if (atomic_read(&cmd->t_transport_stop)) {
4402                 atomic_set(&cmd->transport_lun_stop, 0);
4403                 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
4404                         " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
4405                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4406                 transport_cmd_check_stop(cmd, 1, 0);
4407                 return -EPERM;
4408         }
4409         atomic_set(&cmd->transport_lun_fe_stop, 1);
4410         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4411
4412         wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4413
4414         ret = transport_stop_tasks_for_cmd(cmd);
4415
4416         pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4417                         " %d\n", cmd, cmd->t_task_list_num, ret);
4418         if (!ret) {
4419                 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4420                                 cmd->se_tfo->get_task_tag(cmd));
4421                 wait_for_completion(&cmd->transport_lun_stop_comp);
4422                 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
4423                                 cmd->se_tfo->get_task_tag(cmd));
4424         }
4425         transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
4426
4427         return 0;
4428 }
4429
4430 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4431 {
4432         struct se_cmd *cmd = NULL;
4433         unsigned long lun_flags, cmd_flags;
4434         /*
4435          * Do exception processing and return CHECK_CONDITION status to the
4436          * Initiator Port.
4437          */
4438         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4439         while (!list_empty(&lun->lun_cmd_list)) {
4440                 cmd = list_first_entry(&lun->lun_cmd_list,
4441                        struct se_cmd, se_lun_node);
4442                 list_del(&cmd->se_lun_node);
4443
4444                 atomic_set(&cmd->transport_lun_active, 0);
4445                 /*
4446                  * This will notify iscsi_target_transport.c:
4447                  * transport_cmd_check_stop() that a LUN shutdown is in
4448                  * progress for the iscsi_cmd_t.
4449                  */
4450                 spin_lock(&cmd->t_state_lock);
4451                 pr_debug("SE_LUN[%d] - Setting cmd->transport"
4452                         "_lun_stop for  ITT: 0x%08x\n",
4453                         cmd->se_lun->unpacked_lun,
4454                         cmd->se_tfo->get_task_tag(cmd));
4455                 atomic_set(&cmd->transport_lun_stop, 1);
4456                 spin_unlock(&cmd->t_state_lock);
4457
4458                 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4459
4460                 if (!cmd->se_lun) {
4461                         pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
4462                                 cmd->se_tfo->get_task_tag(cmd),
4463                                 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4464                         BUG();
4465                 }
4466                 /*
4467                  * If the Storage engine still owns the iscsi_cmd_t, determine
4468                  * and/or stop its context.
4469                  */
4470                 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
4471                         "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4472                         cmd->se_tfo->get_task_tag(cmd));
4473
4474                 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
4475                         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4476                         continue;
4477                 }
4478
4479                 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4480                         "_wait_for_tasks(): SUCCESS\n",
4481                         cmd->se_lun->unpacked_lun,
4482                         cmd->se_tfo->get_task_tag(cmd));
4483
4484                 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4485                 if (!atomic_read(&cmd->transport_dev_active)) {
4486                         spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4487                         goto check_cond;
4488                 }
4489                 atomic_set(&cmd->transport_dev_active, 0);
4490                 transport_all_task_dev_remove_state(cmd);
4491                 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4492
4493                 transport_free_dev_tasks(cmd);
4494                 /*
4495                  * The Storage engine stopped this struct se_cmd before it was
4496                  * sent to the fabric frontend for delivery back to the
4497                  * Initiator Node.  Return this SCSI CDB back with a
4498                  * CHECK_CONDITION status.
4499                  */
4500 check_cond:
4501                 transport_send_check_condition_and_sense(cmd,
4502                                 TCM_NON_EXISTENT_LUN, 0);
4503                 /*
4504                  * If the fabric frontend is waiting for this iscsi_cmd_t to
4505                  * be released, notify the waiting thread now that LU has
4506                  * finished accessing it.
4507                  */
4508                 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4509                 if (atomic_read(&cmd->transport_lun_fe_stop)) {
4510                         pr_debug("SE_LUN[%d] - Detected FE stop for"
4511                                 " struct se_cmd: %p ITT: 0x%08x\n",
4512                                 lun->unpacked_lun,
4513                                 cmd, cmd->se_tfo->get_task_tag(cmd));
4514
4515                         spin_unlock_irqrestore(&cmd->t_state_lock,
4516                                         cmd_flags);
4517                         transport_cmd_check_stop(cmd, 1, 0);
4518                         complete(&cmd->transport_lun_fe_stop_comp);
4519                         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4520                         continue;
4521                 }
4522                 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
4523                         lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
4524
4525                 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4526                 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4527         }
4528         spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4529 }
4530
4531 static int transport_clear_lun_thread(void *p)
4532 {
4533         struct se_lun *lun = (struct se_lun *)p;
4534
4535         __transport_clear_lun_from_sessions(lun);
4536         complete(&lun->lun_shutdown_comp);
4537
4538         return 0;
4539 }
4540
4541 int transport_clear_lun_from_sessions(struct se_lun *lun)
4542 {
4543         struct task_struct *kt;
4544
4545         kt = kthread_run(transport_clear_lun_thread, lun,
4546                         "tcm_cl_%u", lun->unpacked_lun);
4547         if (IS_ERR(kt)) {
4548                 pr_err("Unable to start clear_lun thread\n");
4549                 return PTR_ERR(kt);
4550         }
4551         wait_for_completion(&lun->lun_shutdown_comp);
4552
4553         return 0;
4554 }
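
/*
 * The synchronous-kthread pattern used above, reduced to its parts.
 * All example_* names are hypothetical; the real code keys the
 * completion off lun->lun_shutdown_comp.
 */
#if 0
static DECLARE_COMPLETION(example_comp);

static int example_thread(void *p)
{
        /* ... perform the shutdown work ... */
        complete(&example_comp);
        return 0;
}

static int example_run_and_wait(void)
{
        struct task_struct *kt;

        kt = kthread_run(example_thread, NULL, "example");
        if (IS_ERR(kt))
                return PTR_ERR(kt);
        wait_for_completion(&example_comp);
        return 0;
}
#endif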
4555
4556 /*      transport_generic_wait_for_tasks():
4557  *
4558  *      Called from frontend or passthrough context to wait for storage engine
4559  *      to pause and/or release frontend generated struct se_cmd.
4560  */
4561 static void transport_generic_wait_for_tasks(
4562         struct se_cmd *cmd,
4563         int remove_cmd,
4564         int session_reinstatement)
4565 {
4566         unsigned long flags;
4567
4568         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
4569                 return;
4570
4571         spin_lock_irqsave(&cmd->t_state_lock, flags);
4572         /*
4573          * If we are already stopped due to an external event (i.e. LUN shutdown),
4574          * sleep until the connection can have the passed struct se_cmd back.
4575          * The cmd->transport_lun_fe_stop_comp will be completed by
4576          * transport_clear_lun_from_sessions() once the ConfigFS context caller
4577          * has completed its operation on the struct se_cmd.
4578          */
4579         if (atomic_read(&cmd->transport_lun_stop)) {
4580
4581                 pr_debug("wait_for_tasks: Stopping"
4582                         " wait_for_completion(&cmd->transport_lun_fe"
4583                         "_stop_comp); for ITT: 0x%08x\n",
4584                         cmd->se_tfo->get_task_tag(cmd));
4585                 /*
4586                  * There is a special case for WRITES where a FE exception +
4587                  * LUN shutdown means ConfigFS context is still sleeping on
4588                  * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4589                  * We go ahead and up transport_lun_stop_comp just to be sure
4590                  * here.
4591                  */
4592                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4593                 complete(&cmd->transport_lun_stop_comp);
4594                 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4595                 spin_lock_irqsave(&cmd->t_state_lock, flags);
4596
4597                 transport_all_task_dev_remove_state(cmd);
4598                 /*
4599                  * At this point, the frontend who was the originator of this
4600                  * struct se_cmd, now owns the structure and can be released through
4601                  * normal means below.
4602                  */
4603                 pr_debug("wait_for_tasks: Stopped"
4604                         " wait_for_completion(&cmd->transport_lun_fe_"
4605                         "stop_comp); for ITT: 0x%08x\n",
4606                         cmd->se_tfo->get_task_tag(cmd));
4607
4608                 atomic_set(&cmd->transport_lun_stop, 0);
4609         }
4610         if (!atomic_read(&cmd->t_transport_active) ||
4611              atomic_read(&cmd->t_transport_aborted))
4612                 goto remove;
4613
4614         atomic_set(&cmd->t_transport_stop, 1);
4615
4616         pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
4617                 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
4618                 " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
4619                 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
4620                 cmd->deferred_t_state);
4621
4622         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4623
4624         wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4625
4626         wait_for_completion(&cmd->t_transport_stop_comp);
4627
4628         spin_lock_irqsave(&cmd->t_state_lock, flags);
4629         atomic_set(&cmd->t_transport_active, 0);
4630         atomic_set(&cmd->t_transport_stop, 0);
4631
4632         pr_debug("wait_for_tasks: Stopped wait_for_completion("
4633                 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
4634                 cmd->se_tfo->get_task_tag(cmd));
4635 remove:
4636         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4637         if (!remove_cmd)
4638                 return;
4639
4640         transport_generic_free_cmd(cmd, 0, session_reinstatement);
4641 }
4642
4643 static int transport_get_sense_codes(
4644         struct se_cmd *cmd,
4645         u8 *asc,
4646         u8 *ascq)
4647 {
4648         *asc = cmd->scsi_asc;
4649         *ascq = cmd->scsi_ascq;
4650
4651         return 0;
4652 }
4653
4654 static int transport_set_sense_codes(
4655         struct se_cmd *cmd,
4656         u8 asc,
4657         u8 ascq)
4658 {
4659         cmd->scsi_asc = asc;
4660         cmd->scsi_ascq = ascq;
4661
4662         return 0;
4663 }
4664
4665 int transport_send_check_condition_and_sense(
4666         struct se_cmd *cmd,
4667         u8 reason,
4668         int from_transport)
4669 {
4670         unsigned char *buffer = cmd->sense_buffer;
4671         unsigned long flags;
4672         int offset;
4673         u8 asc = 0, ascq = 0;
4674
4675         spin_lock_irqsave(&cmd->t_state_lock, flags);
4676         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4677                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4678                 return 0;
4679         }
4680         cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
4681         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4682
4683         if (!reason && from_transport)
4684                 goto after_reason;
4685
4686         if (!from_transport)
4687                 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4688         /*
4689          * Data Segment and SenseLength of the fabric response PDU.
4690          *
4691          * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4692          * from include/scsi/scsi_cmnd.h
4693          */
4694         offset = cmd->se_tfo->set_fabric_sense_len(cmd,
4695                                 TRANSPORT_SENSE_BUFFER);
4696         /*
4697          * Actual SENSE DATA, see SPC-3 7.23.2.  SPC_SENSE_KEY_OFFSET uses
4698          * SENSE KEY values from include/scsi/scsi.h
4699          */
4700         switch (reason) {
4701         case TCM_NON_EXISTENT_LUN:
4702         case TCM_UNSUPPORTED_SCSI_OPCODE:
4703         case TCM_SECTOR_COUNT_TOO_MANY:
4704                 /* CURRENT ERROR */
4705                 buffer[offset] = 0x70;
4706                 /* ILLEGAL REQUEST */
4707                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4708                 /* INVALID COMMAND OPERATION CODE */
4709                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4710                 break;
4711         case TCM_UNKNOWN_MODE_PAGE:
4712                 /* CURRENT ERROR */
4713                 buffer[offset] = 0x70;
4714                 /* ILLEGAL REQUEST */
4715                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4716                 /* INVALID FIELD IN CDB */
4717                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4718                 break;
4719         case TCM_CHECK_CONDITION_ABORT_CMD:
4720                 /* CURRENT ERROR */
4721                 buffer[offset] = 0x70;
4722                 /* ABORTED COMMAND */
4723                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4724                 /* BUS DEVICE RESET FUNCTION OCCURRED */
4725                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4726                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4727                 break;
4728         case TCM_INCORRECT_AMOUNT_OF_DATA:
4729                 /* CURRENT ERROR */
4730                 buffer[offset] = 0x70;
4731                 /* ABORTED COMMAND */
4732                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4733                 /* WRITE ERROR */
4734                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4735                 /* NOT ENOUGH UNSOLICITED DATA */
4736                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4737                 break;
4738         case TCM_INVALID_CDB_FIELD:
4739                 /* CURRENT ERROR */
4740                 buffer[offset] = 0x70;
4741                 /* ABORTED COMMAND */
4742                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4743                 /* INVALID FIELD IN CDB */
4744                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4745                 break;
4746         case TCM_INVALID_PARAMETER_LIST:
4747                 /* CURRENT ERROR */
4748                 buffer[offset] = 0x70;
4749                 /* ABORTED COMMAND */
4750                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4751                 /* INVALID FIELD IN PARAMETER LIST */
4752                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4753                 break;
4754         case TCM_UNEXPECTED_UNSOLICITED_DATA:
4755                 /* CURRENT ERROR */
4756                 buffer[offset] = 0x70;
4757                 /* ABORTED COMMAND */
4758                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4759                 /* WRITE ERROR */
4760                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4761                 /* UNEXPECTED_UNSOLICITED_DATA */
4762                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4763                 break;
4764         case TCM_SERVICE_CRC_ERROR:
4765                 /* CURRENT ERROR */
4766                 buffer[offset] = 0x70;
4767                 /* ABORTED COMMAND */
4768                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4769                 /* PROTOCOL SERVICE CRC ERROR */
4770                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
4771                 /* N/A */
4772                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
4773                 break;
4774         case TCM_SNACK_REJECTED:
4775                 /* CURRENT ERROR */
4776                 buffer[offset] = 0x70;
4777                 /* ABORTED COMMAND */
4778                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4779                 /* READ ERROR */
4780                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
4781                 /* FAILED RETRANSMISSION REQUEST */
4782                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
4783                 break;
4784         case TCM_WRITE_PROTECTED:
4785                 /* CURRENT ERROR */
4786                 buffer[offset] = 0x70;
4787                 /* DATA PROTECT */
4788                 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4789                 /* WRITE PROTECTED */
4790                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
4791                 break;
4792         case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4793                 /* CURRENT ERROR */
4794                 buffer[offset] = 0x70;
4795                 /* UNIT ATTENTION */
4796                 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4797                 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
4798                 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4799                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4800                 break;
4801         case TCM_CHECK_CONDITION_NOT_READY:
4802                 /* CURRENT ERROR */
4803                 buffer[offset] = 0x70;
4804                 /* Not Ready */
4805                 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4806                 transport_get_sense_codes(cmd, &asc, &ascq);
4807                 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4808                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4809                 break;
4810         case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
4811         default:
4812                 /* CURRENT ERROR */
4813                 buffer[offset] = 0x70;
4814                 /* ILLEGAL REQUEST */
4815                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4816                 /* LOGICAL UNIT COMMUNICATION FAILURE */
4817                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
4818                 break;
4819         }
4820         /*
4821          * This code uses linux/include/scsi/scsi.h SAM status codes!
4822          */
4823         cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
4824         /*
4825          * Automatically padded, this value is encoded in the fabric's
4826          * data_length response PDU containing the SCSI defined sense data.
4827          */
4828         cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
4829
4830 after_reason:
4831         return cmd->se_tfo->queue_status(cmd);
4832 }
4833 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
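
/*
 * Layout of the fixed-format sense data built above, relative to the
 * fabric-provided offset (see SPC-3 4.5.3):
 *
 *   buffer[offset + 0]                    = 0x70 (current error, fixed format)
 *   buffer[offset + SPC_SENSE_KEY_OFFSET] = SENSE KEY  (byte 2)
 *   buffer[offset + SPC_ASC_KEY_OFFSET]   = ASC        (byte 12)
 *   buffer[offset + SPC_ASCQ_KEY_OFFSET]  = ASCQ       (byte 13)
 */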
4834
4835 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4836 {
4837         int ret = 0;
4838
4839         if (atomic_read(&cmd->t_transport_aborted) != 0) {
4840                 if (!send_status ||
4841                      (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4842                         return 1;
4843 #if 0
4844                 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
4845                         " status for CDB: 0x%02x ITT: 0x%08x\n",
4846                         cmd->t_task_cdb[0],
4847                         cmd->se_tfo->get_task_tag(cmd));
4848 #endif
4849                 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
4850                 cmd->se_tfo->queue_status(cmd);
4851                 ret = 1;
4852         }
4853         return ret;
4854 }
4855 EXPORT_SYMBOL(transport_check_aborted_status);
4856
4857 void transport_send_task_abort(struct se_cmd *cmd)
4858 {
4859         /*
4860          * If there are still expected incoming fabric WRITEs, we wait
4861          * until they have completed before sending a TASK_ABORTED
4862          * response.  This response with TASK_ABORTED status will be
4863          * queued back to fabric module by transport_check_aborted_status().
4864          */
4865         if (cmd->data_direction == DMA_TO_DEVICE) {
4866                 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4867                         atomic_inc(&cmd->t_transport_aborted);
4868                         smp_mb__after_atomic_inc();
4869                         cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4870                         transport_new_cmd_failure(cmd);
4871                         return;
4872                 }
4873         }
4874         cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4875 #if 0
4876         pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
4877                 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
4878                 cmd->se_tfo->get_task_tag(cmd));
4879 #endif
4880         cmd->se_tfo->queue_status(cmd);
4881 }
4882
4883 /*      transport_generic_do_tmr():
4884  *
4885  *
4886  */
4887 int transport_generic_do_tmr(struct se_cmd *cmd)
4888 {
4889         struct se_device *dev = cmd->se_dev;
4890         struct se_tmr_req *tmr = cmd->se_tmr_req;
4891         int ret;
4892
4893         switch (tmr->function) {
4894         case TMR_ABORT_TASK:
4895                 tmr->response = TMR_FUNCTION_REJECTED;
4896                 break;
4897         case TMR_ABORT_TASK_SET:
4898         case TMR_CLEAR_ACA:
4899         case TMR_CLEAR_TASK_SET:
4900                 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4901                 break;
4902         case TMR_LUN_RESET:
4903                 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4904                 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4905                                          TMR_FUNCTION_REJECTED;
4906                 break;
4907         case TMR_TARGET_WARM_RESET:
4908                 tmr->response = TMR_FUNCTION_REJECTED;
4909                 break;
4910         case TMR_TARGET_COLD_RESET:
4911                 tmr->response = TMR_FUNCTION_REJECTED;
4912                 break;
4913         default:
4914                 pr_err("Unknown TMR function: 0x%02x.\n",
4915                                 tmr->function);
4916                 tmr->response = TMR_FUNCTION_REJECTED;
4917                 break;
4918         }
4919
4920         cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
4921         cmd->se_tfo->queue_tm_rsp(cmd);
4922
4923         transport_cmd_check_stop(cmd, 2, 0);
4924         return 0;
4925 }
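
/*
 * Sketch of how a fabric reaches this function for a LUN reset
 * (illustrative only; a real fabric allocates the TMR request via
 * core_tmr_alloc_req() and queues the command for processing):
 */
#if 0
        cmd->se_tmr_req->function = TMR_LUN_RESET;
        transport_generic_handle_tmr(cmd);
#endif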
4926
4927 /*
4928  *      Called with spin_lock_irq(&dev->execute_task_lock); held
4929  *
4930  */
4931 static struct se_task *
4932 transport_get_task_from_state_list(struct se_device *dev)
4933 {
4934         struct se_task *task;
4935
4936         if (list_empty(&dev->state_task_list))
4937                 return NULL;
4938
4939         task = list_first_entry(&dev->state_task_list,
4940                         struct se_task, t_state_list);
4941
4942         list_del(&task->t_state_list);
4943         atomic_set(&task->task_state_active, 0);
4944
4945         return task;
4946 }
4947
4948 static void transport_processing_shutdown(struct se_device *dev)
4949 {
4950         struct se_cmd *cmd;
4951         struct se_task *task;
4952         unsigned long flags;
4953         /*
4954          * Empty the struct se_device's struct se_task state list.
4955          */
4956         spin_lock_irqsave(&dev->execute_task_lock, flags);
4957         while ((task = transport_get_task_from_state_list(dev))) {
4958                 if (!task->task_se_cmd) {
4959                         pr_err("task->task_se_cmd is NULL!\n");
4960                         continue;
4961                 }
4962                 cmd = task->task_se_cmd;
4963
4964                 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
4965
4966                 spin_lock_irqsave(&cmd->t_state_lock, flags);
4967
4968                 pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
4969                         " i_state: %d, t_state/def_t_state:"
4970                         " %d/%d cdb: 0x%02x\n", cmd, task,
4971                         cmd->se_tfo->get_task_tag(cmd),
4972                         cmd->se_tfo->get_cmd_state(cmd),
4973                         cmd->t_state, cmd->deferred_t_state,
4974                         cmd->t_task_cdb[0]);
4975                 pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
4976                         " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
4977                         " t_transport_stop: %d t_transport_sent: %d\n",
4978                         cmd->se_tfo->get_task_tag(cmd),
4979                         cmd->t_task_list_num,
4980                         atomic_read(&cmd->t_task_cdbs_left),
4981                         atomic_read(&cmd->t_task_cdbs_sent),
4982                         atomic_read(&cmd->t_transport_active),
4983                         atomic_read(&cmd->t_transport_stop),
4984                         atomic_read(&cmd->t_transport_sent));
4985
4986                 if (atomic_read(&task->task_active)) {
4987                         atomic_set(&task->task_stop, 1);
4988                         spin_unlock_irqrestore(
4989                                 &cmd->t_state_lock, flags);
4990
4991                         pr_debug("Waiting for task: %p to shutdown for dev:"
4992                                 " %p\n", task, dev);
4993                         wait_for_completion(&task->task_stop_comp);
4994                         pr_debug("Completed task: %p shutdown for dev: %p\n",
4995                                 task, dev);
4996
4997                         spin_lock_irqsave(&cmd->t_state_lock, flags);
4998                         atomic_dec(&cmd->t_task_cdbs_left);
4999
5000                         atomic_set(&task->task_active, 0);
5001                         atomic_set(&task->task_stop, 0);
5002                 } else {
5003                         if (atomic_read(&task->task_execute_queue) != 0)
5004                                 transport_remove_task_from_execute_queue(task, dev);
5005                 }
5006                 __transport_stop_task_timer(task, &flags);
5007
5008                 if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
5009                         spin_unlock_irqrestore(
5010                                         &cmd->t_state_lock, flags);
5011
5012                         pr_debug("Skipping task: %p, dev: %p for"
5013                                 " t_task_cdbs_ex_left: %d\n", task, dev,
5014                                 atomic_read(&cmd->t_task_cdbs_ex_left));
5015
5016                         spin_lock_irqsave(&dev->execute_task_lock, flags);
5017                         continue;
5018                 }
5019
5020                 if (atomic_read(&cmd->t_transport_active)) {
5021                         pr_debug("got t_transport_active = 1 for task: %p, dev:"
5022                                         " %p\n", task, dev);
5023
5024                         if (atomic_read(&cmd->t_fe_count)) {
5025                                 spin_unlock_irqrestore(
5026                                         &cmd->t_state_lock, flags);
5027                                 transport_send_check_condition_and_sense(
5028                                         cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
5029                                         0);
5030                                 transport_remove_cmd_from_queue(cmd,
5031                                         &cmd->se_dev->dev_queue_obj);
5032
5033                                 transport_lun_remove_cmd(cmd);
5034                                 transport_cmd_check_stop(cmd, 1, 0);
5035                         } else {
5036                                 spin_unlock_irqrestore(
5037                                         &cmd->t_state_lock, flags);
5038
5039                                 transport_remove_cmd_from_queue(cmd,
5040                                         &cmd->se_dev->dev_queue_obj);
5041
5042                                 transport_lun_remove_cmd(cmd);
5043
5044                                 if (transport_cmd_check_stop(cmd, 1, 0))
5045                                         transport_generic_remove(cmd, 0);
5046                         }
5047
5048                         spin_lock_irqsave(&dev->execute_task_lock, flags);
5049                         continue;
5050                 }
5051                 pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
5052                                 task, dev);
5053
5054                 if (atomic_read(&cmd->t_fe_count)) {
5055                         spin_unlock_irqrestore(
5056                                 &cmd->t_state_lock, flags);
5057                         transport_send_check_condition_and_sense(cmd,
5058                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
5059                         transport_remove_cmd_from_queue(cmd,
5060                                 &cmd->se_dev->dev_queue_obj);
5061
5062                         transport_lun_remove_cmd(cmd);
5063                         transport_cmd_check_stop(cmd, 1, 0);
5064                 } else {
5065                         spin_unlock_irqrestore(
5066                                 &cmd->t_state_lock, flags);
5067
5068                         transport_remove_cmd_from_queue(cmd,
5069                                 &cmd->se_dev->dev_queue_obj);
5070                         transport_lun_remove_cmd(cmd);
5071
5072                         if (transport_cmd_check_stop(cmd, 1, 0))
5073                                 transport_generic_remove(cmd, 0);
5074                 }
5075
5076                 spin_lock_irqsave(&dev->execute_task_lock, flags);
5077         }
5078         spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5079         /*
5080          * Empty the struct se_device's struct se_cmd list.
5081          */
5082         while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
5083
5084                 pr_debug("From Device Queue: cmd: %p t_state: %d\n",
5085                                 cmd, cmd->t_state);
5086
5087                 if (atomic_read(&cmd->t_fe_count)) {
5088                         transport_send_check_condition_and_sense(cmd,
5089                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
5090
5091                         transport_lun_remove_cmd(cmd);
5092                         transport_cmd_check_stop(cmd, 1, 0);
5093                 } else {
5094                         transport_lun_remove_cmd(cmd);
5095                         if (transport_cmd_check_stop(cmd, 1, 0))
5096                                 transport_generic_remove(cmd, 0);
5097                 }
5098         }
5099 }
5100
5101 /*      transport_processing_thread():
5102  *
5103  *
5104  */
5105 static int transport_processing_thread(void *param)
5106 {
5107         int ret;
5108         struct se_cmd *cmd;
5109         struct se_device *dev = param;
5110
5111         set_user_nice(current, -20);
5112
5113         while (!kthread_should_stop()) {
5114                 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
5115                                 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
5116                                 kthread_should_stop());
5117                 if (ret < 0)
5118                         goto out;
5119
5120                 spin_lock_irq(&dev->dev_status_lock);
5121                 if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
5122                         spin_unlock_irq(&dev->dev_status_lock);
5123                         transport_processing_shutdown(dev);
5124                         continue;
5125                 }
5126                 spin_unlock_irq(&dev->dev_status_lock);
5127
5128 get_cmd:
5129                 __transport_execute_tasks(dev);
5130
5131                 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
5132                 if (!cmd)
5133                         continue;
5134
5135                 switch (cmd->t_state) {
5136                 case TRANSPORT_NEW_CMD_MAP:
5137                         if (!cmd->se_tfo->new_cmd_map) {
5138                                 pr_err("cmd->se_tfo->new_cmd_map is"
5139                                         " NULL for TRANSPORT_NEW_CMD_MAP\n");
5140                                 BUG();
5141                         }
5142                         ret = cmd->se_tfo->new_cmd_map(cmd);
5143                         if (ret < 0) {
5144                                 cmd->transport_error_status = ret;
5145                                 transport_generic_request_failure(cmd, NULL,
5146                                                 0, (cmd->data_direction !=
5147                                                     DMA_TO_DEVICE));
5148                                 break;
5149                         }
5150                         /* Fall through */
5151                 case TRANSPORT_NEW_CMD:
5152                         ret = transport_generic_new_cmd(cmd);
5153                         if (ret == -EAGAIN)
5154                                 break;
5155                         else if (ret < 0) {
5156                                 cmd->transport_error_status = ret;
5157                                 transport_generic_request_failure(cmd, NULL,
5158                                         0, (cmd->data_direction !=
5159                                          DMA_TO_DEVICE));
5160                         }
5161                         break;
5162                 case TRANSPORT_PROCESS_WRITE:
5163                         transport_generic_process_write(cmd);
5164                         break;
5165                 case TRANSPORT_COMPLETE_OK:
5166                         transport_stop_all_task_timers(cmd);
5167                         transport_generic_complete_ok(cmd);
5168                         break;
5169                 case TRANSPORT_REMOVE:
5170                         transport_generic_remove(cmd, 0);
5171                         break;
5172                 case TRANSPORT_FREE_CMD_INTR:
5173                         transport_generic_free_cmd(cmd, 0, 0);
5174                         break;
5175                 case TRANSPORT_PROCESS_TMR:
5176                         transport_generic_do_tmr(cmd);
5177                         break;
5178                 case TRANSPORT_COMPLETE_FAILURE:
5179                         transport_generic_request_failure(cmd, NULL, 1, 1);
5180                         break;
5181                 case TRANSPORT_COMPLETE_TIMEOUT:
5182                         transport_stop_all_task_timers(cmd);
5183                         transport_generic_request_timeout(cmd);
5184                         break;
5185                 case TRANSPORT_COMPLETE_QF_WP:
5186                         transport_generic_write_pending(cmd);
5187                         break;
5188                 default:
5189                         pr_err("Unknown t_state: %d deferred_t_state:"
5190                                 " %d for ITT: 0x%08x i_state: %d on SE LUN:"
5191                                 " %u\n", cmd->t_state, cmd->deferred_t_state,
5192                                 cmd->se_tfo->get_task_tag(cmd),
5193                                 cmd->se_tfo->get_cmd_state(cmd),
5194                                 cmd->se_lun->unpacked_lun);
5195                         BUG();
5196                 }
5197
5198                 goto get_cmd;
5199         }
5200
5201 out:
5202         transport_release_all_cmds(dev);
5203         dev->process_thread = NULL;
5204         return 0;
5205 }
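
/*
 * The loop shape above, reduced to a sketch (example_processing_thread
 * is hypothetical): sleep until work or a stop request arrives, honor
 * device shutdown, then drain the queue and dispatch on cmd->t_state.
 */
#if 0
static int example_processing_thread(void *param)
{
        struct se_device *dev = param;

        while (!kthread_should_stop()) {
                if (wait_event_interruptible(dev->dev_queue_obj.thread_wq,
                                atomic_read(&dev->dev_queue_obj.queue_cnt) ||
                                kthread_should_stop()) < 0)
                        break;
                /* ... drain dev->dev_queue_obj, switch on cmd->t_state ... */
        }
        return 0;
}
#endif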