isci: unify isci_remote_device and scic_sds_remote_device
[linux-2.6.git] / drivers / scsi / isci / request.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55
56 #include "isci.h"
57 #include "task.h"
58 #include "request.h"
59 #include "sata.h"
60 #include "scu_completion_codes.h"
61 #include "scu_event_codes.h"
62 #include "sas.h"
63
64 static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
65                                                         int idx)
66 {
67         if (idx == 0)
68                 return &ireq->tc->sgl_pair_ab;
69         else if (idx == 1)
70                 return &ireq->tc->sgl_pair_cd;
71         else if (idx < 0)
72                 return NULL;
73         else
74                 return &ireq->sg_table[idx - 2];
75 }
76
77 static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic,
78                                           struct isci_request *ireq, u32 idx)
79 {
80         u32 offset;
81
82         if (idx == 0) {
83                 offset = (void *) &ireq->tc->sgl_pair_ab -
84                          (void *) &scic->task_context_table[0];
85                 return scic->task_context_dma + offset;
86         } else if (idx == 1) {
87                 offset = (void *) &ireq->tc->sgl_pair_cd -
88                          (void *) &scic->task_context_table[0];
89                 return scic->task_context_dma + offset;
90         }
91
92         return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
93 }
94
95 static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
96 {
97         e->length = sg_dma_len(sg);
98         e->address_upper = upper_32_bits(sg_dma_address(sg));
99         e->address_lower = lower_32_bits(sg_dma_address(sg));
100         e->address_modifier = 0;
101 }
102
/* scic_sds_request_build_sgl() - build the hardware SGL for @ireq.
 *
 * Walks the libsas scatterlist two entries at a time, filling the A/B
 * elements of each scu_sgl_element_pair and chaining each pair to the
 * next via its bus address.  If the task has no scatterlist
 * (num_scatter == 0), task->scatter is treated as a single flat buffer
 * and dma-mapped directly; the mapping is recorded in
 * zero_scatter_daddr (presumably unmapped at completion elsewhere —
 * not visible in this chunk).  The final pair's next-pair link is
 * zeroed to terminate the list.
 */
static void scic_sds_request_build_sgl(struct isci_request *ireq)
{
        struct isci_host *isci_host = ireq->isci_host;
        struct scic_sds_controller *scic = &isci_host->sci;
        struct sas_task *task = isci_request_access_task(ireq);
        struct scatterlist *sg = NULL;
        dma_addr_t dma_addr;
        u32 sg_idx = 0;
        struct scu_sgl_element_pair *scu_sg   = NULL;
        struct scu_sgl_element_pair *prev_sg  = NULL;

        if (task->num_scatter > 0) {
                sg = task->scatter;

                while (sg) {
                        scu_sg = to_sgl_element_pair(ireq, sg_idx);
                        init_sgl_element(&scu_sg->A, sg);
                        sg = sg_next(sg);
                        if (sg) {
                                init_sgl_element(&scu_sg->B, sg);
                                sg = sg_next(sg);
                        } else
                                /* odd number of entries: B half unused */
                                memset(&scu_sg->B, 0, sizeof(scu_sg->B));

                        /* link the previous pair to this one */
                        if (prev_sg) {
                                dma_addr = to_sgl_element_pair_dma(scic,
                                                                   ireq,
                                                                   sg_idx);

                                prev_sg->next_pair_upper =
                                        upper_32_bits(dma_addr);
                                prev_sg->next_pair_lower =
                                        lower_32_bits(dma_addr);
                        }

                        prev_sg = scu_sg;
                        sg_idx++;
                }
        } else {        /* handle when no sg */
                scu_sg = to_sgl_element_pair(ireq, sg_idx);

                /* map task->scatter as one contiguous buffer */
                dma_addr = dma_map_single(&isci_host->pdev->dev,
                                          task->scatter,
                                          task->total_xfer_len,
                                          task->data_dir);

                ireq->zero_scatter_daddr = dma_addr;

                scu_sg->A.length = task->total_xfer_len;
                scu_sg->A.address_upper = upper_32_bits(dma_addr);
                scu_sg->A.address_lower = lower_32_bits(dma_addr);
        }

        /* terminate the chain at the last pair written */
        if (scu_sg) {
                scu_sg->next_pair_upper = 0;
                scu_sg->next_pair_lower = 0;
        }
}
161
162 static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq)
163 {
164         struct ssp_cmd_iu *cmd_iu;
165         struct sas_task *task = isci_request_access_task(ireq);
166
167         cmd_iu = &ireq->ssp.cmd;
168
169         memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
170         cmd_iu->add_cdb_len = 0;
171         cmd_iu->_r_a = 0;
172         cmd_iu->_r_b = 0;
173         cmd_iu->en_fburst = 0; /* unsupported */
174         cmd_iu->task_prio = task->ssp_task.task_prio;
175         cmd_iu->task_attr = task->ssp_task.task_attr;
176         cmd_iu->_r_c = 0;
177
178         sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
179                        sizeof(task->ssp_task.cdb) / sizeof(u32));
180 }
181
182 static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq)
183 {
184         struct ssp_task_iu *task_iu;
185         struct sas_task *task = isci_request_access_task(ireq);
186         struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
187
188         task_iu = &ireq->ssp.tmf;
189
190         memset(task_iu, 0, sizeof(struct ssp_task_iu));
191
192         memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
193
194         task_iu->task_func = isci_tmf->tmf_code;
195         task_iu->task_tag =
196                 (ireq->ttype == tmf_task) ?
197                 isci_tmf->io_tag :
198                 SCI_CONTROLLER_INVALID_IO_TAG;
199 }
200
/**
 * scu_ssp_reqeust_construct_task_context() - fill in the SCU task
 *    context fields common to every type of SSP request.
 * @ireq: the request whose task context is being built
 * @task_context: the task context buffer to program
 *
 * Programs routing (protocol engine / port / remote node), protocol
 * type, the post_context command word, and the bus addresses of the
 * command and response IUs.  Frame-type-specific fields (task_type,
 * lengths, frame_type) are set by the SSP IO/TASK constructors that
 * call this.
 */
static void scu_ssp_reqeust_construct_task_context(
        struct isci_request *ireq,
        struct scu_task_context *task_context)
{
        dma_addr_t dma_addr;
        struct isci_remote_device *idev;
        struct isci_port *iport;

        idev = scic_sds_request_get_device(ireq);
        iport = scic_sds_request_get_port(ireq);

        /* Fill in the TC with the its required data */
        task_context->abort = 0;
        task_context->priority = 0;
        task_context->initiator_request = 1;
        task_context->connection_rate = idev->connection_rate;
        /* NOTE(review): 'controller' is not declared in this function;
         * presumably the macro ignores its argument — confirm against
         * its definition.
         */
        task_context->protocol_engine_index =
                scic_sds_controller_get_protocol_engine_group(controller);
        task_context->logical_port_index = scic_sds_port_get_index(iport);
        task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
        task_context->valid = SCU_TASK_CONTEXT_VALID;
        task_context->context_type = SCU_TASK_CONTEXT_TYPE;

        task_context->remote_node_index = scic_sds_remote_device_get_index(idev);
        task_context->command_code = 0;

        task_context->link_layer_control = 0;
        task_context->do_not_dma_ssp_good_response = 1;
        task_context->strict_ordering = 0;
        task_context->control_frame = 0;
        task_context->timeout_enable = 0;
        task_context->block_guard_enable = 0;

        task_context->address_modifier = 0;

        /* task_context->type.ssp.tag = ireq->io_tag; */
        task_context->task_phase = 0x01;

        /* Pre-compute the POST_TC command word used to hand this TC to
         * the hardware; the tag is OR'ed in again at start time.
         */
        ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                              (scic_sds_controller_get_protocol_engine_group(controller) <<
                               SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                              (scic_sds_port_get_index(iport) <<
                               SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                              ISCI_TAG_TCI(ireq->io_tag));

        /*
         * Copy the physical address for the command buffer to the
         * SCU Task Context
         */
        dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

        task_context->command_iu_upper = upper_32_bits(dma_addr);
        task_context->command_iu_lower = lower_32_bits(dma_addr);

        /*
         * Copy the physical address for the response buffer to the
         * SCU Task Context
         */
        dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

        task_context->response_iu_upper = upper_32_bits(dma_addr);
        task_context->response_iu_lower = lower_32_bits(dma_addr);
}
270
271 /**
272  * This method is will fill in the SCU Task Context for a SSP IO request.
273  * @sci_req:
274  *
275  */
276 static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
277                                                       enum dma_data_direction dir,
278                                                       u32 len)
279 {
280         struct scu_task_context *task_context = ireq->tc;
281
282         scu_ssp_reqeust_construct_task_context(ireq, task_context);
283
284         task_context->ssp_command_iu_length =
285                 sizeof(struct ssp_cmd_iu) / sizeof(u32);
286         task_context->type.ssp.frame_type = SSP_COMMAND;
287
288         switch (dir) {
289         case DMA_FROM_DEVICE:
290         case DMA_NONE:
291         default:
292                 task_context->task_type = SCU_TASK_TYPE_IOREAD;
293                 break;
294         case DMA_TO_DEVICE:
295                 task_context->task_type = SCU_TASK_TYPE_IOWRITE;
296                 break;
297         }
298
299         task_context->transfer_length_bytes = len;
300
301         if (task_context->transfer_length_bytes > 0)
302                 scic_sds_request_build_sgl(ireq);
303 }
304
305 /**
306  * This method will fill in the SCU Task Context for a SSP Task request.  The
307  *    following important settings are utilized: -# priority ==
308  *    SCU_TASK_PRIORITY_HIGH.  This ensures that the task request is issued
309  *    ahead of other task destined for the same Remote Node. -# task_type ==
310  *    SCU_TASK_TYPE_IOREAD.  This simply indicates that a normal request type
311  *    (i.e. non-raw frame) is being utilized to perform task management. -#
312  *    control_frame == 1.  This ensures that the proper endianess is set so
313  *    that the bytes are transmitted in the right order for a task frame.
314  * @sci_req: This parameter specifies the task request object being
315  *    constructed.
316  *
317  */
318 static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
319 {
320         struct scu_task_context *task_context = ireq->tc;
321
322         scu_ssp_reqeust_construct_task_context(ireq, task_context);
323
324         task_context->control_frame                = 1;
325         task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
326         task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
327         task_context->transfer_length_bytes        = 0;
328         task_context->type.ssp.frame_type          = SSP_TASK;
329         task_context->ssp_command_iu_length =
330                 sizeof(struct ssp_task_iu) / sizeof(u32);
331 }
332
/**
 * scu_sata_reqeust_construct_task_context() - fill in the SCU task
 *    context fields common to every type of SATA/STP request.  Called
 *    from the various SATA constructors.
 * @ireq: the general IO request object used in constructing the SCU
 *    task context.
 * @task_context: the buffer for the SCU task context being constructed.
 *
 * Programs routing, STP protocol type, the post_context command word,
 * splices the first dword of the H2D register FIS into the TC body,
 * and points the command IU at the remainder of the FIS.  Revisit task
 * context construction to determine what is common for SSP/SMP/STP
 * task context structures.
 */
static void scu_sata_reqeust_construct_task_context(
        struct isci_request *ireq,
        struct scu_task_context *task_context)
{
        dma_addr_t dma_addr;
        struct isci_remote_device *idev;
        struct isci_port *iport;

        idev = scic_sds_request_get_device(ireq);
        iport = scic_sds_request_get_port(ireq);

        /* Fill in the TC with the its required data */
        task_context->abort = 0;
        task_context->priority = SCU_TASK_PRIORITY_NORMAL;
        task_context->initiator_request = 1;
        task_context->connection_rate = idev->connection_rate;
        /* NOTE(review): 'controller' is not declared here; presumably
         * the macro ignores its argument — confirm against its
         * definition.
         */
        task_context->protocol_engine_index =
                scic_sds_controller_get_protocol_engine_group(controller);
        task_context->logical_port_index =
                scic_sds_port_get_index(iport);
        task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
        task_context->valid = SCU_TASK_CONTEXT_VALID;
        task_context->context_type = SCU_TASK_CONTEXT_TYPE;

        task_context->remote_node_index = scic_sds_remote_device_get_index(idev);
        task_context->command_code = 0;

        task_context->link_layer_control = 0;
        task_context->do_not_dma_ssp_good_response = 1;
        task_context->strict_ordering = 0;
        task_context->control_frame = 0;
        task_context->timeout_enable = 0;
        task_context->block_guard_enable = 0;

        task_context->address_modifier = 0;
        task_context->task_phase = 0x01;

        /* FIS length in dwords, minus the dword carried in the TC body */
        task_context->ssp_command_iu_length =
                (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

        /* Set the first word of the H2D REG FIS */
        task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

        ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                                 (scic_sds_controller_get_protocol_engine_group(controller) <<
                                  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                                 (scic_sds_port_get_index(iport) <<
                                  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                                 ISCI_TAG_TCI(ireq->io_tag));
        /*
         * Copy the physical address for the command buffer to the SCU Task
         * Context. We must offset the command buffer by 4 bytes because the
         * first 4 bytes are transfered in the body of the TC.
         */
        dma_addr = scic_io_request_get_dma_addr(ireq,
                                                ((char *) &ireq->stp.cmd) +
                                                sizeof(u32));

        task_context->command_iu_upper = upper_32_bits(dma_addr);
        task_context->command_iu_lower = lower_32_bits(dma_addr);

        /* SATA Requests do not have a response buffer */
        task_context->response_iu_upper = 0;
        task_context->response_iu_lower = 0;
}
410
411 static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
412 {
413         struct scu_task_context *task_context = ireq->tc;
414
415         scu_sata_reqeust_construct_task_context(ireq, task_context);
416
417         task_context->control_frame         = 0;
418         task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
419         task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
420         task_context->type.stp.fis_type     = FIS_REGH2D;
421         task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
422 }
423
424 static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq,
425                                                           bool copy_rx_frame)
426 {
427         struct isci_stp_request *stp_req = &ireq->stp.req;
428
429         scu_stp_raw_request_construct_task_context(ireq);
430
431         stp_req->status = 0;
432         stp_req->sgl.offset = 0;
433         stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
434
435         if (copy_rx_frame) {
436                 scic_sds_request_build_sgl(ireq);
437                 stp_req->sgl.index = 0;
438         } else {
439                 /* The user does not want the data copied to the SGL buffer location */
440                 stp_req->sgl.index = -1;
441         }
442
443         return SCI_SUCCESS;
444 }
445
446 /**
447  *
448  * @sci_req: This parameter specifies the request to be constructed as an
449  *    optimized request.
450  * @optimized_task_type: This parameter specifies whether the request is to be
451  *    an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
452  *    value of 1 indicates NCQ.
453  *
454  * This method will perform request construction common to all types of STP
455  * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
456  * returns an indication as to whether the construction was successful.
457  */
458 static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq,
459                                                      u8 optimized_task_type,
460                                                      u32 len,
461                                                      enum dma_data_direction dir)
462 {
463         struct scu_task_context *task_context = ireq->tc;
464
465         /* Build the STP task context structure */
466         scu_sata_reqeust_construct_task_context(ireq, task_context);
467
468         /* Copy over the SGL elements */
469         scic_sds_request_build_sgl(ireq);
470
471         /* Copy over the number of bytes to be transfered */
472         task_context->transfer_length_bytes = len;
473
474         if (dir == DMA_TO_DEVICE) {
475                 /*
476                  * The difference between the DMA IN and DMA OUT request task type
477                  * values are consistent with the difference between FPDMA READ
478                  * and FPDMA WRITE values.  Add the supplied task type parameter
479                  * to this difference to set the task type properly for this
480                  * DATA OUT (WRITE) case. */
481                 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
482                                                                  - SCU_TASK_TYPE_DMA_IN);
483         } else {
484                 /*
485                  * For the DATA IN (READ) case, simply save the supplied
486                  * optimized task type. */
487                 task_context->task_type = optimized_task_type;
488         }
489 }
490
491
492
493 static enum sci_status
494 scic_io_request_construct_sata(struct isci_request *ireq,
495                                u32 len,
496                                enum dma_data_direction dir,
497                                bool copy)
498 {
499         enum sci_status status = SCI_SUCCESS;
500         struct sas_task *task = isci_request_access_task(ireq);
501
502         /* check for management protocols */
503         if (ireq->ttype == tmf_task) {
504                 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
505
506                 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
507                     tmf->tmf_code == isci_tmf_sata_srst_low) {
508                         scu_stp_raw_request_construct_task_context(ireq);
509                         return SCI_SUCCESS;
510                 } else {
511                         dev_err(scic_to_dev(ireq->owning_controller),
512                                 "%s: Request 0x%p received un-handled SAT "
513                                 "management protocol 0x%x.\n",
514                                 __func__, ireq, tmf->tmf_code);
515
516                         return SCI_FAILURE;
517                 }
518         }
519
520         if (!sas_protocol_ata(task->task_proto)) {
521                 dev_err(scic_to_dev(ireq->owning_controller),
522                         "%s: Non-ATA protocol in SATA path: 0x%x\n",
523                         __func__,
524                         task->task_proto);
525                 return SCI_FAILURE;
526
527         }
528
529         /* non data */
530         if (task->data_dir == DMA_NONE) {
531                 scu_stp_raw_request_construct_task_context(ireq);
532                 return SCI_SUCCESS;
533         }
534
535         /* NCQ */
536         if (task->ata_task.use_ncq) {
537                 scic_sds_stp_optimized_request_construct(ireq,
538                                                          SCU_TASK_TYPE_FPDMAQ_READ,
539                                                          len, dir);
540                 return SCI_SUCCESS;
541         }
542
543         /* DMA */
544         if (task->ata_task.dma_xfer) {
545                 scic_sds_stp_optimized_request_construct(ireq,
546                                                          SCU_TASK_TYPE_DMA_IN,
547                                                          len, dir);
548                 return SCI_SUCCESS;
549         } else /* PIO */
550                 return scic_sds_stp_pio_request_construct(ireq, copy);
551
552         return status;
553 }
554
555 static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq)
556 {
557         struct sas_task *task = isci_request_access_task(ireq);
558
559         ireq->protocol = SCIC_SSP_PROTOCOL;
560
561         scu_ssp_io_request_construct_task_context(ireq,
562                                                   task->data_dir,
563                                                   task->total_xfer_len);
564
565         scic_sds_io_request_build_ssp_command_iu(ireq);
566
567         sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
568
569         return SCI_SUCCESS;
570 }
571
572 enum sci_status scic_task_request_construct_ssp(
573         struct isci_request *ireq)
574 {
575         /* Construct the SSP Task SCU Task Context */
576         scu_ssp_task_request_construct_task_context(ireq);
577
578         /* Fill in the SSP Task IU */
579         scic_sds_task_request_build_ssp_task_iu(ireq);
580
581         sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
582
583         return SCI_SUCCESS;
584 }
585
586 static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq)
587 {
588         enum sci_status status;
589         bool copy = false;
590         struct sas_task *task = isci_request_access_task(ireq);
591
592         ireq->protocol = SCIC_STP_PROTOCOL;
593
594         copy = (task->data_dir == DMA_NONE) ? false : true;
595
596         status = scic_io_request_construct_sata(ireq,
597                                                 task->total_xfer_len,
598                                                 task->data_dir,
599                                                 copy);
600
601         if (status == SCI_SUCCESS)
602                 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
603
604         return status;
605 }
606
607 enum sci_status scic_task_request_construct_sata(struct isci_request *ireq)
608 {
609         enum sci_status status = SCI_SUCCESS;
610
611         /* check for management protocols */
612         if (ireq->ttype == tmf_task) {
613                 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
614
615                 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
616                     tmf->tmf_code == isci_tmf_sata_srst_low) {
617                         scu_stp_raw_request_construct_task_context(ireq);
618                 } else {
619                         dev_err(scic_to_dev(ireq->owning_controller),
620                                 "%s: Request 0x%p received un-handled SAT "
621                                 "Protocol 0x%x.\n",
622                                 __func__, ireq, tmf->tmf_code);
623
624                         return SCI_FAILURE;
625                 }
626         }
627
628         if (status != SCI_SUCCESS)
629                 return status;
630         sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
631
632         return status;
633 }
634
635 /**
636  * sci_req_tx_bytes - bytes transferred when reply underruns request
637  * @sci_req: request that was terminated early
638  */
639 #define SCU_TASK_CONTEXT_SRAM 0x200000
640 static u32 sci_req_tx_bytes(struct isci_request *ireq)
641 {
642         struct scic_sds_controller *scic = ireq->owning_controller;
643         u32 ret_val = 0;
644
645         if (readl(&scic->smu_registers->address_modifier) == 0) {
646                 void __iomem *scu_reg_base = scic->scu_registers;
647
648                 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
649                  *   BAR1 is the scu_registers
650                  *   0x20002C = 0x200000 + 0x2c
651                  *            = start of task context SRAM + offset of (type.ssp.data_offset)
652                  *   TCi is the io_tag of struct scic_sds_request
653                  */
654                 ret_val = readl(scu_reg_base +
655                                 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
656                                 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
657         }
658
659         return ret_val;
660 }
661
/**
 * scic_sds_request_start() - post a constructed request to the hardware
 *    side: stamp the task index and protocol-specific tag fields into
 *    the TC, fold the tag into post_context, and advance the state
 *    machine to STARTED.
 * @ireq: the request to start; must be in SCI_REQ_CONSTRUCTED
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE if the request is
 * not in the CONSTRUCTED state.
 */
enum sci_status scic_sds_request_start(struct isci_request *ireq)
{
        enum sci_base_request_states state;
        struct scu_task_context *tc = ireq->tc;
        struct scic_sds_controller *scic = ireq->owning_controller;

        state = ireq->sm.current_state_id;
        if (state != SCI_REQ_CONSTRUCTED) {
                dev_warn(scic_to_dev(scic),
                        "%s: SCIC IO Request requested to start while in wrong "
                         "state %d\n", __func__, state);
                return SCI_FAILURE_INVALID_STATE;
        }

        tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

        switch (tc->protocol_type) {
        case SCU_TASK_CONTEXT_PROTOCOL_SMP:
        case SCU_TASK_CONTEXT_PROTOCOL_SSP:
                /* SSP/SMP Frame */
                tc->type.ssp.tag = ireq->io_tag;
                tc->type.ssp.target_port_transfer_tag = 0xFFFF;
                break;

        case SCU_TASK_CONTEXT_PROTOCOL_STP:
                /* STP/SATA Frame
                 * tc->type.stp.ncq_tag = ireq->ncq_tag;
                 */
                break;

        case SCU_TASK_CONTEXT_PROTOCOL_NONE:
                /* / @todo When do we set no protocol type? */
                break;

        default:
                /* This should never happen since we build the IO
                 * requests */
                break;
        }

        /* Add to the post_context the io tag value */
        ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

        /* Everything is good go ahead and change state */
        sci_change_state(&ireq->sm, SCI_REQ_STARTED);

        return SCI_SUCCESS;
}
710
/**
 * scic_sds_io_request_terminate() - request termination of an in-flight
 *    request, advancing its state machine according to how far the
 *    request has progressed.
 * @ireq: the request to terminate
 *
 * CONSTRUCTED requests complete immediately with
 * SCI_FAILURE_IO_TERMINATED; requests in any active wait state move to
 * ABORTING; a request waiting only for the task response (or already
 * ABORTING) moves straight through to COMPLETED.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE when the request
 * is already completed (or in an unknown state).
 */
enum sci_status
scic_sds_io_request_terminate(struct isci_request *ireq)
{
        enum sci_base_request_states state;

        state = ireq->sm.current_state_id;

        switch (state) {
        case SCI_REQ_CONSTRUCTED:
                /* never reached the hardware: fail it locally */
                scic_sds_request_set_status(ireq,
                        SCU_TASK_DONE_TASK_ABORT,
                        SCI_FAILURE_IO_TERMINATED);

                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_STARTED:
        case SCI_REQ_TASK_WAIT_TC_COMP:
        case SCI_REQ_SMP_WAIT_RESP:
        case SCI_REQ_SMP_WAIT_TC_COMP:
        case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
        case SCI_REQ_STP_UDMA_WAIT_D2H:
        case SCI_REQ_STP_NON_DATA_WAIT_H2D:
        case SCI_REQ_STP_NON_DATA_WAIT_D2H:
        case SCI_REQ_STP_PIO_WAIT_H2D:
        case SCI_REQ_STP_PIO_WAIT_FRAME:
        case SCI_REQ_STP_PIO_DATA_IN:
        case SCI_REQ_STP_PIO_DATA_OUT:
        case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
        case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
        case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                return SCI_SUCCESS;
        case SCI_REQ_TASK_WAIT_TC_RESP:
                /* pass through ABORTING so its exit/entry actions run */
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_ABORTING:
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_COMPLETED:
        default:
                dev_warn(scic_to_dev(ireq->owning_controller),
                         "%s: SCIC IO Request requested to abort while in wrong "
                         "state %d\n",
                         __func__,
                         ireq->sm.current_state_id);
                break;
        }

        return SCI_FAILURE_INVALID_STATE;
}
762
763 enum sci_status scic_sds_request_complete(struct isci_request *ireq)
764 {
765         enum sci_base_request_states state;
766         struct scic_sds_controller *scic = ireq->owning_controller;
767
768         state = ireq->sm.current_state_id;
769         if (WARN_ONCE(state != SCI_REQ_COMPLETED,
770                       "isci: request completion from wrong state (%d)\n", state))
771                 return SCI_FAILURE_INVALID_STATE;
772
773         if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
774                 scic_sds_controller_release_frame(scic,
775                                                   ireq->saved_rx_frame_index);
776
777         /* XXX can we just stop the machine and remove the 'final' state? */
778         sci_change_state(&ireq->sm, SCI_REQ_FINAL);
779         return SCI_SUCCESS;
780 }
781
782 enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq,
783                                                   u32 event_code)
784 {
785         enum sci_base_request_states state;
786         struct scic_sds_controller *scic = ireq->owning_controller;
787
788         state = ireq->sm.current_state_id;
789
790         if (state != SCI_REQ_STP_PIO_DATA_IN) {
791                 dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n",
792                          __func__, event_code, state);
793
794                 return SCI_FAILURE_INVALID_STATE;
795         }
796
797         switch (scu_get_event_specifier(event_code)) {
798         case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
799                 /* We are waiting for data and the SCU has R_ERR the data frame.
800                  * Go back to waiting for the D2H Register FIS
801                  */
802                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
803                 return SCI_SUCCESS;
804         default:
805                 dev_err(scic_to_dev(scic),
806                         "%s: pio request unexpected event %#x\n",
807                         __func__, event_code);
808
809                 /* TODO Should we fail the PIO request when we get an
810                  * unexpected event?
811                  */
812                 return SCI_FAILURE;
813         }
814 }
815
816 /*
817  * This function copies response data for requests returning response data
818  *    instead of sense data.
819  * @sci_req: This parameter specifies the request object for which to copy
820  *    the response data.
821  */
822 static void scic_sds_io_request_copy_response(struct isci_request *ireq)
823 {
824         void *resp_buf;
825         u32 len;
826         struct ssp_response_iu *ssp_response;
827         struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
828
829         ssp_response = &ireq->ssp.rsp;
830
831         resp_buf = &isci_tmf->resp.resp_iu;
832
833         len = min_t(u32,
834                     SSP_RESP_IU_MAX_SIZE,
835                     be32_to_cpu(ssp_response->response_data_len));
836
837         memcpy(resp_buf, ssp_response->resp_data, len);
838 }
839
/*
 * request_started_state_tc_event() - task context (TC) completion while
 * the request is in the STARTED state.
 * @ireq: the request whose TC completed
 * @completion_code: raw SCU completion code for the TC
 *
 * Decodes the transport-layer status, records the corresponding
 * scu/sci status pair on the request, and in every case transitions the
 * state machine to SCI_REQ_COMPLETED.
 *
 * Return: always SCI_SUCCESS.
 */
static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
                               u32 completion_code)
{
        struct ssp_response_iu *resp_iu;
        u8 datapres;

        /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
         * to determine SDMA status
         */
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                scic_sds_request_set_status(ireq,
                                            SCU_TASK_DONE_GOOD,
                                            SCI_SUCCESS);
                break;
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
                /* There are times when the SCU hardware will return an early
                 * response because the io request specified more data than is
                 * returned by the target device (mode pages, inquiry data,
                 * etc.).  We must check the response stats to see if this is
                 * truly a failed request or a good request that just got
                 * completed early.
                 */
                struct ssp_response_iu *resp = &ireq->ssp.rsp;
                ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

                /* The response IU arrives big-endian: swab it in place
                 * before inspecting the status byte.
                 */
                sci_swab32_cpy(&ireq->ssp.rsp,
                               &ireq->ssp.rsp,
                               word_cnt);

                if (resp->status == 0) {
                        scic_sds_request_set_status(ireq,
                                                    SCU_TASK_DONE_GOOD,
                                                    SCI_SUCCESS_IO_DONE_EARLY);
                } else {
                        scic_sds_request_set_status(ireq,
                                                    SCU_TASK_DONE_CHECK_RESPONSE,
                                                    SCI_FAILURE_IO_RESPONSE_VALID);
                }
                break;
        }
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
                ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

                /* Swab the response IU in place so upper layers can read it. */
                sci_swab32_cpy(&ireq->ssp.rsp,
                               &ireq->ssp.rsp,
                               word_cnt);

                scic_sds_request_set_status(ireq,
                                            SCU_TASK_DONE_CHECK_RESPONSE,
                                            SCI_FAILURE_IO_RESPONSE_VALID);
                break;
        }

        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
                /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
                 * guaranteed to be received before this completion status is
                 * posted?
                 */
                resp_iu = &ireq->ssp.rsp;
                datapres = resp_iu->datapres;

                /* datapres values 1/2 presumably match the SSP response IU
                 * DATAPRES encodings (RESPONSE_DATA/SENSE_DATA) -- confirm
                 * against the SAS specification.
                 */
                if (datapres == 1 || datapres == 2) {
                        scic_sds_request_set_status(ireq,
                                                    SCU_TASK_DONE_CHECK_RESPONSE,
                                                    SCI_FAILURE_IO_RESPONSE_VALID);
                } else
                        scic_sds_request_set_status(ireq,
                                                    SCU_TASK_DONE_GOOD,
                                                    SCI_SUCCESS);
                break;
        /* only stp device gets suspended. */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
                /* STP targets need a reset after these; others just get a
                 * controller-specific IO error.
                 */
                if (ireq->protocol == SCIC_STP_PROTOCOL) {
                        scic_sds_request_set_status(ireq,
                                SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                SCU_COMPLETION_TL_STATUS_SHIFT,
                                SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
                } else {
                        scic_sds_request_set_status(ireq,
                                SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                SCU_COMPLETION_TL_STATUS_SHIFT,
                                SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
                }
                break;

        /* both stp/ssp device gets suspended */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
                scic_sds_request_set_status(ireq,
                                            SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                            SCU_COMPLETION_TL_STATUS_SHIFT,
                                            SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
                break;

        /* neither ssp nor stp gets suspended. */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
        default:
                scic_sds_request_set_status(
                        ireq,
                        SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                        SCU_COMPLETION_TL_STATUS_SHIFT,
                        SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
                break;
        }

        /*
         * TODO: This is probably wrong for ACK/NAK timeout conditions
         */

        /* In all cases we will treat this as the completion of the IO req. */
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        return SCI_SUCCESS;
}
987
988 static enum sci_status
989 request_aborting_state_tc_event(struct isci_request *ireq,
990                                 u32 completion_code)
991 {
992         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
993         case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
994         case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
995                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT,
996                                             SCI_FAILURE_IO_TERMINATED);
997
998                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
999                 break;
1000
1001         default:
1002                 /* Unless we get some strange error wait for the task abort to complete
1003                  * TODO: Should there be a state change for this completion?
1004                  */
1005                 break;
1006         }
1007
1008         return SCI_SUCCESS;
1009 }
1010
1011 static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
1012                                                        u32 completion_code)
1013 {
1014         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1015         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1016                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1017                                             SCI_SUCCESS);
1018
1019                 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1020                 break;
1021         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1022                 /* Currently, the decision is to simply allow the task request
1023                  * to timeout if the task IU wasn't received successfully.
1024                  * There is a potential for receiving multiple task responses if
1025                  * we decide to send the task IU again.
1026                  */
1027                 dev_warn(scic_to_dev(ireq->owning_controller),
1028                          "%s: TaskRequest:0x%p CompletionCode:%x - "
1029                          "ACK/NAK timeout\n", __func__, ireq,
1030                          completion_code);
1031
1032                 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1033                 break;
1034         default:
1035                 /*
1036                  * All other completion status cause the IO to be complete.
1037                  * If a NAK was received, then it is up to the user to retry
1038                  * the request.
1039                  */
1040                 scic_sds_request_set_status(ireq,
1041                         SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1042                         SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1043
1044                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1045                 break;
1046         }
1047
1048         return SCI_SUCCESS;
1049 }
1050
1051 static enum sci_status
1052 smp_request_await_response_tc_event(struct isci_request *ireq,
1053                                     u32 completion_code)
1054 {
1055         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1056         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1057                 /* In the AWAIT RESPONSE state, any TC completion is
1058                  * unexpected.  but if the TC has success status, we
1059                  * complete the IO anyway.
1060                  */
1061                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1062                                             SCI_SUCCESS);
1063
1064                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1065                 break;
1066
1067         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1068         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1069         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1070         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1071                 /* These status has been seen in a specific LSI
1072                  * expander, which sometimes is not able to send smp
1073                  * response within 2 ms. This causes our hardware break
1074                  * the connection and set TC completion with one of
1075                  * these SMP_XXX_XX_ERR status. For these type of error,
1076                  * we ask scic user to retry the request.
1077                  */
1078                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR,
1079                                             SCI_FAILURE_RETRY_REQUIRED);
1080
1081                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1082                 break;
1083
1084         default:
1085                 /* All other completion status cause the IO to be complete.  If a NAK
1086                  * was received, then it is up to the user to retry the request
1087                  */
1088                 scic_sds_request_set_status(ireq,
1089                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1090                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1091
1092                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1093                 break;
1094         }
1095
1096         return SCI_SUCCESS;
1097 }
1098
1099 static enum sci_status
1100 smp_request_await_tc_event(struct isci_request *ireq,
1101                            u32 completion_code)
1102 {
1103         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1104         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1105                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1106                                             SCI_SUCCESS);
1107
1108                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1109                 break;
1110         default:
1111                 /* All other completion status cause the IO to be
1112                  * complete.  If a NAK was received, then it is up to
1113                  * the user to retry the request.
1114                  */
1115                 scic_sds_request_set_status(ireq,
1116                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1117                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1118
1119                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1120                 break;
1121         }
1122
1123         return SCI_SUCCESS;
1124 }
1125
/*
 * scic_stp_io_request_set_ncq_tag() - record the NCQ tag in the request's
 * task context so the hardware issues the STP command with that tag.
 * @ireq: the STP request being constructed
 * @ncq_tag: the NCQ tag to place in the task context
 */
void scic_stp_io_request_set_ncq_tag(struct isci_request *ireq,
                                     u16 ncq_tag)
{
        /**
         * @note This could be made to return an error to the user if the user
         *       attempts to set the NCQ tag in the wrong state.
         */
        ireq->tc->type.stp.ncq_tag = ncq_tag;
}
1135
1136 static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
1137 {
1138         struct scu_sgl_element *sgl;
1139         struct scu_sgl_element_pair *sgl_pair;
1140         struct isci_request *ireq = to_ireq(stp_req);
1141         struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
1142
1143         sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1144         if (!sgl_pair)
1145                 sgl = NULL;
1146         else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
1147                 if (sgl_pair->B.address_lower == 0 &&
1148                     sgl_pair->B.address_upper == 0) {
1149                         sgl = NULL;
1150                 } else {
1151                         pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
1152                         sgl = &sgl_pair->B;
1153                 }
1154         } else {
1155                 if (sgl_pair->next_pair_lower == 0 &&
1156                     sgl_pair->next_pair_upper == 0) {
1157                         sgl = NULL;
1158                 } else {
1159                         pio_sgl->index++;
1160                         pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
1161                         sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1162                         sgl = &sgl_pair->A;
1163                 }
1164         }
1165
1166         return sgl;
1167 }
1168
1169 static enum sci_status
1170 stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
1171                                         u32 completion_code)
1172 {
1173         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1174         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1175                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1176                                             SCI_SUCCESS);
1177
1178                 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
1179                 break;
1180
1181         default:
1182                 /* All other completion status cause the IO to be
1183                  * complete.  If a NAK was received, then it is up to
1184                  * the user to retry the request.
1185                  */
1186                 scic_sds_request_set_status(ireq,
1187                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1188                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1189
1190                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1191                 break;
1192         }
1193
1194         return SCI_SUCCESS;
1195 }
1196
1197 #define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */
1198
1199 /* transmit DATA_FIS from (current sgl + offset) for input
1200  * parameter length. current sgl and offset is alreay stored in the IO request
1201  */
1202 static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
1203         struct isci_request *ireq,
1204         u32 length)
1205 {
1206         struct isci_stp_request *stp_req = &ireq->stp.req;
1207         struct scu_task_context *task_context = ireq->tc;
1208         struct scu_sgl_element_pair *sgl_pair;
1209         struct scu_sgl_element *current_sgl;
1210
1211         /* Recycle the TC and reconstruct it for sending out DATA FIS containing
1212          * for the data from current_sgl+offset for the input length
1213          */
1214         sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1215         if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
1216                 current_sgl = &sgl_pair->A;
1217         else
1218                 current_sgl = &sgl_pair->B;
1219
1220         /* update the TC */
1221         task_context->command_iu_upper = current_sgl->address_upper;
1222         task_context->command_iu_lower = current_sgl->address_lower;
1223         task_context->transfer_length_bytes = length;
1224         task_context->type.stp.fis_type = FIS_DATA;
1225
1226         /* send the new TC out. */
1227         return scic_controller_continue_io(ireq);
1228 }
1229
1230 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
1231 {
1232         struct isci_stp_request *stp_req = &ireq->stp.req;
1233         struct scu_sgl_element_pair *sgl_pair;
1234         struct scu_sgl_element *sgl;
1235         enum sci_status status;
1236         u32 offset;
1237         u32 len = 0;
1238
1239         offset = stp_req->sgl.offset;
1240         sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1241         if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
1242                 return SCI_FAILURE;
1243
1244         if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
1245                 sgl = &sgl_pair->A;
1246                 len = sgl_pair->A.length - offset;
1247         } else {
1248                 sgl = &sgl_pair->B;
1249                 len = sgl_pair->B.length - offset;
1250         }
1251
1252         if (stp_req->pio_len == 0)
1253                 return SCI_SUCCESS;
1254
1255         if (stp_req->pio_len >= len) {
1256                 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
1257                 if (status != SCI_SUCCESS)
1258                         return status;
1259                 stp_req->pio_len -= len;
1260
1261                 /* update the current sgl, offset and save for future */
1262                 sgl = pio_sgl_next(stp_req);
1263                 offset = 0;
1264         } else if (stp_req->pio_len < len) {
1265                 scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
1266
1267                 /* Sgl offset will be adjusted and saved for future */
1268                 offset += stp_req->pio_len;
1269                 sgl->address_lower += stp_req->pio_len;
1270                 stp_req->pio_len = 0;
1271         }
1272
1273         stp_req->sgl.offset = offset;
1274
1275         return status;
1276 }
1277
/**
 * scic_sds_stp_request_pio_data_in_copy_data_buffer() - copy received PIO
 *    data into the IO request's SGL-described data region.
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request
 * SGL specified data region.
 *
 * Return: always SCI_SUCCESS.
 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
                                                  u8 *data_buf, u32 len)
{
        struct isci_request *ireq;
        u8 *src_addr;
        int copy_len;
        struct sas_task *task;
        struct scatterlist *sg;
        void *kaddr;
        int total_len = len;

        ireq = to_ireq(stp_req);
        task = isci_request_access_task(ireq);
        src_addr = data_buf;

        if (task->num_scatter > 0) {
                /* Walk the scatterlist, copying up to one entry's worth per
                 * iteration.  NOTE(review): the CPU copy is bounded by
                 * sg_dma_len() rather than sg->length -- confirm the DMA
                 * length always matches the CPU length here.
                 */
                sg = task->scatter;

                while (total_len > 0) {
                        struct page *page = sg_page(sg);

                        copy_len = min_t(int, total_len, sg_dma_len(sg));
                        /* Legacy two-argument kmap_atomic/kunmap_atomic
                         * (KM_IRQ0 slot) -- the API of this kernel era.
                         */
                        kaddr = kmap_atomic(page, KM_IRQ0);
                        memcpy(kaddr + sg->offset, src_addr, copy_len);
                        kunmap_atomic(kaddr, KM_IRQ0);
                        total_len -= copy_len;
                        src_addr += copy_len;
                        sg = sg_next(sg);
                }
        } else {
                /* No scatterlist: 'scatter' is used directly as the flat
                 * destination buffer in this case.
                 */
                BUG_ON(task->total_xfer_len < total_len);
                memcpy(task->scatter, src_addr, total_len);
        }

        return SCI_SUCCESS;
}
1324
1325 /**
1326  *
1327  * @sci_req: The PIO DATA IN request that is to receive the data.
1328  * @data_buffer: The buffer to copy from.
1329  *
1330  * Copy the data buffer to the io request data region. enum sci_status
1331  */
1332 static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1333         struct isci_stp_request *stp_req,
1334         u8 *data_buffer)
1335 {
1336         enum sci_status status;
1337
1338         /*
1339          * If there is less than 1K remaining in the transfer request
1340          * copy just the data for the transfer */
1341         if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
1342                 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1343                         stp_req, data_buffer, stp_req->pio_len);
1344
1345                 if (status == SCI_SUCCESS)
1346                         stp_req->pio_len = 0;
1347         } else {
1348                 /* We are transfering the whole frame so copy */
1349                 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1350                         stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1351
1352                 if (status == SCI_SUCCESS)
1353                         stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
1354         }
1355
1356         return status;
1357 }
1358
1359 static enum sci_status
1360 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1361                                               u32 completion_code)
1362 {
1363         enum sci_status status = SCI_SUCCESS;
1364
1365         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1366         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1367                 scic_sds_request_set_status(ireq,
1368                                             SCU_TASK_DONE_GOOD,
1369                                             SCI_SUCCESS);
1370
1371                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1372                 break;
1373
1374         default:
1375                 /* All other completion status cause the IO to be
1376                  * complete.  If a NAK was received, then it is up to
1377                  * the user to retry the request.
1378                  */
1379                 scic_sds_request_set_status(ireq,
1380                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1381                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1382
1383                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1384                 break;
1385         }
1386
1387         return status;
1388 }
1389
1390 static enum sci_status
1391 pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1392                               u32 completion_code)
1393 {
1394         enum sci_status status = SCI_SUCCESS;
1395         bool all_frames_transferred = false;
1396         struct isci_stp_request *stp_req = &ireq->stp.req;
1397
1398         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1399         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1400                 /* Transmit data */
1401                 if (stp_req->pio_len != 0) {
1402                         status = scic_sds_stp_request_pio_data_out_transmit_data(ireq);
1403                         if (status == SCI_SUCCESS) {
1404                                 if (stp_req->pio_len == 0)
1405                                         all_frames_transferred = true;
1406                         }
1407                 } else if (stp_req->pio_len == 0) {
1408                         /*
1409                          * this will happen if the all data is written at the
1410                          * first time after the pio setup fis is received
1411                          */
1412                         all_frames_transferred  = true;
1413                 }
1414
1415                 /* all data transferred. */
1416                 if (all_frames_transferred) {
1417                         /*
1418                          * Change the state to SCI_REQ_STP_PIO_DATA_IN
1419                          * and wait for PIO_SETUP fis / or D2H REg fis. */
1420                         sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1421                 }
1422                 break;
1423
1424         default:
1425                 /*
1426                  * All other completion status cause the IO to be complete.
1427                  * If a NAK was received, then it is up to the user to retry
1428                  * the request.
1429                  */
1430                 scic_sds_request_set_status(
1431                         ireq,
1432                         SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1433                         SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1434
1435                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1436                 break;
1437         }
1438
1439         return status;
1440 }
1441
/*
 * scic_sds_stp_request_udma_complete_request() - finish a UDMA STP request.
 * @ireq: the request to complete
 * @scu_status: hardware (SCU) completion status to record
 * @sci_status: SCI status to report for the request
 *
 * Records the status pair and moves the state machine to SCI_REQ_COMPLETED.
 */
static void scic_sds_stp_request_udma_complete_request(
        struct isci_request *ireq,
        u32 scu_status,
        enum sci_status sci_status)
{
        scic_sds_request_set_status(ireq, scu_status, sci_status);
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}
1450
1451 static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1452                                                                        u32 frame_index)
1453 {
1454         struct scic_sds_controller *scic = ireq->owning_controller;
1455         struct dev_to_host_fis *frame_header;
1456         enum sci_status status;
1457         u32 *frame_buffer;
1458
1459         status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1460                                                                frame_index,
1461                                                                (void **)&frame_header);
1462
1463         if ((status == SCI_SUCCESS) &&
1464             (frame_header->fis_type == FIS_REGD2H)) {
1465                 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1466                                                               frame_index,
1467                                                               (void **)&frame_buffer);
1468
1469                 scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1470                                                        frame_header,
1471                                                        frame_buffer);
1472         }
1473
1474         scic_sds_controller_release_frame(scic, frame_index);
1475
1476         return status;
1477 }
1478
1479 enum sci_status
1480 scic_sds_io_request_frame_handler(struct isci_request *ireq,
1481                                   u32 frame_index)
1482 {
1483         struct scic_sds_controller *scic = ireq->owning_controller;
1484         struct isci_stp_request *stp_req = &ireq->stp.req;
1485         enum sci_base_request_states state;
1486         enum sci_status status;
1487         ssize_t word_cnt;
1488
1489         state = ireq->sm.current_state_id;
1490         switch (state)  {
1491         case SCI_REQ_STARTED: {
1492                 struct ssp_frame_hdr ssp_hdr;
1493                 void *frame_header;
1494
1495                 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1496                                                               frame_index,
1497                                                               &frame_header);
1498
1499                 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1500                 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1501
1502                 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1503                         struct ssp_response_iu *resp_iu;
1504                         ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1505
1506                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1507                                                                       frame_index,
1508                                                                       (void **)&resp_iu);
1509
1510                         sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1511
1512                         resp_iu = &ireq->ssp.rsp;
1513
1514                         if (resp_iu->datapres == 0x01 ||
1515                             resp_iu->datapres == 0x02) {
1516                                 scic_sds_request_set_status(ireq,
1517                                                             SCU_TASK_DONE_CHECK_RESPONSE,
1518                                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1519                         } else
1520                                 scic_sds_request_set_status(ireq,
1521                                                             SCU_TASK_DONE_GOOD,
1522                                                             SCI_SUCCESS);
1523                 } else {
1524                         /* not a response frame, why did it get forwarded? */
1525                         dev_err(scic_to_dev(scic),
1526                                 "%s: SCIC IO Request 0x%p received unexpected "
1527                                 "frame %d type 0x%02x\n", __func__, ireq,
1528                                 frame_index, ssp_hdr.frame_type);
1529                 }
1530
1531                 /*
1532                  * In any case we are done with this frame buffer return it to
1533                  * the controller
1534                  */
1535                 scic_sds_controller_release_frame(scic, frame_index);
1536
1537                 return SCI_SUCCESS;
1538         }
1539
1540         case SCI_REQ_TASK_WAIT_TC_RESP:
1541                 scic_sds_io_request_copy_response(ireq);
1542                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1543                 scic_sds_controller_release_frame(scic,frame_index);
1544                 return SCI_SUCCESS;
1545
1546         case SCI_REQ_SMP_WAIT_RESP: {
1547                 struct smp_resp *rsp_hdr = &ireq->smp.rsp;
1548                 void *frame_header;
1549
1550                 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1551                                                               frame_index,
1552                                                               &frame_header);
1553
1554                 /* byte swap the header. */
1555                 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1556                 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1557
1558                 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1559                         void *smp_resp;
1560
1561                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1562                                                                       frame_index,
1563                                                                       &smp_resp);
1564
1565                         word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
1566                                 sizeof(u32);
1567
1568                         sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1569                                        smp_resp, word_cnt);
1570
1571                         scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1572                                                     SCI_SUCCESS);
1573
1574                         sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1575                 } else {
1576                         /*
1577                          * This was not a response frame why did it get
1578                          * forwarded?
1579                          */
1580                         dev_err(scic_to_dev(scic),
1581                                 "%s: SCIC SMP Request 0x%p received unexpected "
1582                                 "frame %d type 0x%02x\n",
1583                                 __func__,
1584                                 ireq,
1585                                 frame_index,
1586                                 rsp_hdr->frame_type);
1587
1588                         scic_sds_request_set_status(ireq,
1589                                                     SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1590                                                     SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1591
1592                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1593                 }
1594
1595                 scic_sds_controller_release_frame(scic, frame_index);
1596
1597                 return SCI_SUCCESS;
1598         }
1599
1600         case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1601                 return scic_sds_stp_request_udma_general_frame_handler(ireq,
1602                                                                        frame_index);
1603
1604         case SCI_REQ_STP_UDMA_WAIT_D2H:
1605                 /* Use the general frame handler to copy the resposne data */
1606                 status = scic_sds_stp_request_udma_general_frame_handler(ireq,
1607                                                                          frame_index);
1608
1609                 if (status != SCI_SUCCESS)
1610                         return status;
1611
1612                 scic_sds_stp_request_udma_complete_request(ireq,
1613                                                            SCU_TASK_DONE_CHECK_RESPONSE,
1614                                                            SCI_FAILURE_IO_RESPONSE_VALID);
1615
1616                 return SCI_SUCCESS;
1617
1618         case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1619                 struct dev_to_host_fis *frame_header;
1620                 u32 *frame_buffer;
1621
1622                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1623                                                                        frame_index,
1624                                                                        (void **)&frame_header);
1625
1626                 if (status != SCI_SUCCESS) {
1627                         dev_err(scic_to_dev(scic),
1628                                 "%s: SCIC IO Request 0x%p could not get frame "
1629                                 "header for frame index %d, status %x\n",
1630                                 __func__,
1631                                 stp_req,
1632                                 frame_index,
1633                                 status);
1634
1635                         return status;
1636                 }
1637
1638                 switch (frame_header->fis_type) {
1639                 case FIS_REGD2H:
1640                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1641                                                                       frame_index,
1642                                                                       (void **)&frame_buffer);
1643
1644                         scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1645                                                                frame_header,
1646                                                                frame_buffer);
1647
1648                         /* The command has completed with error */
1649                         scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE,
1650                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1651                         break;
1652
1653                 default:
1654                         dev_warn(scic_to_dev(scic),
1655                                  "%s: IO Request:0x%p Frame Id:%d protocol "
1656                                   "violation occurred\n", __func__, stp_req,
1657                                   frame_index);
1658
1659                         scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS,
1660                                                     SCI_FAILURE_PROTOCOL_VIOLATION);
1661                         break;
1662                 }
1663
1664                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1665
1666                 /* Frame has been decoded return it to the controller */
1667                 scic_sds_controller_release_frame(scic, frame_index);
1668
1669                 return status;
1670         }
1671
1672         case SCI_REQ_STP_PIO_WAIT_FRAME: {
1673                 struct sas_task *task = isci_request_access_task(ireq);
1674                 struct dev_to_host_fis *frame_header;
1675                 u32 *frame_buffer;
1676
1677                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1678                                                                        frame_index,
1679                                                                        (void **)&frame_header);
1680
1681                 if (status != SCI_SUCCESS) {
1682                         dev_err(scic_to_dev(scic),
1683                                 "%s: SCIC IO Request 0x%p could not get frame "
1684                                 "header for frame index %d, status %x\n",
1685                                 __func__, stp_req, frame_index, status);
1686                         return status;
1687                 }
1688
1689                 switch (frame_header->fis_type) {
1690                 case FIS_PIO_SETUP:
1691                         /* Get from the frame buffer the PIO Setup Data */
1692                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1693                                                                       frame_index,
1694                                                                       (void **)&frame_buffer);
1695
1696                         /* Get the data from the PIO Setup The SCU Hardware
1697                          * returns first word in the frame_header and the rest
1698                          * of the data is in the frame buffer so we need to
1699                          * back up one dword
1700                          */
1701
1702                         /* transfer_count: first 16bits in the 4th dword */
1703                         stp_req->pio_len = frame_buffer[3] & 0xffff;
1704
1705                         /* status: 4th byte in the 3rd dword */
1706                         stp_req->status = (frame_buffer[2] >> 24) & 0xff;
1707
1708                         scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1709                                                                frame_header,
1710                                                                frame_buffer);
1711
1712                         ireq->stp.rsp.status = stp_req->status;
1713
1714                         /* The next state is dependent on whether the
1715                          * request was PIO Data-in or Data out
1716                          */
1717                         if (task->data_dir == DMA_FROM_DEVICE) {
1718                                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1719                         } else if (task->data_dir == DMA_TO_DEVICE) {
1720                                 /* Transmit data */
1721                                 status = scic_sds_stp_request_pio_data_out_transmit_data(ireq);
1722                                 if (status != SCI_SUCCESS)
1723                                         break;
1724                                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1725                         }
1726                         break;
1727
1728                 case FIS_SETDEVBITS:
1729                         sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1730                         break;
1731
1732                 case FIS_REGD2H:
1733                         if (frame_header->status & ATA_BUSY) {
1734                                 /*
1735                                  * Now why is the drive sending a D2H Register
1736                                  * FIS when it is still busy?  Do nothing since
1737                                  * we are still in the right state.
1738                                  */
1739                                 dev_dbg(scic_to_dev(scic),
1740                                         "%s: SCIC PIO Request 0x%p received "
1741                                         "D2H Register FIS with BSY status "
1742                                         "0x%x\n",
1743                                         __func__,
1744                                         stp_req,
1745                                         frame_header->status);
1746                                 break;
1747                         }
1748
1749                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1750                                                                       frame_index,
1751                                                                       (void **)&frame_buffer);
1752
1753                         scic_sds_controller_copy_sata_response(&ireq->stp.req,
1754                                                                frame_header,
1755                                                                frame_buffer);
1756
1757                         scic_sds_request_set_status(ireq,
1758                                                     SCU_TASK_DONE_CHECK_RESPONSE,
1759                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1760
1761                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1762                         break;
1763
1764                 default:
1765                         /* FIXME: what do we do here? */
1766                         break;
1767                 }
1768
1769                 /* Frame is decoded return it to the controller */
1770                 scic_sds_controller_release_frame(scic, frame_index);
1771
1772                 return status;
1773         }
1774
1775         case SCI_REQ_STP_PIO_DATA_IN: {
1776                 struct dev_to_host_fis *frame_header;
1777                 struct sata_fis_data *frame_buffer;
1778
1779                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1780                                                                        frame_index,
1781                                                                        (void **)&frame_header);
1782
1783                 if (status != SCI_SUCCESS) {
1784                         dev_err(scic_to_dev(scic),
1785                                 "%s: SCIC IO Request 0x%p could not get frame "
1786                                 "header for frame index %d, status %x\n",
1787                                 __func__,
1788                                 stp_req,
1789                                 frame_index,
1790                                 status);
1791                         return status;
1792                 }
1793
1794                 if (frame_header->fis_type != FIS_DATA) {
1795                         dev_err(scic_to_dev(scic),
1796                                 "%s: SCIC PIO Request 0x%p received frame %d "
1797                                 "with fis type 0x%02x when expecting a data "
1798                                 "fis.\n",
1799                                 __func__,
1800                                 stp_req,
1801                                 frame_index,
1802                                 frame_header->fis_type);
1803
1804                         scic_sds_request_set_status(ireq,
1805                                                     SCU_TASK_DONE_GOOD,
1806                                                     SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
1807
1808                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1809
1810                         /* Frame is decoded return it to the controller */
1811                         scic_sds_controller_release_frame(scic, frame_index);
1812                         return status;
1813                 }
1814
1815                 if (stp_req->sgl.index < 0) {
1816                         ireq->saved_rx_frame_index = frame_index;
1817                         stp_req->pio_len = 0;
1818                 } else {
1819                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1820                                                                       frame_index,
1821                                                                       (void **)&frame_buffer);
1822
1823                         status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
1824                                                                             (u8 *)frame_buffer);
1825
1826                         /* Frame is decoded return it to the controller */
1827                         scic_sds_controller_release_frame(scic, frame_index);
1828                 }
1829
1830                 /* Check for the end of the transfer, are there more
1831                  * bytes remaining for this data transfer
1832                  */
1833                 if (status != SCI_SUCCESS || stp_req->pio_len != 0)
1834                         return status;
1835
1836                 if ((stp_req->status & ATA_BUSY) == 0) {
1837                         scic_sds_request_set_status(ireq,
1838                                                     SCU_TASK_DONE_CHECK_RESPONSE,
1839                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1840
1841                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1842                 } else {
1843                         sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1844                 }
1845                 return status;
1846         }
1847
1848         case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1849                 struct dev_to_host_fis *frame_header;
1850                 u32 *frame_buffer;
1851
1852                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1853                                                                        frame_index,
1854                                                                        (void **)&frame_header);
1855                 if (status != SCI_SUCCESS) {
1856                         dev_err(scic_to_dev(scic),
1857                                 "%s: SCIC IO Request 0x%p could not get frame "
1858                                 "header for frame index %d, status %x\n",
1859                                 __func__,
1860                                 stp_req,
1861                                 frame_index,
1862                                 status);
1863                         return status;
1864                 }
1865
1866                 switch (frame_header->fis_type) {
1867                 case FIS_REGD2H:
1868                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1869                                                                       frame_index,
1870                                                                       (void **)&frame_buffer);
1871
1872                         scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1873                                                                frame_header,
1874                                                                frame_buffer);
1875
1876                         /* The command has completed with error */
1877                         scic_sds_request_set_status(ireq,
1878                                                     SCU_TASK_DONE_CHECK_RESPONSE,
1879                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1880                         break;
1881
1882                 default:
1883                         dev_warn(scic_to_dev(scic),
1884                                  "%s: IO Request:0x%p Frame Id:%d protocol "
1885                                  "violation occurred\n",
1886                                  __func__,
1887                                  stp_req,
1888                                  frame_index);
1889
1890                         scic_sds_request_set_status(ireq,
1891                                                     SCU_TASK_DONE_UNEXP_FIS,
1892                                                     SCI_FAILURE_PROTOCOL_VIOLATION);
1893                         break;
1894                 }
1895
1896                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1897
1898                 /* Frame has been decoded return it to the controller */
1899                 scic_sds_controller_release_frame(scic, frame_index);
1900
1901                 return status;
1902         }
1903         case SCI_REQ_ABORTING:
1904                 /*
1905                  * TODO: Is it even possible to get an unsolicited frame in the
1906                  * aborting state?
1907                  */
1908                 scic_sds_controller_release_frame(scic, frame_index);
1909                 return SCI_SUCCESS;
1910
1911         default:
1912                 dev_warn(scic_to_dev(scic),
1913                          "%s: SCIC IO Request given unexpected frame %x while "
1914                          "in state %d\n",
1915                          __func__,
1916                          frame_index,
1917                          state);
1918
1919                 scic_sds_controller_release_frame(scic, frame_index);
1920                 return SCI_FAILURE_INVALID_STATE;
1921         }
1922 }
1923
/* Handle a task-context completion for an in-flight UDMA STP request.
 * A good status completes the request immediately.  Link-level error
 * statuses suspend the remote device and either complete the request now
 * (if the D2H register FIS was already captured in stp.rsp) or transition
 * to SCI_REQ_STP_UDMA_WAIT_D2H to wait for it.  Note the deliberate
 * fall-through from the error-status cases into default.  Always returns
 * SCI_SUCCESS (status is initialized and never modified).
 */
static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_stp_request_udma_complete_request(ireq,
							   SCU_TASK_DONE_GOOD,
							   SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/* We must check the response buffer to see if the D2H
		 * Register FIS was received before we got the TC
		 * completion.
		 */
		if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
			scic_sds_remote_device_suspend(ireq->target_device,
				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));

			scic_sds_stp_request_udma_complete_request(ireq,
								   SCU_TASK_DONE_CHECK_RESPONSE,
								   SCI_FAILURE_IO_RESPONSE_VALID);
		} else {
			/* If we have an error completion status for the
			 * TC then we can expect a D2H register FIS from
			 * the device so we must change state to wait
			 * for it
			 */
			sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
		}
		break;

	/* TODO Check to see if any of these completion status need to
	 * wait for the device to host register fis.
	 */
	/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
	 * - this comes only for B0
	 */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
		scic_sds_remote_device_suspend(ireq->target_device,
			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
	/* Fall through to the default case */
	default:
		/* All other completion status cause the IO to be complete. */
		scic_sds_stp_request_udma_complete_request(ireq,
					SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
					SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	return status;
}
1982
1983 static enum sci_status
1984 stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
1985                                                    u32 completion_code)
1986 {
1987         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1988         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1989                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1990                                             SCI_SUCCESS);
1991
1992                 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
1993                 break;
1994
1995         default:
1996                 /*
1997                  * All other completion status cause the IO to be complete.
1998                  * If a NAK was received, then it is up to the user to retry
1999                  * the request.
2000                  */
2001                 scic_sds_request_set_status(ireq,
2002                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2003                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2004
2005                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2006                 break;
2007         }
2008
2009         return SCI_SUCCESS;
2010 }
2011
2012 static enum sci_status
2013 stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2014                                                      u32 completion_code)
2015 {
2016         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2017         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2018                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
2019                                             SCI_SUCCESS);
2020
2021                 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2022                 break;
2023
2024         default:
2025                 /* All other completion status cause the IO to be complete.  If
2026                  * a NAK was received, then it is up to the user to retry the
2027                  * request.
2028                  */
2029                 scic_sds_request_set_status(ireq,
2030                         SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2031                         SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2032
2033                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2034                 break;
2035         }
2036
2037         return SCI_SUCCESS;
2038 }
2039
2040 enum sci_status
2041 scic_sds_io_request_tc_completion(struct isci_request *ireq,
2042                                   u32 completion_code)
2043 {
2044         enum sci_base_request_states state;
2045         struct scic_sds_controller *scic = ireq->owning_controller;
2046
2047         state = ireq->sm.current_state_id;
2048
2049         switch (state) {
2050         case SCI_REQ_STARTED:
2051                 return request_started_state_tc_event(ireq, completion_code);
2052
2053         case SCI_REQ_TASK_WAIT_TC_COMP:
2054                 return ssp_task_request_await_tc_event(ireq,
2055                                                        completion_code);
2056
2057         case SCI_REQ_SMP_WAIT_RESP:
2058                 return smp_request_await_response_tc_event(ireq,
2059                                                            completion_code);
2060
2061         case SCI_REQ_SMP_WAIT_TC_COMP:
2062                 return smp_request_await_tc_event(ireq, completion_code);
2063
2064         case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2065                 return stp_request_udma_await_tc_event(ireq,
2066                                                        completion_code);
2067
2068         case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2069                 return stp_request_non_data_await_h2d_tc_event(ireq,
2070                                                                completion_code);
2071
2072         case SCI_REQ_STP_PIO_WAIT_H2D:
2073                 return stp_request_pio_await_h2d_completion_tc_event(ireq,
2074                                                                      completion_code);
2075
2076         case SCI_REQ_STP_PIO_DATA_OUT:
2077                 return pio_data_out_tx_done_tc_event(ireq, completion_code);
2078
2079         case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2080                 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2081                                                                           completion_code);
2082
2083         case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2084                 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2085                                                                             completion_code);
2086
2087         case SCI_REQ_ABORTING:
2088                 return request_aborting_state_tc_event(ireq,
2089                                                        completion_code);
2090
2091         default:
2092                 dev_warn(scic_to_dev(scic),
2093                          "%s: SCIC IO Request given task completion "
2094                          "notification %x while in wrong state %d\n",
2095                          __func__,
2096                          completion_code,
2097                          state);
2098                 return SCI_FAILURE_INVALID_STATE;
2099         }
2100 }
2101
2102 /**
2103  * isci_request_process_response_iu() - This function sets the status and
2104  *    response iu, in the task struct, from the request object for the upper
2105  *    layer driver.
2106  * @sas_task: This parameter is the task struct from the upper layer driver.
2107  * @resp_iu: This parameter points to the response iu of the completed request.
2108  * @dev: This parameter specifies the linux device struct.
2109  *
2110  * none.
2111  */
2112 static void isci_request_process_response_iu(
2113         struct sas_task *task,
2114         struct ssp_response_iu *resp_iu,
2115         struct device *dev)
2116 {
2117         dev_dbg(dev,
2118                 "%s: resp_iu = %p "
2119                 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2120                 "resp_iu->response_data_len = %x, "
2121                 "resp_iu->sense_data_len = %x\nrepsonse data: ",
2122                 __func__,
2123                 resp_iu,
2124                 resp_iu->status,
2125                 resp_iu->datapres,
2126                 resp_iu->response_data_len,
2127                 resp_iu->sense_data_len);
2128
2129         task->task_status.stat = resp_iu->status;
2130
2131         /* libsas updates the task status fields based on the response iu. */
2132         sas_ssp_task_response(dev, task, resp_iu);
2133 }
2134
2135 /**
2136  * isci_request_set_open_reject_status() - This function prepares the I/O
2137  *    completion for OPEN_REJECT conditions.
2138  * @request: This parameter is the completed isci_request object.
2139  * @response_ptr: This parameter specifies the service response for the I/O.
2140  * @status_ptr: This parameter specifies the exec status for the I/O.
2141  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2142  *    the LLDD with respect to completing this request or forcing an abort
2143  *    condition on the I/O.
2144  * @open_rej_reason: This parameter specifies the encoded reason for the
2145  *    abandon-class reject.
2146  *
2147  * none.
2148  */
2149 static void isci_request_set_open_reject_status(
2150         struct isci_request *request,
2151         struct sas_task *task,
2152         enum service_response *response_ptr,
2153         enum exec_status *status_ptr,
2154         enum isci_completion_selection *complete_to_host_ptr,
2155         enum sas_open_rej_reason open_rej_reason)
2156 {
2157         /* Task in the target is done. */
2158         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2159         *response_ptr                     = SAS_TASK_UNDELIVERED;
2160         *status_ptr                       = SAS_OPEN_REJECT;
2161         *complete_to_host_ptr             = isci_perform_normal_io_completion;
2162         task->task_status.open_rej_reason = open_rej_reason;
2163 }
2164
/**
 * isci_request_handle_controller_specific_errors() - This function decodes
 *    controller-specific I/O completion error conditions.
 * @idev: This parameter is the remote device for the I/O; NULL indicates
 *    the device has been/is being stopped.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task associated with @request.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 * @complete_to_host_ptr: This parameter specifies the action to be taken by
 *    the LLDD with respect to completing this request or forcing an abort
 *    condition on the I/O.
 *
 * none.
 */
static void isci_request_handle_controller_specific_errors(
	struct isci_remote_device *idev,
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum isci_completion_selection *complete_to_host_ptr)
{
	unsigned int cstatus;

	/* The SCU completion code stashed on the request by the core. */
	cstatus = request->scu_status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
		"- controller status = 0x%x\n",
		__func__, request, cstatus);

	/* Decode the controller-specific errors; most
	 * important is to recognize those conditions in which
	 * the target may still have a task outstanding that
	 * must be aborted.
	 *
	 * Note that there are SCU completion codes being
	 * named in the decode below for which SCIC has already
	 * done work to handle them in a way other than as
	 * a controller-specific completion code; these are left
	 * in the decode below for completeness sake.
	 */
	switch (cstatus) {
	case SCU_TASK_DONE_DMASETUP_DIRERR:
	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
	case SCU_TASK_DONE_XFERCNT_ERR:
		/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
		if (task->task_proto == SAS_PROTOCOL_SMP) {
			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
			*response_ptr = SAS_TASK_COMPLETE;

			/* See if the device has been/is being stopped. Note
			 * that we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			if (!idev)
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAS_ABORTED_TASK;

			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr =
				isci_perform_normal_io_completion;
		} else {
			/* Task in the target is not done. */
			*response_ptr = SAS_TASK_UNDELIVERED;

			if (!idev)
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAM_STAT_TASK_ABORTED;

			/* Target still owns the task: force the error path
			 * (sas_task_abort) rather than a normal completion.
			 */
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr =
				isci_perform_error_io_completion;
		}

		break;

	case SCU_TASK_DONE_CRC_ERR:
	case SCU_TASK_DONE_NAK_CMD_ERR:
	case SCU_TASK_DONE_EXCESS_DATA:
	case SCU_TASK_DONE_UNEXP_FIS:
	/* Also SCU_TASK_DONE_UNEXP_RESP: */
	case SCU_TASK_DONE_VIIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_IIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_RNCNV_OUTBOUND:	/* TODO - conditions? */
		/* These are conditions in which the target
		 * has completed the task, so that no cleanup
		 * is necessary.
		 */
		*response_ptr = SAS_TASK_COMPLETE;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			*status_ptr = SAS_DEVICE_UNKNOWN;
		else
			*status_ptr = SAS_ABORTED_TASK;

		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

		*complete_to_host_ptr = isci_perform_normal_io_completion;
		break;


	/* Note that the only open reject completion codes seen here will be
	 * abandon-class codes; all others are automatically retried in the SCU.
	 */
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:

		/* Note - the return of AB0 will change when
		 * libsas implements detection of zone violations.
		 */
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB0);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB1);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB2);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB3);
		break;

	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_BAD_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_STP_NORES);
		break;

	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_EPROTO);
		break;

	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_CONN_RATE);
		break;

	case SCU_TASK_DONE_LL_R_ERR:
	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
	case SCU_TASK_DONE_LL_PERR:
	case SCU_TASK_DONE_LL_SY_TERM:
	/* Also SCU_TASK_DONE_NAK_ERR:*/
	case SCU_TASK_DONE_LL_LF_TERM:
	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
	case SCU_TASK_DONE_LL_ABORT_ERR:
	case SCU_TASK_DONE_SEQ_INV_TYPE:
	/* Also SCU_TASK_DONE_UNEXP_XR: */
	case SCU_TASK_DONE_XR_IU_LEN_ERR:
	case SCU_TASK_DONE_INV_FIS_LEN:
	/* Also SCU_TASK_DONE_XR_WD_LEN: */
	case SCU_TASK_DONE_SDMA_ERR:
	case SCU_TASK_DONE_OFFSET_ERR:
	case SCU_TASK_DONE_MAX_PLD_ERR:
	case SCU_TASK_DONE_LF_ERR:
	case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
	case SCU_TASK_DONE_SMP_LL_RX_ERR:
	case SCU_TASK_DONE_UNEXP_DATA:
	case SCU_TASK_DONE_UNEXP_SDBFIS:
	case SCU_TASK_DONE_REG_ERR:
	case SCU_TASK_DONE_SDB_ERR:
	case SCU_TASK_DONE_TASK_ABORT:
	default:
		/* Task in the target is not done. */
		*response_ptr = SAS_TASK_UNDELIVERED;
		*status_ptr = SAM_STAT_TASK_ABORTED;

		if (task->task_proto == SAS_PROTOCOL_SMP) {
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr = isci_perform_normal_io_completion;
		} else {
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr = isci_perform_error_io_completion;
		}
		break;
	}
}
2383
2384 /**
2385  * isci_task_save_for_upper_layer_completion() - This function saves the
2386  *    request for later completion to the upper layer driver.
2387  * @host: This parameter is a pointer to the host on which the the request
2388  *    should be queued (either as an error or success).
2389  * @request: This parameter is the completed request.
2390  * @response: This parameter is the response code for the completed task.
2391  * @status: This parameter is the status code for the completed task.
2392  *
2393  * none.
2394  */
2395 static void isci_task_save_for_upper_layer_completion(
2396         struct isci_host *host,
2397         struct isci_request *request,
2398         enum service_response response,
2399         enum exec_status status,
2400         enum isci_completion_selection task_notification_selection)
2401 {
2402         struct sas_task *task = isci_request_access_task(request);
2403
2404         task_notification_selection
2405                 = isci_task_set_completion_status(task, response, status,
2406                                                   task_notification_selection);
2407
2408         /* Tasks aborted specifically by a call to the lldd_abort_task
2409          * function should not be completed to the host in the regular path.
2410          */
2411         switch (task_notification_selection) {
2412
2413         case isci_perform_normal_io_completion:
2414
2415                 /* Normal notification (task_done) */
2416                 dev_dbg(&host->pdev->dev,
2417                         "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2418                         __func__,
2419                         task,
2420                         task->task_status.resp, response,
2421                         task->task_status.stat, status);
2422                 /* Add to the completed list. */
2423                 list_add(&request->completed_node,
2424                          &host->requests_to_complete);
2425
2426                 /* Take the request off the device's pending request list. */
2427                 list_del_init(&request->dev_node);
2428                 break;
2429
2430         case isci_perform_aborted_io_completion:
2431                 /* No notification to libsas because this request is
2432                  * already in the abort path.
2433                  */
2434                 dev_warn(&host->pdev->dev,
2435                          "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2436                          __func__,
2437                          task,
2438                          task->task_status.resp, response,
2439                          task->task_status.stat, status);
2440
2441                 /* Wake up whatever process was waiting for this
2442                  * request to complete.
2443                  */
2444                 WARN_ON(request->io_request_completion == NULL);
2445
2446                 if (request->io_request_completion != NULL) {
2447
2448                         /* Signal whoever is waiting that this
2449                         * request is complete.
2450                         */
2451                         complete(request->io_request_completion);
2452                 }
2453                 break;
2454
2455         case isci_perform_error_io_completion:
2456                 /* Use sas_task_abort */
2457                 dev_warn(&host->pdev->dev,
2458                          "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2459                          __func__,
2460                          task,
2461                          task->task_status.resp, response,
2462                          task->task_status.stat, status);
2463                 /* Add to the aborted list. */
2464                 list_add(&request->completed_node,
2465                          &host->requests_to_errorback);
2466                 break;
2467
2468         default:
2469                 dev_warn(&host->pdev->dev,
2470                          "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2471                          __func__,
2472                          task,
2473                          task->task_status.resp, response,
2474                          task->task_status.stat, status);
2475
2476                 /* Add to the error to libsas list. */
2477                 list_add(&request->completed_node,
2478                          &host->requests_to_errorback);
2479                 break;
2480         }
2481 }
2482
2483 static void isci_request_io_request_complete(struct isci_host *isci_host,
2484                                              struct isci_request *request,
2485                                              enum sci_io_status completion_status)
2486 {
2487         struct sas_task *task = isci_request_access_task(request);
2488         struct ssp_response_iu *resp_iu;
2489         void *resp_buf;
2490         unsigned long task_flags;
2491         struct isci_remote_device *idev = isci_lookup_device(task->dev);
2492         enum service_response response       = SAS_TASK_UNDELIVERED;
2493         enum exec_status status         = SAS_ABORTED_TASK;
2494         enum isci_request_status request_status;
2495         enum isci_completion_selection complete_to_host
2496                 = isci_perform_normal_io_completion;
2497
2498         dev_dbg(&isci_host->pdev->dev,
2499                 "%s: request = %p, task = %p,\n"
2500                 "task->data_dir = %d completion_status = 0x%x\n",
2501                 __func__,
2502                 request,
2503                 task,
2504                 task->data_dir,
2505                 completion_status);
2506
2507         spin_lock(&request->state_lock);
2508         request_status = isci_request_get_state(request);
2509
2510         /* Decode the request status.  Note that if the request has been
2511          * aborted by a task management function, we don't care
2512          * what the status is.
2513          */
2514         switch (request_status) {
2515
2516         case aborted:
2517                 /* "aborted" indicates that the request was aborted by a task
2518                  * management function, since once a task management request is
2519                  * perfomed by the device, the request only completes because
2520                  * of the subsequent driver terminate.
2521                  *
2522                  * Aborted also means an external thread is explicitly managing
2523                  * this request, so that we do not complete it up the stack.
2524                  *
2525                  * The target is still there (since the TMF was successful).
2526                  */
2527                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2528                 response = SAS_TASK_COMPLETE;
2529
2530                 /* See if the device has been/is being stopped. Note
2531                  * that we ignore the quiesce state, since we are
2532                  * concerned about the actual device state.
2533                  */
2534                 if (!idev)
2535                         status = SAS_DEVICE_UNKNOWN;
2536                 else
2537                         status = SAS_ABORTED_TASK;
2538
2539                 complete_to_host = isci_perform_aborted_io_completion;
2540                 /* This was an aborted request. */
2541
2542                 spin_unlock(&request->state_lock);
2543                 break;
2544
2545         case aborting:
2546                 /* aborting means that the task management function tried and
2547                  * failed to abort the request. We need to note the request
2548                  * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2549                  * target as down.
2550                  *
2551                  * Aborting also means an external thread is explicitly managing
2552                  * this request, so that we do not complete it up the stack.
2553                  */
2554                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2555                 response = SAS_TASK_UNDELIVERED;
2556
2557                 if (!idev)
2558                         /* The device has been /is being stopped. Note that
2559                          * we ignore the quiesce state, since we are
2560                          * concerned about the actual device state.
2561                          */
2562                         status = SAS_DEVICE_UNKNOWN;
2563                 else
2564                         status = SAS_PHY_DOWN;
2565
2566                 complete_to_host = isci_perform_aborted_io_completion;
2567
2568                 /* This was an aborted request. */
2569
2570                 spin_unlock(&request->state_lock);
2571                 break;
2572
2573         case terminating:
2574
2575                 /* This was an terminated request.  This happens when
2576                  * the I/O is being terminated because of an action on
2577                  * the device (reset, tear down, etc.), and the I/O needs
2578                  * to be completed up the stack.
2579                  */
2580                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2581                 response = SAS_TASK_UNDELIVERED;
2582
2583                 /* See if the device has been/is being stopped. Note
2584                  * that we ignore the quiesce state, since we are
2585                  * concerned about the actual device state.
2586                  */
2587                 if (!idev)
2588                         status = SAS_DEVICE_UNKNOWN;
2589                 else
2590                         status = SAS_ABORTED_TASK;
2591
2592                 complete_to_host = isci_perform_aborted_io_completion;
2593
2594                 /* This was a terminated request. */
2595
2596                 spin_unlock(&request->state_lock);
2597                 break;
2598
2599         case dead:
2600                 /* This was a terminated request that timed-out during the
2601                  * termination process.  There is no task to complete to
2602                  * libsas.
2603                  */
2604                 complete_to_host = isci_perform_normal_io_completion;
2605                 spin_unlock(&request->state_lock);
2606                 break;
2607
2608         default:
2609
2610                 /* The request is done from an SCU HW perspective. */
2611                 request->status = completed;
2612
2613                 spin_unlock(&request->state_lock);
2614
2615                 /* This is an active request being completed from the core. */
2616                 switch (completion_status) {
2617
2618                 case SCI_IO_FAILURE_RESPONSE_VALID:
2619                         dev_dbg(&isci_host->pdev->dev,
2620                                 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2621                                 __func__,
2622                                 request,
2623                                 task);
2624
2625                         if (sas_protocol_ata(task->task_proto)) {
2626                                 resp_buf = &request->stp.rsp;
2627                                 isci_request_process_stp_response(task,
2628                                                                   resp_buf);
2629                         } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2630
2631                                 /* crack the iu response buffer. */
2632                                 resp_iu = &request->ssp.rsp;
2633                                 isci_request_process_response_iu(task, resp_iu,
2634                                                                  &isci_host->pdev->dev);
2635
2636                         } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2637
2638                                 dev_err(&isci_host->pdev->dev,
2639                                         "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2640                                         "SAS_PROTOCOL_SMP protocol\n",
2641                                         __func__);
2642
2643                         } else
2644                                 dev_err(&isci_host->pdev->dev,
2645                                         "%s: unknown protocol\n", __func__);
2646
2647                         /* use the task status set in the task struct by the
2648                          * isci_request_process_response_iu call.
2649                          */
2650                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2651                         response = task->task_status.resp;
2652                         status = task->task_status.stat;
2653                         break;
2654
2655                 case SCI_IO_SUCCESS:
2656                 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2657
2658                         response = SAS_TASK_COMPLETE;
2659                         status   = SAM_STAT_GOOD;
2660                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2661
2662                         if (task->task_proto == SAS_PROTOCOL_SMP) {
2663                                 void *rsp = &request->smp.rsp;
2664
2665                                 dev_dbg(&isci_host->pdev->dev,
2666                                         "%s: SMP protocol completion\n",
2667                                         __func__);
2668
2669                                 sg_copy_from_buffer(
2670                                         &task->smp_task.smp_resp, 1,
2671                                         rsp, sizeof(struct smp_resp));
2672                         } else if (completion_status
2673                                    == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2674
2675                                 /* This was an SSP / STP / SATA transfer.
2676                                  * There is a possibility that less data than
2677                                  * the maximum was transferred.
2678                                  */
2679                                 u32 transferred_length = sci_req_tx_bytes(request);
2680
2681                                 task->task_status.residual
2682                                         = task->total_xfer_len - transferred_length;
2683
2684                                 /* If there were residual bytes, call this an
2685                                  * underrun.
2686                                  */
2687                                 if (task->task_status.residual != 0)
2688                                         status = SAS_DATA_UNDERRUN;
2689
2690                                 dev_dbg(&isci_host->pdev->dev,
2691                                         "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2692                                         __func__,
2693                                         status);
2694
2695                         } else
2696                                 dev_dbg(&isci_host->pdev->dev,
2697                                         "%s: SCI_IO_SUCCESS\n",
2698                                         __func__);
2699
2700                         break;
2701
2702                 case SCI_IO_FAILURE_TERMINATED:
2703                         dev_dbg(&isci_host->pdev->dev,
2704                                 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2705                                 __func__,
2706                                 request,
2707                                 task);
2708
2709                         /* The request was terminated explicitly.  No handling
2710                          * is needed in the SCSI error handler path.
2711                          */
2712                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2713                         response = SAS_TASK_UNDELIVERED;
2714
2715                         /* See if the device has been/is being stopped. Note
2716                          * that we ignore the quiesce state, since we are
2717                          * concerned about the actual device state.
2718                          */
2719                         if (!idev)
2720                                 status = SAS_DEVICE_UNKNOWN;
2721                         else
2722                                 status = SAS_ABORTED_TASK;
2723
2724                         complete_to_host = isci_perform_normal_io_completion;
2725                         break;
2726
2727                 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2728
2729                         isci_request_handle_controller_specific_errors(
2730                                 idev, request, task, &response, &status,
2731                                 &complete_to_host);
2732
2733                         break;
2734
2735                 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2736                         /* This is a special case, in that the I/O completion
2737                          * is telling us that the device needs a reset.
2738                          * In order for the device reset condition to be
2739                          * noticed, the I/O has to be handled in the error
2740                          * handler.  Set the reset flag and cause the
2741                          * SCSI error thread to be scheduled.
2742                          */
2743                         spin_lock_irqsave(&task->task_state_lock, task_flags);
2744                         task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2745                         spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2746
2747                         /* Fail the I/O. */
2748                         response = SAS_TASK_UNDELIVERED;
2749                         status = SAM_STAT_TASK_ABORTED;
2750
2751                         complete_to_host = isci_perform_error_io_completion;
2752                         clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2753                         break;
2754
2755                 case SCI_FAILURE_RETRY_REQUIRED:
2756
2757                         /* Fail the I/O so it can be retried. */
2758                         response = SAS_TASK_UNDELIVERED;
2759                         if (!idev)
2760                                 status = SAS_DEVICE_UNKNOWN;
2761                         else
2762                                 status = SAS_ABORTED_TASK;
2763
2764                         complete_to_host = isci_perform_normal_io_completion;
2765                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2766                         break;
2767
2768
2769                 default:
2770                         /* Catch any otherwise unhandled error codes here. */
2771                         dev_warn(&isci_host->pdev->dev,
2772                                  "%s: invalid completion code: 0x%x - "
2773                                  "isci_request = %p\n",
2774                                  __func__, completion_status, request);
2775
2776                         response = SAS_TASK_UNDELIVERED;
2777
2778                         /* See if the device has been/is being stopped. Note
2779                          * that we ignore the quiesce state, since we are
2780                          * concerned about the actual device state.
2781                          */
2782                         if (!idev)
2783                                 status = SAS_DEVICE_UNKNOWN;
2784                         else
2785                                 status = SAS_ABORTED_TASK;
2786
2787                         if (SAS_PROTOCOL_SMP == task->task_proto) {
2788                                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2789                                 complete_to_host = isci_perform_normal_io_completion;
2790                         } else {
2791                                 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2792                                 complete_to_host = isci_perform_error_io_completion;
2793                         }
2794                         break;
2795                 }
2796                 break;
2797         }
2798
2799         switch (task->task_proto) {
2800         case SAS_PROTOCOL_SSP:
2801                 if (task->data_dir == DMA_NONE)
2802                         break;
2803                 if (task->num_scatter == 0)
2804                         /* 0 indicates a single dma address */
2805                         dma_unmap_single(&isci_host->pdev->dev,
2806                                          request->zero_scatter_daddr,
2807                                          task->total_xfer_len, task->data_dir);
2808                 else  /* unmap the sgl dma addresses */
2809                         dma_unmap_sg(&isci_host->pdev->dev, task->scatter,
2810                                      request->num_sg_entries, task->data_dir);
2811                 break;
2812         case SAS_PROTOCOL_SMP: {
2813                 struct scatterlist *sg = &task->smp_task.smp_req;
2814                 struct smp_req *smp_req;
2815                 void *kaddr;
2816
2817                 dma_unmap_sg(&isci_host->pdev->dev, sg, 1, DMA_TO_DEVICE);
2818
2819                 /* need to swab it back in case the command buffer is re-used */
2820                 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
2821                 smp_req = kaddr + sg->offset;
2822                 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2823                 kunmap_atomic(kaddr, KM_IRQ0);
2824                 break;
2825         }
2826         default:
2827                 break;
2828         }
2829
2830         /* Put the completed request on the correct list */
2831         isci_task_save_for_upper_layer_completion(isci_host, request, response,
2832                                                   status, complete_to_host
2833                                                   );
2834
2835         /* complete the io request to the core. */
2836         scic_controller_complete_io(&isci_host->sci,
2837                                     request->target_device,
2838                                     request);
2839         isci_put_device(idev);
2840
2841         /* set terminated handle so it cannot be completed or
2842          * terminated again, and to cause any calls into abort
2843          * task to recognize the already completed case.
2844          */
2845         set_bit(IREQ_TERMINATED, &request->flags);
2846 }
2847
/* Entry action for SCI_REQ_STARTED: route "unaccelerated" request types
 * (anything that is not SSP or SATA NCQ I/O) into a protocol specific
 * substate.  Accelerated requests take none of the branches below and
 * simply remain in SCI_REQ_STARTED.
 */
static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
	struct domain_device *dev = ireq->target_device->domain_dev;
	struct sas_task *task;

	/* XXX as hch said always creating an internal sas_task for tmf
	 * requests would simplify the driver
	 */
	task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;

	/* all unaccelerated request types (non ssp or ncq) handled with
	 * substates.
	 *
	 * NOTE(review): the branch order below matters.  A NULL task means
	 * this is a TMF; the SATA SRST test in the second branch is only
	 * reached for TMFs whose target is NOT a SAS end device, because
	 * the first branch already consumed those.
	 */
	if (!task && dev->dev_type == SAS_END_DEV) {
		/* SSP task management: wait for task context completion */
		sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
	} else if (!task &&
		   (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
		    isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
		/* SATA soft reset: first phase asserts SRST via an H2D FIS */
		sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
		sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
	} else if (task && sas_protocol_ata(task->task_proto) &&
		   !task->ata_task.use_ncq) {
		u32 state;

		/* non-NCQ ATA: select the substate by transfer mode */
		if (task->data_dir == DMA_NONE)
			state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
		else if (task->ata_task.dma_xfer)
			state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
		else /* PIO */
			state = SCI_REQ_STP_PIO_WAIT_H2D;

		sci_change_state(sm, state);
	}
}
2884
2885 static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
2886 {
2887         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2888         struct scic_sds_controller *scic = ireq->owning_controller;
2889         struct isci_host *ihost = scic_to_ihost(scic);
2890
2891         /* Tell the SCI_USER that the IO request is complete */
2892         if (!test_bit(IREQ_TMF, &ireq->flags))
2893                 isci_request_io_request_complete(ihost, ireq,
2894                                                  ireq->sci_status);
2895         else
2896                 isci_task_request_complete(ihost, ireq, ireq->sci_status);
2897 }
2898
2899 static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
2900 {
2901         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2902
2903         /* Setting the abort bit in the Task Context is required by the silicon. */
2904         ireq->tc->abort = 1;
2905 }
2906
2907 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2908 {
2909         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2910
2911         scic_sds_remote_device_set_working_request(ireq->target_device,
2912                                                    ireq);
2913 }
2914
2915 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2916 {
2917         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2918
2919         scic_sds_remote_device_set_working_request(ireq->target_device,
2920                                                    ireq);
2921 }
2922
2923 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
2924 {
2925         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2926
2927         scic_sds_remote_device_set_working_request(ireq->target_device,
2928                                                    ireq);
2929 }
2930
/* Entry action for SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: second phase of
 * the SATA soft reset.  The SRST-assert H2D FIS has completed, so clear
 * the SRST bit and the task context's control-frame flag, then re-post
 * the request so the deasserting FIS is transmitted.
 */
static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
	struct scu_task_context *tc = ireq->tc;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit */
	h2d_fis = &ireq->stp.cmd;
	h2d_fis->control = 0;

	/* Clear the TC control bit */
	tc->control_frame = 0;

	/* Re-post the request so the updated FIS goes out on the wire */
	status = scic_controller_continue_io(ireq);
	WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
}
2948
/* State table for the request state machine.  States that need no entry
 * action (they are driven entirely by completion/frame handlers) are left
 * empty.
 */
static const struct sci_base_state scic_sds_request_state_table[] = {
	[SCI_REQ_INIT] = { },
	[SCI_REQ_CONSTRUCTED] = { },
	[SCI_REQ_STARTED] = {
		.enter_state = scic_sds_request_started_state_enter,
	},
	/* STP / SATA substates */
	[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
	[SCI_REQ_STP_PIO_WAIT_H2D] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
	[SCI_REQ_STP_PIO_DATA_IN] = { },
	[SCI_REQ_STP_PIO_DATA_OUT] = { },
	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
	/* SATA soft reset substates */
	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
	/* task management and SMP substates */
	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
	[SCI_REQ_SMP_WAIT_RESP] = { },
	[SCI_REQ_SMP_WAIT_TC_COMP] = { },
	[SCI_REQ_COMPLETED] = {
		.enter_state = scic_sds_request_completed_state_enter,
	},
	[SCI_REQ_ABORTING] = {
		.enter_state = scic_sds_request_aborting_state_enter,
	},
	[SCI_REQ_FINAL] = { },
};
2986
2987 static void
2988 scic_sds_general_request_construct(struct scic_sds_controller *scic,
2989                                    struct isci_remote_device *idev,
2990                                    struct isci_request *ireq)
2991 {
2992         sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT);
2993
2994         ireq->target_device = idev;
2995         ireq->protocol = SCIC_NO_PROTOCOL;
2996         ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
2997
2998         ireq->sci_status   = SCI_SUCCESS;
2999         ireq->scu_status   = 0;
3000         ireq->post_context = 0xFFFFFFFF;
3001 }
3002
3003 static enum sci_status
3004 scic_io_request_construct(struct scic_sds_controller *scic,
3005                           struct isci_remote_device *idev,
3006                           struct isci_request *ireq)
3007 {
3008         struct domain_device *dev = idev->domain_dev;
3009         enum sci_status status = SCI_SUCCESS;
3010
3011         /* Build the common part of the request */
3012         scic_sds_general_request_construct(scic, idev, ireq);
3013
3014         if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3015                 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3016
3017         if (dev->dev_type == SAS_END_DEV)
3018                 /* pass */;
3019         else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3020                 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
3021         else if (dev_is_expander(dev))
3022                 /* pass */;
3023         else
3024                 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3025
3026         memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
3027
3028         return status;
3029 }
3030
3031 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
3032                                             struct isci_remote_device *idev,
3033                                             u16 io_tag, struct isci_request *ireq)
3034 {
3035         struct domain_device *dev = idev->domain_dev;
3036         enum sci_status status = SCI_SUCCESS;
3037
3038         /* Build the common part of the request */
3039         scic_sds_general_request_construct(scic, idev, ireq);
3040
3041         if (dev->dev_type == SAS_END_DEV ||
3042             dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3043                 set_bit(IREQ_TMF, &ireq->flags);
3044                 memset(ireq->tc, 0, sizeof(struct scu_task_context));
3045         } else
3046                 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3047
3048         return status;
3049 }
3050
3051 static enum sci_status isci_request_ssp_request_construct(
3052         struct isci_request *request)
3053 {
3054         enum sci_status status;
3055
3056         dev_dbg(&request->isci_host->pdev->dev,
3057                 "%s: request = %p\n",
3058                 __func__,
3059                 request);
3060         status = scic_io_request_construct_basic_ssp(request);
3061         return status;
3062 }
3063
3064 static enum sci_status isci_request_stp_request_construct(
3065         struct isci_request *request)
3066 {
3067         struct sas_task *task = isci_request_access_task(request);
3068         enum sci_status status;
3069         struct host_to_dev_fis *register_fis;
3070
3071         dev_dbg(&request->isci_host->pdev->dev,
3072                 "%s: request = %p\n",
3073                 __func__,
3074                 request);
3075
3076         /* Get the host_to_dev_fis from the core and copy
3077          * the fis from the task into it.
3078          */
3079         register_fis = isci_sata_task_to_fis_copy(task);
3080
3081         status = scic_io_request_construct_basic_sata(request);
3082
3083         /* Set the ncq tag in the fis, from the queue
3084          * command in the task.
3085          */
3086         if (isci_sata_is_task_ncq(task)) {
3087
3088                 isci_sata_set_ncq_tag(
3089                         register_fis,
3090                         task
3091                         );
3092         }
3093
3094         return status;
3095 }
3096
/* Build the SCU task context for an SMP request.  The request payload is
 * byte swapped in place (dword-wise) into the order the SCU expects and
 * DMA mapped; the first dword of the command IU is additionally copied
 * into the task context.  Returns SCI_FAILURE if the DMA mapping fails.
 */
static enum sci_status
scic_io_request_construct_smp(struct device *dev,
			      struct isci_request *ireq,
			      struct sas_task *task)
{
	struct scatterlist *sg = &task->smp_task.smp_req;
	struct isci_remote_device *idev;
	struct scu_task_context *task_context;
	struct isci_port *iport;
	struct smp_req *smp_req;
	void *kaddr;
	u8 req_len;
	u32 cmd;

	kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
	smp_req = kaddr + sg->offset;
	/*
	 * Look at the SMP requests' header fields; for certain SAS 1.x SMP
	 * functions under SAS 2.0, a zero request length really indicates
	 * a non-zero default length.
	 */
	if (smp_req->req_len == 0) {
		switch (smp_req->func) {
		case SMP_DISCOVER:
		case SMP_REPORT_PHY_ERR_LOG:
		case SMP_REPORT_PHY_SATA:
		case SMP_REPORT_ROUTE_INFO:
			smp_req->req_len = 2;
			break;
		case SMP_CONF_ROUTE_INFO:
		case SMP_PHY_CONTROL:
		case SMP_PHY_TEST_FUNCTION:
			smp_req->req_len = 9;
			break;
			/* Default - zero is a valid default for 2.0. */
		}
	}
	/* Snapshot the request length before the in-place byte swap */
	req_len = smp_req->req_len;
	/* byte swap the smp request */
	sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
	/* keep the (already swapped) first command IU dword for the TC */
	cmd = *(u32 *) smp_req;
	kunmap_atomic(kaddr, KM_IRQ0);

	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
		return SCI_FAILURE;

	ireq->protocol = SCIC_SMP_PROTOCOL;

	task_context = ireq->tc;

	idev = scic_sds_request_get_device(ireq);
	iport = scic_sds_request_get_port(ireq);

	/*
	 * Fill in the TC with the its required data
	 * 00h
	 */
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(scic);
	task_context->logical_port_index = scic_sds_port_get_index(iport);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
	task_context->abort = 0;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	/* 04h */
	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;
	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;

	/* 08h */
	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 1;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	/* 0ch */
	task_context->address_modifier = 0;

	/* 10h */
	task_context->ssp_command_iu_length = req_len;

	/* 14h */
	task_context->transfer_length_bytes = 0;

	/*
	 * 18h ~ 30h, protocol specific
	 * since commandIU has been build by framework at this point, we just
	 * copy the frist DWord from command IU to this location. */
	memcpy(&task_context->type.smp, &cmd, sizeof(u32));

	/*
	 * 40h
	 * "For SMP you could program it to zero. We would prefer that way
	 * so that done code will be consistent." - Venki
	 */
	task_context->task_phase = 0;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
				 (scic_sds_controller_get_protocol_engine_group(scic) <<
				  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
				 (scic_sds_port_get_index(iport) <<
				  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
				 ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context command buffer should not contain command header.
	 */
	task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
	task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));

	/* SMP response comes as UF, so no need to set response IU address. */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}
3222
3223 /*
3224  * isci_smp_request_build() - This function builds the smp request.
3225  * @ireq: This parameter points to the isci_request allocated in the
3226  *    request construct function.
3227  *
3228  * SCI_SUCCESS on successfull completion, or specific failure code.
3229  */
3230 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3231 {
3232         struct sas_task *task = isci_request_access_task(ireq);
3233         struct device *dev = &ireq->isci_host->pdev->dev;
3234         enum sci_status status = SCI_FAILURE;
3235
3236         status = scic_io_request_construct_smp(dev, ireq, task);
3237         if (status != SCI_SUCCESS)
3238                 dev_warn(&ireq->isci_host->pdev->dev,
3239                          "%s: failed with status = %d\n",
3240                          __func__,
3241                          status);
3242
3243         return status;
3244 }
3245
/**
 * isci_io_request_build() - This function builds the io request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @request: This parameter points to the isci_request object allocated in the
 *    request construct function.
 * @idev: This parameter is the handle for the sci core's remote device
 *    object that is the destination for this request.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_io_request_build(struct isci_host *isci_host,
					     struct isci_request *request,
					     struct isci_remote_device *idev)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&isci_host->pdev->dev,
		"%s: idev = 0x%p; request = %p, "
		"num_scatter = %d\n",
		__func__,
		idev,
		request,
		task->num_scatter);

	/* map the sgl addresses, if present.
	 * libata does the mapping for sata devices
	 * before we get the request.
	 */
	if (task->num_scatter &&
	    !sas_protocol_ata(task->task_proto) &&
	    !(SAS_PROTOCOL_SMP & task->task_proto)) {

		request->num_sg_entries = dma_map_sg(
			&isci_host->pdev->dev,
			task->scatter,
			task->num_scatter,
			task->data_dir
			);

		if (request->num_sg_entries == 0)
			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	}

	/* Core (protocol independent) construction */
	status = scic_io_request_construct(&isci_host->sci, idev, request);

	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: failed request construct\n",
			 __func__);
		return SCI_FAILURE;
	}

	/* Protocol specific construction */
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		status = isci_smp_request_build(request);
		break;
	case SAS_PROTOCOL_SSP:
		status = isci_request_ssp_request_construct(request);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		status = isci_request_stp_request_construct(request);
		break;
	default:
		dev_warn(&isci_host->pdev->dev,
			 "%s: unknown protocol\n", __func__);
		return SCI_FAILURE;
	}

	/* NOTE(review): 'status' from the protocol specific construct above
	 * is not propagated; a build failure there is reported to the caller
	 * as SCI_SUCCESS - confirm this is intentional.
	 */
	return SCI_SUCCESS;
}
3319
3320 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
3321 {
3322         struct isci_request *ireq;
3323
3324         ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3325         ireq->io_tag = tag;
3326         ireq->io_request_completion = NULL;
3327         ireq->flags = 0;
3328         ireq->num_sg_entries = 0;
3329         INIT_LIST_HEAD(&ireq->completed_node);
3330         INIT_LIST_HEAD(&ireq->dev_node);
3331         isci_request_change_state(ireq, allocated);
3332
3333         return ireq;
3334 }
3335
3336 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3337                                                      struct sas_task *task,
3338                                                      u16 tag)
3339 {
3340         struct isci_request *ireq;
3341
3342         ireq = isci_request_from_tag(ihost, tag);
3343         ireq->ttype_ptr.io_task_ptr = task;
3344         ireq->ttype = io_task;
3345         task->lldd_task = ireq;
3346
3347         return ireq;
3348 }
3349
3350 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3351                                                struct isci_tmf *isci_tmf,
3352                                                u16 tag)
3353 {
3354         struct isci_request *ireq;
3355
3356         ireq = isci_request_from_tag(ihost, tag);
3357         ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3358         ireq->ttype = tmf_task;
3359
3360         return ireq;
3361 }
3362
/* Build and submit an I/O request for @task on @idev under the tag @tag.
 * Failures before submission return the sci status; a core response of
 * SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED is converted into a libsas
 * device-reset escalation and reported as success.
 */
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
			 struct sas_task *task, u16 tag)
{
	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
	struct isci_request *ireq;
	unsigned long flags;
	int ret = 0;

	/* do common allocation and init of request object. */
	ireq = isci_io_request_from_tag(ihost, task, tag);

	status = isci_io_request_build(ihost, ireq, idev);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: request_construct failed - status = 0x%x\n",
			 __func__,
			 status);
		return status;
	}

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {

		if (isci_task_is_ncq_recovery(task)) {

			/* The device is in an NCQ recovery state.  Issue the
			 * request on the task side.  Note that it will
			 * complete on the I/O request side because the
			 * request was built that way (ie.
			 * ireq->is_task_management_request is false).
			 */
			status = scic_controller_start_task(&ihost->sci,
							    idev,
							    ireq);
		} else {
			/* Non-recovery I/O is rejected while the device is
			 * in NCQ error recovery.
			 */
			status = SCI_FAILURE;
		}
	} else {
		/* send the request, let the core assign the IO TAG.	*/
		status = scic_controller_start_io(&ihost->sci, idev,
						  ireq);
	}

	if (status != SCI_SUCCESS &&
	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		dev_warn(&ihost->pdev->dev,
			 "%s: failed request start (0x%x)\n",
			 __func__, status);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		return status;
	}

	/* Either I/O started OK, or the core has signaled that
	 * the device needs a target reset.
	 *
	 * In either case, hold onto the I/O for later.
	 *
	 * Update it's status and add it to the list in the
	 * remote device object.
	 */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	if (status == SCI_SUCCESS) {
		isci_request_change_state(ireq, started);
	} else {
		/* The request did not really start in the
		 * hardware, so clear the request handle
		 * here so no terminations will be done.
		 */
		set_bit(IREQ_TERMINATED, &ireq->flags);
		isci_request_change_state(ireq, completed);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status ==
	    SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		isci_execpath_callback(ihost, task,
				       sas_task_abort);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 */
		status = SCI_SUCCESS;
	}

	/* NOTE(review): 'ret' is always 0 here; the final 'status' value is
	 * not returned - confirm callers only expect 0 on these paths.
	 */
	return ret;
}