2d29abf3ce1f3ee8b3e3af2c0c6390afcfefa573
[linux-2.6.git] / drivers / scsi / isci / request.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55
56 #include "isci.h"
57 #include "task.h"
58 #include "request.h"
59 #include "sata.h"
60 #include "scu_completion_codes.h"
61 #include "scu_event_codes.h"
62 #include "sas.h"
63
64 static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
65                                                         int idx)
66 {
67         if (idx == 0)
68                 return &ireq->tc->sgl_pair_ab;
69         else if (idx == 1)
70                 return &ireq->tc->sgl_pair_cd;
71         else if (idx < 0)
72                 return NULL;
73         else
74                 return &ireq->sg_table[idx - 2];
75 }
76
77 static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic,
78                                           struct isci_request *ireq, u32 idx)
79 {
80         u32 offset;
81
82         if (idx == 0) {
83                 offset = (void *) &ireq->tc->sgl_pair_ab -
84                          (void *) &scic->task_context_table[0];
85                 return scic->task_context_dma + offset;
86         } else if (idx == 1) {
87                 offset = (void *) &ireq->tc->sgl_pair_cd -
88                          (void *) &scic->task_context_table[0];
89                 return scic->task_context_dma + offset;
90         }
91
92         return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
93 }
94
95 static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
96 {
97         e->length = sg_dma_len(sg);
98         e->address_upper = upper_32_bits(sg_dma_address(sg));
99         e->address_lower = lower_32_bits(sg_dma_address(sg));
100         e->address_modifier = 0;
101 }
102
/*
 * scic_sds_request_build_sgl() - build the hardware scatter-gather list
 * @ireq: request whose SGL element pairs are populated
 *
 * Walks the libsas scatterlist two entries at a time, filling the A and
 * B elements of each SGL pair and chaining each pair to the next by its
 * DMA address.  When the task carries no scatterlist (num_scatter == 0)
 * the single data buffer is DMA-mapped directly and described by element
 * A of the first pair.  The last pair's next-pointer is always zeroed to
 * terminate the chain.
 */
static void scic_sds_request_build_sgl(struct isci_request *ireq)
{
        struct isci_host *isci_host = ireq->isci_host;
        struct scic_sds_controller *scic = &isci_host->sci;
        struct sas_task *task = isci_request_access_task(ireq);
        struct scatterlist *sg = NULL;
        dma_addr_t dma_addr;
        u32 sg_idx = 0;
        struct scu_sgl_element_pair *scu_sg   = NULL;
        struct scu_sgl_element_pair *prev_sg  = NULL;

        if (task->num_scatter > 0) {
                sg = task->scatter;

                while (sg) {
                        scu_sg = to_sgl_element_pair(ireq, sg_idx);
                        init_sgl_element(&scu_sg->A, sg);
                        sg = sg_next(sg);
                        if (sg) {
                                init_sgl_element(&scu_sg->B, sg);
                                sg = sg_next(sg);
                        } else
                                /* odd entry count: B element unused */
                                memset(&scu_sg->B, 0, sizeof(scu_sg->B));

                        if (prev_sg) {
                                /* link the previous pair to this one */
                                dma_addr = to_sgl_element_pair_dma(scic,
                                                                   ireq,
                                                                   sg_idx);

                                prev_sg->next_pair_upper =
                                        upper_32_bits(dma_addr);
                                prev_sg->next_pair_lower =
                                        lower_32_bits(dma_addr);
                        }

                        prev_sg = scu_sg;
                        sg_idx++;
                }
        } else {        /* handle when no sg */
                scu_sg = to_sgl_element_pair(ireq, sg_idx);

                dma_addr = dma_map_single(&isci_host->pdev->dev,
                                          task->scatter,
                                          task->total_xfer_len,
                                          task->data_dir);

                /* stash the mapping — presumably unmapped on completion
                 * elsewhere; not visible in this file chunk */
                ireq->zero_scatter_daddr = dma_addr;

                scu_sg->A.length = task->total_xfer_len;
                scu_sg->A.address_upper = upper_32_bits(dma_addr);
                scu_sg->A.address_lower = lower_32_bits(dma_addr);
        }

        if (scu_sg) {
                /* terminate the pair chain */
                scu_sg->next_pair_upper = 0;
                scu_sg->next_pair_lower = 0;
        }
}
161
162 static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq)
163 {
164         struct ssp_cmd_iu *cmd_iu;
165         struct sas_task *task = isci_request_access_task(ireq);
166
167         cmd_iu = &ireq->ssp.cmd;
168
169         memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
170         cmd_iu->add_cdb_len = 0;
171         cmd_iu->_r_a = 0;
172         cmd_iu->_r_b = 0;
173         cmd_iu->en_fburst = 0; /* unsupported */
174         cmd_iu->task_prio = task->ssp_task.task_prio;
175         cmd_iu->task_attr = task->ssp_task.task_attr;
176         cmd_iu->_r_c = 0;
177
178         sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
179                        sizeof(task->ssp_task.cdb) / sizeof(u32));
180 }
181
182 static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq)
183 {
184         struct ssp_task_iu *task_iu;
185         struct sas_task *task = isci_request_access_task(ireq);
186         struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
187
188         task_iu = &ireq->ssp.tmf;
189
190         memset(task_iu, 0, sizeof(struct ssp_task_iu));
191
192         memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
193
194         task_iu->task_func = isci_tmf->tmf_code;
195         task_iu->task_tag =
196                 (ireq->ttype == tmf_task) ?
197                 isci_tmf->io_tag :
198                 SCI_CONTROLLER_INVALID_IO_TAG;
199 }
200
/**
 * scu_ssp_reqeust_construct_task_context() - fill TC fields common to
 * every SSP request type (IO and task management)
 * @ireq: the request being constructed
 * @task_context: the SCU task context being populated
 *
 * Sets protocol/port/remote-node addressing, frame-control defaults,
 * the post-context command word, and the DMA addresses of the command
 * and response IUs.  (The "reqeust" typo in the name is long-standing
 * and kept for the existing callers.)
 */
static void scu_ssp_reqeust_construct_task_context(
        struct isci_request *ireq,
        struct scu_task_context *task_context)
{
        dma_addr_t dma_addr;
        struct scic_sds_remote_device *target_device;
        struct isci_port *iport;

        target_device = scic_sds_request_get_device(ireq);
        iport = scic_sds_request_get_port(ireq);

        /* Fill in the TC with the its required data */
        task_context->abort = 0;
        task_context->priority = 0;
        task_context->initiator_request = 1;
        task_context->connection_rate = target_device->connection_rate;
        /* NOTE(review): 'controller' is not declared in this scope, so
         * this is presumably a macro that consumes the token — confirm
         * against the header that defines it. */
        task_context->protocol_engine_index =
                scic_sds_controller_get_protocol_engine_group(controller);
        task_context->logical_port_index = scic_sds_port_get_index(iport);
        task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
        task_context->valid = SCU_TASK_CONTEXT_VALID;
        task_context->context_type = SCU_TASK_CONTEXT_TYPE;

        task_context->remote_node_index =
                scic_sds_remote_device_get_index(ireq->target_device);
        task_context->command_code = 0;

        task_context->link_layer_control = 0;
        task_context->do_not_dma_ssp_good_response = 1;
        task_context->strict_ordering = 0;
        task_context->control_frame = 0;
        task_context->timeout_enable = 0;
        task_context->block_guard_enable = 0;

        task_context->address_modifier = 0;

        /* task_context->type.ssp.tag = ireq->io_tag; */
        task_context->task_phase = 0x01;

        /* Pre-compute the post-context command word: POST_TC request
         * with engine group, port index and this request's TCi. */
        ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                              (scic_sds_controller_get_protocol_engine_group(controller) <<
                               SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                              (scic_sds_port_get_index(iport) <<
                               SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                              ISCI_TAG_TCI(ireq->io_tag));

        /*
         * Copy the physical address for the command buffer to the
         * SCU Task Context
         */
        dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

        task_context->command_iu_upper = upper_32_bits(dma_addr);
        task_context->command_iu_lower = lower_32_bits(dma_addr);

        /*
         * Copy the physical address for the response buffer to the
         * SCU Task Context
         */
        dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

        task_context->response_iu_upper = upper_32_bits(dma_addr);
        task_context->response_iu_lower = lower_32_bits(dma_addr);
}
271
272 /**
273  * This method is will fill in the SCU Task Context for a SSP IO request.
274  * @sci_req:
275  *
276  */
277 static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
278                                                       enum dma_data_direction dir,
279                                                       u32 len)
280 {
281         struct scu_task_context *task_context = ireq->tc;
282
283         scu_ssp_reqeust_construct_task_context(ireq, task_context);
284
285         task_context->ssp_command_iu_length =
286                 sizeof(struct ssp_cmd_iu) / sizeof(u32);
287         task_context->type.ssp.frame_type = SSP_COMMAND;
288
289         switch (dir) {
290         case DMA_FROM_DEVICE:
291         case DMA_NONE:
292         default:
293                 task_context->task_type = SCU_TASK_TYPE_IOREAD;
294                 break;
295         case DMA_TO_DEVICE:
296                 task_context->task_type = SCU_TASK_TYPE_IOWRITE;
297                 break;
298         }
299
300         task_context->transfer_length_bytes = len;
301
302         if (task_context->transfer_length_bytes > 0)
303                 scic_sds_request_build_sgl(ireq);
304 }
305
306 /**
307  * This method will fill in the SCU Task Context for a SSP Task request.  The
308  *    following important settings are utilized: -# priority ==
309  *    SCU_TASK_PRIORITY_HIGH.  This ensures that the task request is issued
310  *    ahead of other task destined for the same Remote Node. -# task_type ==
311  *    SCU_TASK_TYPE_IOREAD.  This simply indicates that a normal request type
312  *    (i.e. non-raw frame) is being utilized to perform task management. -#
313  *    control_frame == 1.  This ensures that the proper endianess is set so
314  *    that the bytes are transmitted in the right order for a task frame.
315  * @sci_req: This parameter specifies the task request object being
316  *    constructed.
317  *
318  */
319 static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
320 {
321         struct scu_task_context *task_context = ireq->tc;
322
323         scu_ssp_reqeust_construct_task_context(ireq, task_context);
324
325         task_context->control_frame                = 1;
326         task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
327         task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
328         task_context->transfer_length_bytes        = 0;
329         task_context->type.ssp.frame_type          = SSP_TASK;
330         task_context->ssp_command_iu_length =
331                 sizeof(struct ssp_task_iu) / sizeof(u32);
332 }
333
/**
 * scu_sata_reqeust_construct_task_context() - fill TC fields common to
 * every SATA/STP request type; called from the various SATA
 * constructors
 * @ireq: the general IO request used to construct the SCU task context
 * @task_context: the SCU task context buffer being populated
 *
 * The general io request construction is complete. The buffer assignment
 * for the command buffer is complete. none Revisit task context
 * construction to determine what is common for SSP/SMP/STP task context
 * structures.
 */
static void scu_sata_reqeust_construct_task_context(
        struct isci_request *ireq,
        struct scu_task_context *task_context)
{
        dma_addr_t dma_addr;
        struct scic_sds_remote_device *target_device;
        struct isci_port *iport;

        target_device = scic_sds_request_get_device(ireq);
        iport = scic_sds_request_get_port(ireq);

        /* Fill in the TC with the its required data */
        task_context->abort = 0;
        task_context->priority = SCU_TASK_PRIORITY_NORMAL;
        task_context->initiator_request = 1;
        task_context->connection_rate = target_device->connection_rate;
        /* NOTE(review): 'controller' is not a local here — presumably a
         * macro argument token; confirm in the macro's header. */
        task_context->protocol_engine_index =
                scic_sds_controller_get_protocol_engine_group(controller);
        task_context->logical_port_index =
                scic_sds_port_get_index(iport);
        task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
        task_context->valid = SCU_TASK_CONTEXT_VALID;
        task_context->context_type = SCU_TASK_CONTEXT_TYPE;

        task_context->remote_node_index =
                scic_sds_remote_device_get_index(ireq->target_device);
        task_context->command_code = 0;

        task_context->link_layer_control = 0;
        task_context->do_not_dma_ssp_good_response = 1;
        task_context->strict_ordering = 0;
        task_context->control_frame = 0;
        task_context->timeout_enable = 0;
        task_context->block_guard_enable = 0;

        task_context->address_modifier = 0;
        task_context->task_phase = 0x01;

        /* H2D FIS length minus its first dword, in dwords (the first
         * dword travels inside the TC itself, see below) */
        task_context->ssp_command_iu_length =
                (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

        /* Set the first word of the H2D REG FIS */
        task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

        /* Pre-compute the post-context command word: POST_TC request
         * with engine group, port index and this request's TCi. */
        ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                                 (scic_sds_controller_get_protocol_engine_group(controller) <<
                                  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                                 (scic_sds_port_get_index(iport) <<
                                  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                                 ISCI_TAG_TCI(ireq->io_tag));
        /*
         * Copy the physical address for the command buffer to the SCU Task
         * Context. We must offset the command buffer by 4 bytes because the
         * first 4 bytes are transfered in the body of the TC.
         */
        dma_addr = scic_io_request_get_dma_addr(ireq,
                                                ((char *) &ireq->stp.cmd) +
                                                sizeof(u32));

        task_context->command_iu_upper = upper_32_bits(dma_addr);
        task_context->command_iu_lower = lower_32_bits(dma_addr);

        /* SATA Requests do not have a response buffer */
        task_context->response_iu_upper = 0;
        task_context->response_iu_lower = 0;
}
412
413 static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
414 {
415         struct scu_task_context *task_context = ireq->tc;
416
417         scu_sata_reqeust_construct_task_context(ireq, task_context);
418
419         task_context->control_frame         = 0;
420         task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
421         task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
422         task_context->type.stp.fis_type     = FIS_REGH2D;
423         task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
424 }
425
426 static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq,
427                                                           bool copy_rx_frame)
428 {
429         struct isci_stp_request *stp_req = &ireq->stp.req;
430
431         scu_stp_raw_request_construct_task_context(ireq);
432
433         stp_req->status = 0;
434         stp_req->sgl.offset = 0;
435         stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
436
437         if (copy_rx_frame) {
438                 scic_sds_request_build_sgl(ireq);
439                 stp_req->sgl.index = 0;
440         } else {
441                 /* The user does not want the data copied to the SGL buffer location */
442                 stp_req->sgl.index = -1;
443         }
444
445         return SCI_SUCCESS;
446 }
447
448 /**
449  *
450  * @sci_req: This parameter specifies the request to be constructed as an
451  *    optimized request.
452  * @optimized_task_type: This parameter specifies whether the request is to be
453  *    an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
454  *    value of 1 indicates NCQ.
455  *
456  * This method will perform request construction common to all types of STP
457  * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
458  * returns an indication as to whether the construction was successful.
459  */
460 static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq,
461                                                      u8 optimized_task_type,
462                                                      u32 len,
463                                                      enum dma_data_direction dir)
464 {
465         struct scu_task_context *task_context = ireq->tc;
466
467         /* Build the STP task context structure */
468         scu_sata_reqeust_construct_task_context(ireq, task_context);
469
470         /* Copy over the SGL elements */
471         scic_sds_request_build_sgl(ireq);
472
473         /* Copy over the number of bytes to be transfered */
474         task_context->transfer_length_bytes = len;
475
476         if (dir == DMA_TO_DEVICE) {
477                 /*
478                  * The difference between the DMA IN and DMA OUT request task type
479                  * values are consistent with the difference between FPDMA READ
480                  * and FPDMA WRITE values.  Add the supplied task type parameter
481                  * to this difference to set the task type properly for this
482                  * DATA OUT (WRITE) case. */
483                 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
484                                                                  - SCU_TASK_TYPE_DMA_IN);
485         } else {
486                 /*
487                  * For the DATA IN (READ) case, simply save the supplied
488                  * optimized task type. */
489                 task_context->task_type = optimized_task_type;
490         }
491 }
492
493
494
495 static enum sci_status
496 scic_io_request_construct_sata(struct isci_request *ireq,
497                                u32 len,
498                                enum dma_data_direction dir,
499                                bool copy)
500 {
501         enum sci_status status = SCI_SUCCESS;
502         struct sas_task *task = isci_request_access_task(ireq);
503
504         /* check for management protocols */
505         if (ireq->ttype == tmf_task) {
506                 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
507
508                 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
509                     tmf->tmf_code == isci_tmf_sata_srst_low) {
510                         scu_stp_raw_request_construct_task_context(ireq);
511                         return SCI_SUCCESS;
512                 } else {
513                         dev_err(scic_to_dev(ireq->owning_controller),
514                                 "%s: Request 0x%p received un-handled SAT "
515                                 "management protocol 0x%x.\n",
516                                 __func__, ireq, tmf->tmf_code);
517
518                         return SCI_FAILURE;
519                 }
520         }
521
522         if (!sas_protocol_ata(task->task_proto)) {
523                 dev_err(scic_to_dev(ireq->owning_controller),
524                         "%s: Non-ATA protocol in SATA path: 0x%x\n",
525                         __func__,
526                         task->task_proto);
527                 return SCI_FAILURE;
528
529         }
530
531         /* non data */
532         if (task->data_dir == DMA_NONE) {
533                 scu_stp_raw_request_construct_task_context(ireq);
534                 return SCI_SUCCESS;
535         }
536
537         /* NCQ */
538         if (task->ata_task.use_ncq) {
539                 scic_sds_stp_optimized_request_construct(ireq,
540                                                          SCU_TASK_TYPE_FPDMAQ_READ,
541                                                          len, dir);
542                 return SCI_SUCCESS;
543         }
544
545         /* DMA */
546         if (task->ata_task.dma_xfer) {
547                 scic_sds_stp_optimized_request_construct(ireq,
548                                                          SCU_TASK_TYPE_DMA_IN,
549                                                          len, dir);
550                 return SCI_SUCCESS;
551         } else /* PIO */
552                 return scic_sds_stp_pio_request_construct(ireq, copy);
553
554         return status;
555 }
556
557 static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq)
558 {
559         struct sas_task *task = isci_request_access_task(ireq);
560
561         ireq->protocol = SCIC_SSP_PROTOCOL;
562
563         scu_ssp_io_request_construct_task_context(ireq,
564                                                   task->data_dir,
565                                                   task->total_xfer_len);
566
567         scic_sds_io_request_build_ssp_command_iu(ireq);
568
569         sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
570
571         return SCI_SUCCESS;
572 }
573
574 enum sci_status scic_task_request_construct_ssp(
575         struct isci_request *ireq)
576 {
577         /* Construct the SSP Task SCU Task Context */
578         scu_ssp_task_request_construct_task_context(ireq);
579
580         /* Fill in the SSP Task IU */
581         scic_sds_task_request_build_ssp_task_iu(ireq);
582
583         sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
584
585         return SCI_SUCCESS;
586 }
587
588 static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq)
589 {
590         enum sci_status status;
591         bool copy = false;
592         struct sas_task *task = isci_request_access_task(ireq);
593
594         ireq->protocol = SCIC_STP_PROTOCOL;
595
596         copy = (task->data_dir == DMA_NONE) ? false : true;
597
598         status = scic_io_request_construct_sata(ireq,
599                                                 task->total_xfer_len,
600                                                 task->data_dir,
601                                                 copy);
602
603         if (status == SCI_SUCCESS)
604                 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
605
606         return status;
607 }
608
609 enum sci_status scic_task_request_construct_sata(struct isci_request *ireq)
610 {
611         enum sci_status status = SCI_SUCCESS;
612
613         /* check for management protocols */
614         if (ireq->ttype == tmf_task) {
615                 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
616
617                 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
618                     tmf->tmf_code == isci_tmf_sata_srst_low) {
619                         scu_stp_raw_request_construct_task_context(ireq);
620                 } else {
621                         dev_err(scic_to_dev(ireq->owning_controller),
622                                 "%s: Request 0x%p received un-handled SAT "
623                                 "Protocol 0x%x.\n",
624                                 __func__, ireq, tmf->tmf_code);
625
626                         return SCI_FAILURE;
627                 }
628         }
629
630         if (status != SCI_SUCCESS)
631                 return status;
632         sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
633
634         return status;
635 }
636
637 /**
638  * sci_req_tx_bytes - bytes transferred when reply underruns request
639  * @sci_req: request that was terminated early
640  */
641 #define SCU_TASK_CONTEXT_SRAM 0x200000
642 static u32 sci_req_tx_bytes(struct isci_request *ireq)
643 {
644         struct scic_sds_controller *scic = ireq->owning_controller;
645         u32 ret_val = 0;
646
647         if (readl(&scic->smu_registers->address_modifier) == 0) {
648                 void __iomem *scu_reg_base = scic->scu_registers;
649
650                 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
651                  *   BAR1 is the scu_registers
652                  *   0x20002C = 0x200000 + 0x2c
653                  *            = start of task context SRAM + offset of (type.ssp.data_offset)
654                  *   TCi is the io_tag of struct scic_sds_request
655                  */
656                 ret_val = readl(scu_reg_base +
657                                 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
658                                 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
659         }
660
661         return ret_val;
662 }
663
/*
 * scic_sds_request_start() - post-construction start of a request
 * @ireq: the request to start; must be in SCI_REQ_CONSTRUCTED
 *
 * Writes the task index into the TC, performs per-protocol TC fix-ups,
 * folds the TCi into the post-context word, and moves the request to
 * the started state.  Returns SCI_FAILURE_INVALID_STATE if the request
 * is not in the constructed state.
 */
enum sci_status scic_sds_request_start(struct isci_request *ireq)
{
        enum sci_base_request_states state;
        struct scu_task_context *tc = ireq->tc;
        struct scic_sds_controller *scic = ireq->owning_controller;

        state = ireq->sm.current_state_id;
        if (state != SCI_REQ_CONSTRUCTED) {
                dev_warn(scic_to_dev(scic),
                        "%s: SCIC IO Request requested to start while in wrong "
                         "state %d\n", __func__, state);
                return SCI_FAILURE_INVALID_STATE;
        }

        tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

        switch (tc->protocol_type) {
        case SCU_TASK_CONTEXT_PROTOCOL_SMP:
        case SCU_TASK_CONTEXT_PROTOCOL_SSP:
                /* SSP/SMP Frame */
                tc->type.ssp.tag = ireq->io_tag;
                tc->type.ssp.target_port_transfer_tag = 0xFFFF;
                break;

        case SCU_TASK_CONTEXT_PROTOCOL_STP:
                /* STP/SATA Frame
                 * tc->type.stp.ncq_tag = ireq->ncq_tag;
                 */
                break;

        case SCU_TASK_CONTEXT_PROTOCOL_NONE:
                /* / @todo When do we set no protocol type? */
                break;

        default:
                /* This should never happen since we build the IO
                 * requests */
                break;
        }

        /* Add to the post_context the io tag value */
        ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

        /* Everything is good go ahead and change state */
        sci_change_state(&ireq->sm, SCI_REQ_STARTED);

        return SCI_SUCCESS;
}
712
/*
 * scic_sds_io_request_terminate() - request termination/abort
 * @ireq: the request to terminate
 *
 * The action depends on the request's current state: a merely
 * constructed request is completed immediately with a terminated
 * status; an in-flight request (started or any of the wait states) is
 * moved to aborting; a request already past its TC response, or one
 * already aborting, is completed directly.  Requests in any other
 * state (notably already completed) are rejected with
 * SCI_FAILURE_INVALID_STATE.
 */
enum sci_status
scic_sds_io_request_terminate(struct isci_request *ireq)
{
        enum sci_base_request_states state;

        state = ireq->sm.current_state_id;

        switch (state) {
        case SCI_REQ_CONSTRUCTED:
                /* never hit the hardware: complete with abort status */
                scic_sds_request_set_status(ireq,
                        SCU_TASK_DONE_TASK_ABORT,
                        SCI_FAILURE_IO_TERMINATED);

                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_STARTED:
        case SCI_REQ_TASK_WAIT_TC_COMP:
        case SCI_REQ_SMP_WAIT_RESP:
        case SCI_REQ_SMP_WAIT_TC_COMP:
        case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
        case SCI_REQ_STP_UDMA_WAIT_D2H:
        case SCI_REQ_STP_NON_DATA_WAIT_H2D:
        case SCI_REQ_STP_NON_DATA_WAIT_D2H:
        case SCI_REQ_STP_PIO_WAIT_H2D:
        case SCI_REQ_STP_PIO_WAIT_FRAME:
        case SCI_REQ_STP_PIO_DATA_IN:
        case SCI_REQ_STP_PIO_DATA_OUT:
        case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
        case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
        case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
                /* in flight: transition to aborting and wait for the
                 * hardware to finish */
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                return SCI_SUCCESS;
        case SCI_REQ_TASK_WAIT_TC_RESP:
                /* pass through aborting so its entry actions run, then
                 * complete immediately */
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_ABORTING:
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_COMPLETED:
        default:
                dev_warn(scic_to_dev(ireq->owning_controller),
                         "%s: SCIC IO Request requested to abort while in wrong "
                         "state %d\n",
                         __func__,
                         ireq->sm.current_state_id);
                break;
        }

        return SCI_FAILURE_INVALID_STATE;
}
764
765 enum sci_status scic_sds_request_complete(struct isci_request *ireq)
766 {
767         enum sci_base_request_states state;
768         struct scic_sds_controller *scic = ireq->owning_controller;
769
770         state = ireq->sm.current_state_id;
771         if (WARN_ONCE(state != SCI_REQ_COMPLETED,
772                       "isci: request completion from wrong state (%d)\n", state))
773                 return SCI_FAILURE_INVALID_STATE;
774
775         if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
776                 scic_sds_controller_release_frame(scic,
777                                                   ireq->saved_rx_frame_index);
778
779         /* XXX can we just stop the machine and remove the 'final' state? */
780         sci_change_state(&ireq->sm, SCI_REQ_FINAL);
781         return SCI_SUCCESS;
782 }
783
784 enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq,
785                                                   u32 event_code)
786 {
787         enum sci_base_request_states state;
788         struct scic_sds_controller *scic = ireq->owning_controller;
789
790         state = ireq->sm.current_state_id;
791
792         if (state != SCI_REQ_STP_PIO_DATA_IN) {
793                 dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n",
794                          __func__, event_code, state);
795
796                 return SCI_FAILURE_INVALID_STATE;
797         }
798
799         switch (scu_get_event_specifier(event_code)) {
800         case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
801                 /* We are waiting for data and the SCU has R_ERR the data frame.
802                  * Go back to waiting for the D2H Register FIS
803                  */
804                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
805                 return SCI_SUCCESS;
806         default:
807                 dev_err(scic_to_dev(scic),
808                         "%s: pio request unexpected event %#x\n",
809                         __func__, event_code);
810
811                 /* TODO Should we fail the PIO request when we get an
812                  * unexpected event?
813                  */
814                 return SCI_FAILURE;
815         }
816 }
817
818 /*
819  * This function copies response data for requests returning response data
820  *    instead of sense data.
821  * @sci_req: This parameter specifies the request object for which to copy
822  *    the response data.
823  */
824 static void scic_sds_io_request_copy_response(struct isci_request *ireq)
825 {
826         void *resp_buf;
827         u32 len;
828         struct ssp_response_iu *ssp_response;
829         struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
830
831         ssp_response = &ireq->ssp.rsp;
832
833         resp_buf = &isci_tmf->resp.resp_iu;
834
835         len = min_t(u32,
836                     SSP_RESP_IU_MAX_SIZE,
837                     be32_to_cpu(ssp_response->response_data_len));
838
839         memcpy(resp_buf, ssp_response->resp_data, len);
840 }
841
/* Handle a task-context completion for a request in the STARTED state.
 * Decodes the transport-layer status from @completion_code, records the
 * matching (scu_status, sci_status) pair on the request, and then always
 * transitions the request to SCI_REQ_COMPLETED.  Always returns SCI_SUCCESS.
 */
static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
                               u32 completion_code)
{
        struct ssp_response_iu *resp_iu;
        u8 datapres;

        /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
         * to determine SDMA status
         */
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                scic_sds_request_set_status(ireq,
                                            SCU_TASK_DONE_GOOD,
                                            SCI_SUCCESS);
                break;
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
                /* There are times when the SCU hardware will return an early
                 * response because the io request specified more data than is
                 * returned by the target device (mode pages, inquiry data,
                 * etc.).  We must check the response stats to see if this is
                 * truly a failed request or a good request that just got
                 * completed early.
                 */
                struct ssp_response_iu *resp = &ireq->ssp.rsp;
                ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

                /* Swab the response IU in place before examining resp->status. */
                sci_swab32_cpy(&ireq->ssp.rsp,
                               &ireq->ssp.rsp,
                               word_cnt);

                if (resp->status == 0) {
                        scic_sds_request_set_status(ireq,
                                                    SCU_TASK_DONE_GOOD,
                                                    SCI_SUCCESS_IO_DONE_EARLY);
                } else {
                        scic_sds_request_set_status(ireq,
                                                    SCU_TASK_DONE_CHECK_RESPONSE,
                                                    SCI_FAILURE_IO_RESPONSE_VALID);
                }
                break;
        }
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
                ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

                /* Swab the response IU in place so upper layers can read it. */
                sci_swab32_cpy(&ireq->ssp.rsp,
                               &ireq->ssp.rsp,
                               word_cnt);

                scic_sds_request_set_status(ireq,
                                            SCU_TASK_DONE_CHECK_RESPONSE,
                                            SCI_FAILURE_IO_RESPONSE_VALID);
                break;
        }

        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
                /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
                 * guaranteed to be received before this completion status is
                 * posted?
                 */
                resp_iu = &ireq->ssp.rsp;
                datapres = resp_iu->datapres;

                /* NOTE(review): datapres 1/2 presumably mark the IU as
                 * carrying response/sense data (SAS DATAPRES field) and so
                 * the response is treated as valid -- confirm against spec.
                 */
                if (datapres == 1 || datapres == 2) {
                        scic_sds_request_set_status(ireq,
                                                    SCU_TASK_DONE_CHECK_RESPONSE,
                                                    SCI_FAILURE_IO_RESPONSE_VALID);
                } else
                        scic_sds_request_set_status(ireq,
                                                    SCU_TASK_DONE_GOOD,
                                                    SCI_SUCCESS);
                break;
        /* only stp device gets suspended. */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
                /* Only STP targets need a device reset for these statuses;
                 * SSP targets report a controller-specific IO error.
                 */
                if (ireq->protocol == SCIC_STP_PROTOCOL) {
                        scic_sds_request_set_status(ireq,
                                SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                SCU_COMPLETION_TL_STATUS_SHIFT,
                                SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
                } else {
                        scic_sds_request_set_status(ireq,
                                SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                SCU_COMPLETION_TL_STATUS_SHIFT,
                                SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
                }
                break;

        /* both stp/ssp device gets suspended */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
                scic_sds_request_set_status(ireq,
                                            SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                            SCU_COMPLETION_TL_STATUS_SHIFT,
                                            SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
                break;

        /* neither ssp nor stp gets suspended. */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
        default:
                scic_sds_request_set_status(
                        ireq,
                        SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                        SCU_COMPLETION_TL_STATUS_SHIFT,
                        SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
                break;
        }

        /*
         * TODO: This is probably wrong for ACK/NAK timeout conditions
         */

        /* In all cases we will treat this as the completion of the IO req. */
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        return SCI_SUCCESS;
}
989
990 static enum sci_status
991 request_aborting_state_tc_event(struct isci_request *ireq,
992                                 u32 completion_code)
993 {
994         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
995         case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
996         case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
997                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT,
998                                             SCI_FAILURE_IO_TERMINATED);
999
1000                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1001                 break;
1002
1003         default:
1004                 /* Unless we get some strange error wait for the task abort to complete
1005                  * TODO: Should there be a state change for this completion?
1006                  */
1007                 break;
1008         }
1009
1010         return SCI_SUCCESS;
1011 }
1012
1013 static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
1014                                                        u32 completion_code)
1015 {
1016         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1017         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1018                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1019                                             SCI_SUCCESS);
1020
1021                 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1022                 break;
1023         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1024                 /* Currently, the decision is to simply allow the task request
1025                  * to timeout if the task IU wasn't received successfully.
1026                  * There is a potential for receiving multiple task responses if
1027                  * we decide to send the task IU again.
1028                  */
1029                 dev_warn(scic_to_dev(ireq->owning_controller),
1030                          "%s: TaskRequest:0x%p CompletionCode:%x - "
1031                          "ACK/NAK timeout\n", __func__, ireq,
1032                          completion_code);
1033
1034                 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1035                 break;
1036         default:
1037                 /*
1038                  * All other completion status cause the IO to be complete.
1039                  * If a NAK was received, then it is up to the user to retry
1040                  * the request.
1041                  */
1042                 scic_sds_request_set_status(ireq,
1043                         SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1044                         SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1045
1046                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1047                 break;
1048         }
1049
1050         return SCI_SUCCESS;
1051 }
1052
1053 static enum sci_status
1054 smp_request_await_response_tc_event(struct isci_request *ireq,
1055                                     u32 completion_code)
1056 {
1057         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1058         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1059                 /* In the AWAIT RESPONSE state, any TC completion is
1060                  * unexpected.  but if the TC has success status, we
1061                  * complete the IO anyway.
1062                  */
1063                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1064                                             SCI_SUCCESS);
1065
1066                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1067                 break;
1068
1069         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1070         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1071         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1072         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1073                 /* These status has been seen in a specific LSI
1074                  * expander, which sometimes is not able to send smp
1075                  * response within 2 ms. This causes our hardware break
1076                  * the connection and set TC completion with one of
1077                  * these SMP_XXX_XX_ERR status. For these type of error,
1078                  * we ask scic user to retry the request.
1079                  */
1080                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR,
1081                                             SCI_FAILURE_RETRY_REQUIRED);
1082
1083                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1084                 break;
1085
1086         default:
1087                 /* All other completion status cause the IO to be complete.  If a NAK
1088                  * was received, then it is up to the user to retry the request
1089                  */
1090                 scic_sds_request_set_status(ireq,
1091                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1092                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1093
1094                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1095                 break;
1096         }
1097
1098         return SCI_SUCCESS;
1099 }
1100
1101 static enum sci_status
1102 smp_request_await_tc_event(struct isci_request *ireq,
1103                            u32 completion_code)
1104 {
1105         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1106         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1107                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1108                                             SCI_SUCCESS);
1109
1110                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1111                 break;
1112         default:
1113                 /* All other completion status cause the IO to be
1114                  * complete.  If a NAK was received, then it is up to
1115                  * the user to retry the request.
1116                  */
1117                 scic_sds_request_set_status(ireq,
1118                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1119                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1120
1121                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1122                 break;
1123         }
1124
1125         return SCI_SUCCESS;
1126 }
1127
/* Program the SATA NCQ tag into the request's SCU task context.
 * NOTE(review): no state checking is performed; presumably this must only
 * be called before the task context is posted to hardware -- confirm with
 * callers.
 */
void scic_stp_io_request_set_ncq_tag(struct isci_request *ireq,
                                     u16 ncq_tag)
{
        /**
         * @note This could be made to return an error to the user if the user
         *       attempts to set the NCQ tag in the wrong state.
         */
        ireq->tc->type.stp.ncq_tag = ncq_tag;
}
1137
1138 static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
1139 {
1140         struct scu_sgl_element *sgl;
1141         struct scu_sgl_element_pair *sgl_pair;
1142         struct isci_request *ireq = to_ireq(stp_req);
1143         struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
1144
1145         sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1146         if (!sgl_pair)
1147                 sgl = NULL;
1148         else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
1149                 if (sgl_pair->B.address_lower == 0 &&
1150                     sgl_pair->B.address_upper == 0) {
1151                         sgl = NULL;
1152                 } else {
1153                         pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
1154                         sgl = &sgl_pair->B;
1155                 }
1156         } else {
1157                 if (sgl_pair->next_pair_lower == 0 &&
1158                     sgl_pair->next_pair_upper == 0) {
1159                         sgl = NULL;
1160                 } else {
1161                         pio_sgl->index++;
1162                         pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
1163                         sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1164                         sgl = &sgl_pair->A;
1165                 }
1166         }
1167
1168         return sgl;
1169 }
1170
1171 static enum sci_status
1172 stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
1173                                         u32 completion_code)
1174 {
1175         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1176         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1177                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1178                                             SCI_SUCCESS);
1179
1180                 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
1181                 break;
1182
1183         default:
1184                 /* All other completion status cause the IO to be
1185                  * complete.  If a NAK was received, then it is up to
1186                  * the user to retry the request.
1187                  */
1188                 scic_sds_request_set_status(ireq,
1189                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1190                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1191
1192                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1193                 break;
1194         }
1195
1196         return SCI_SUCCESS;
1197 }
1198
1199 #define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */
1200
1201 /* transmit DATA_FIS from (current sgl + offset) for input
1202  * parameter length. current sgl and offset is alreay stored in the IO request
1203  */
1204 static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
1205         struct isci_request *ireq,
1206         u32 length)
1207 {
1208         struct isci_stp_request *stp_req = &ireq->stp.req;
1209         struct scu_task_context *task_context = ireq->tc;
1210         struct scu_sgl_element_pair *sgl_pair;
1211         struct scu_sgl_element *current_sgl;
1212
1213         /* Recycle the TC and reconstruct it for sending out DATA FIS containing
1214          * for the data from current_sgl+offset for the input length
1215          */
1216         sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1217         if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
1218                 current_sgl = &sgl_pair->A;
1219         else
1220                 current_sgl = &sgl_pair->B;
1221
1222         /* update the TC */
1223         task_context->command_iu_upper = current_sgl->address_upper;
1224         task_context->command_iu_lower = current_sgl->address_lower;
1225         task_context->transfer_length_bytes = length;
1226         task_context->type.stp.fis_type = FIS_DATA;
1227
1228         /* send the new TC out. */
1229         return scic_controller_continue_io(ireq);
1230 }
1231
1232 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
1233 {
1234         struct isci_stp_request *stp_req = &ireq->stp.req;
1235         struct scu_sgl_element_pair *sgl_pair;
1236         struct scu_sgl_element *sgl;
1237         enum sci_status status;
1238         u32 offset;
1239         u32 len = 0;
1240
1241         offset = stp_req->sgl.offset;
1242         sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1243         if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
1244                 return SCI_FAILURE;
1245
1246         if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
1247                 sgl = &sgl_pair->A;
1248                 len = sgl_pair->A.length - offset;
1249         } else {
1250                 sgl = &sgl_pair->B;
1251                 len = sgl_pair->B.length - offset;
1252         }
1253
1254         if (stp_req->pio_len == 0)
1255                 return SCI_SUCCESS;
1256
1257         if (stp_req->pio_len >= len) {
1258                 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
1259                 if (status != SCI_SUCCESS)
1260                         return status;
1261                 stp_req->pio_len -= len;
1262
1263                 /* update the current sgl, offset and save for future */
1264                 sgl = pio_sgl_next(stp_req);
1265                 offset = 0;
1266         } else if (stp_req->pio_len < len) {
1267                 scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
1268
1269                 /* Sgl offset will be adjusted and saved for future */
1270                 offset += stp_req->pio_len;
1271                 sgl->address_lower += stp_req->pio_len;
1272                 stp_req->pio_len = 0;
1273         }
1274
1275         stp_req->sgl.offset = offset;
1276
1277         return status;
1278 }
1279
1280 /**
1281  *
1282  * @stp_request: The request that is used for the SGL processing.
1283  * @data_buffer: The buffer of data to be copied.
1284  * @length: The length of the data transfer.
1285  *
1286  * Copy the data from the buffer for the length specified to the IO reqeust SGL
1287  * specified data region. enum sci_status
1288  */
1289 static enum sci_status
1290 scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
1291                                                   u8 *data_buf, u32 len)
1292 {
1293         struct isci_request *ireq;
1294         u8 *src_addr;
1295         int copy_len;
1296         struct sas_task *task;
1297         struct scatterlist *sg;
1298         void *kaddr;
1299         int total_len = len;
1300
1301         ireq = to_ireq(stp_req);
1302         task = isci_request_access_task(ireq);
1303         src_addr = data_buf;
1304
1305         if (task->num_scatter > 0) {
1306                 sg = task->scatter;
1307
1308                 while (total_len > 0) {
1309                         struct page *page = sg_page(sg);
1310
1311                         copy_len = min_t(int, total_len, sg_dma_len(sg));
1312                         kaddr = kmap_atomic(page, KM_IRQ0);
1313                         memcpy(kaddr + sg->offset, src_addr, copy_len);
1314                         kunmap_atomic(kaddr, KM_IRQ0);
1315                         total_len -= copy_len;
1316                         src_addr += copy_len;
1317                         sg = sg_next(sg);
1318                 }
1319         } else {
1320                 BUG_ON(task->total_xfer_len < total_len);
1321                 memcpy(task->scatter, src_addr, total_len);
1322         }
1323
1324         return SCI_SUCCESS;
1325 }
1326
1327 /**
1328  *
1329  * @sci_req: The PIO DATA IN request that is to receive the data.
1330  * @data_buffer: The buffer to copy from.
1331  *
1332  * Copy the data buffer to the io request data region. enum sci_status
1333  */
1334 static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1335         struct isci_stp_request *stp_req,
1336         u8 *data_buffer)
1337 {
1338         enum sci_status status;
1339
1340         /*
1341          * If there is less than 1K remaining in the transfer request
1342          * copy just the data for the transfer */
1343         if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
1344                 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1345                         stp_req, data_buffer, stp_req->pio_len);
1346
1347                 if (status == SCI_SUCCESS)
1348                         stp_req->pio_len = 0;
1349         } else {
1350                 /* We are transfering the whole frame so copy */
1351                 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1352                         stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1353
1354                 if (status == SCI_SUCCESS)
1355                         stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
1356         }
1357
1358         return status;
1359 }
1360
1361 static enum sci_status
1362 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1363                                               u32 completion_code)
1364 {
1365         enum sci_status status = SCI_SUCCESS;
1366
1367         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1368         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1369                 scic_sds_request_set_status(ireq,
1370                                             SCU_TASK_DONE_GOOD,
1371                                             SCI_SUCCESS);
1372
1373                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1374                 break;
1375
1376         default:
1377                 /* All other completion status cause the IO to be
1378                  * complete.  If a NAK was received, then it is up to
1379                  * the user to retry the request.
1380                  */
1381                 scic_sds_request_set_status(ireq,
1382                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1383                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1384
1385                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1386                 break;
1387         }
1388
1389         return status;
1390 }
1391
1392 static enum sci_status
1393 pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1394                               u32 completion_code)
1395 {
1396         enum sci_status status = SCI_SUCCESS;
1397         bool all_frames_transferred = false;
1398         struct isci_stp_request *stp_req = &ireq->stp.req;
1399
1400         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1401         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1402                 /* Transmit data */
1403                 if (stp_req->pio_len != 0) {
1404                         status = scic_sds_stp_request_pio_data_out_transmit_data(ireq);
1405                         if (status == SCI_SUCCESS) {
1406                                 if (stp_req->pio_len == 0)
1407                                         all_frames_transferred = true;
1408                         }
1409                 } else if (stp_req->pio_len == 0) {
1410                         /*
1411                          * this will happen if the all data is written at the
1412                          * first time after the pio setup fis is received
1413                          */
1414                         all_frames_transferred  = true;
1415                 }
1416
1417                 /* all data transferred. */
1418                 if (all_frames_transferred) {
1419                         /*
1420                          * Change the state to SCI_REQ_STP_PIO_DATA_IN
1421                          * and wait for PIO_SETUP fis / or D2H REg fis. */
1422                         sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1423                 }
1424                 break;
1425
1426         default:
1427                 /*
1428                  * All other completion status cause the IO to be complete.
1429                  * If a NAK was received, then it is up to the user to retry
1430                  * the request.
1431                  */
1432                 scic_sds_request_set_status(
1433                         ireq,
1434                         SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1435                         SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1436
1437                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1438                 break;
1439         }
1440
1441         return status;
1442 }
1443
/* Finish a UDMA request: record the hardware/driver status pair on the
 * request and transition its state machine to COMPLETED.
 */
static void scic_sds_stp_request_udma_complete_request(
        struct isci_request *ireq,
        u32 scu_status,
        enum sci_status sci_status)
{
        scic_sds_request_set_status(ireq, scu_status, sci_status);
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}
1452
1453 static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1454                                                                        u32 frame_index)
1455 {
1456         struct scic_sds_controller *scic = ireq->owning_controller;
1457         struct dev_to_host_fis *frame_header;
1458         enum sci_status status;
1459         u32 *frame_buffer;
1460
1461         status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1462                                                                frame_index,
1463                                                                (void **)&frame_header);
1464
1465         if ((status == SCI_SUCCESS) &&
1466             (frame_header->fis_type == FIS_REGD2H)) {
1467                 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1468                                                               frame_index,
1469                                                               (void **)&frame_buffer);
1470
1471                 scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1472                                                        frame_header,
1473                                                        frame_buffer);
1474         }
1475
1476         scic_sds_controller_release_frame(scic, frame_index);
1477
1478         return status;
1479 }
1480
1481 enum sci_status
1482 scic_sds_io_request_frame_handler(struct isci_request *ireq,
1483                                   u32 frame_index)
1484 {
1485         struct scic_sds_controller *scic = ireq->owning_controller;
1486         struct isci_stp_request *stp_req = &ireq->stp.req;
1487         enum sci_base_request_states state;
1488         enum sci_status status;
1489         ssize_t word_cnt;
1490
1491         state = ireq->sm.current_state_id;
1492         switch (state)  {
1493         case SCI_REQ_STARTED: {
1494                 struct ssp_frame_hdr ssp_hdr;
1495                 void *frame_header;
1496
1497                 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1498                                                               frame_index,
1499                                                               &frame_header);
1500
1501                 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1502                 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1503
1504                 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1505                         struct ssp_response_iu *resp_iu;
1506                         ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1507
1508                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1509                                                                       frame_index,
1510                                                                       (void **)&resp_iu);
1511
1512                         sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1513
1514                         resp_iu = &ireq->ssp.rsp;
1515
1516                         if (resp_iu->datapres == 0x01 ||
1517                             resp_iu->datapres == 0x02) {
1518                                 scic_sds_request_set_status(ireq,
1519                                                             SCU_TASK_DONE_CHECK_RESPONSE,
1520                                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1521                         } else
1522                                 scic_sds_request_set_status(ireq,
1523                                                             SCU_TASK_DONE_GOOD,
1524                                                             SCI_SUCCESS);
1525                 } else {
1526                         /* not a response frame, why did it get forwarded? */
1527                         dev_err(scic_to_dev(scic),
1528                                 "%s: SCIC IO Request 0x%p received unexpected "
1529                                 "frame %d type 0x%02x\n", __func__, ireq,
1530                                 frame_index, ssp_hdr.frame_type);
1531                 }
1532
1533                 /*
1534                  * In any case we are done with this frame buffer return it to
1535                  * the controller
1536                  */
1537                 scic_sds_controller_release_frame(scic, frame_index);
1538
1539                 return SCI_SUCCESS;
1540         }
1541
1542         case SCI_REQ_TASK_WAIT_TC_RESP:
1543                 scic_sds_io_request_copy_response(ireq);
1544                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1545                 scic_sds_controller_release_frame(scic,frame_index);
1546                 return SCI_SUCCESS;
1547
1548         case SCI_REQ_SMP_WAIT_RESP: {
1549                 struct smp_resp *rsp_hdr = &ireq->smp.rsp;
1550                 void *frame_header;
1551
1552                 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1553                                                               frame_index,
1554                                                               &frame_header);
1555
1556                 /* byte swap the header. */
1557                 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1558                 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1559
1560                 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1561                         void *smp_resp;
1562
1563                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1564                                                                       frame_index,
1565                                                                       &smp_resp);
1566
1567                         word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
1568                                 sizeof(u32);
1569
1570                         sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1571                                        smp_resp, word_cnt);
1572
1573                         scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1574                                                     SCI_SUCCESS);
1575
1576                         sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1577                 } else {
1578                         /*
1579                          * This was not a response frame why did it get
1580                          * forwarded?
1581                          */
1582                         dev_err(scic_to_dev(scic),
1583                                 "%s: SCIC SMP Request 0x%p received unexpected "
1584                                 "frame %d type 0x%02x\n",
1585                                 __func__,
1586                                 ireq,
1587                                 frame_index,
1588                                 rsp_hdr->frame_type);
1589
1590                         scic_sds_request_set_status(ireq,
1591                                                     SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1592                                                     SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1593
1594                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1595                 }
1596
1597                 scic_sds_controller_release_frame(scic, frame_index);
1598
1599                 return SCI_SUCCESS;
1600         }
1601
1602         case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1603                 return scic_sds_stp_request_udma_general_frame_handler(ireq,
1604                                                                        frame_index);
1605
1606         case SCI_REQ_STP_UDMA_WAIT_D2H:
1607                 /* Use the general frame handler to copy the resposne data */
1608                 status = scic_sds_stp_request_udma_general_frame_handler(ireq,
1609                                                                          frame_index);
1610
1611                 if (status != SCI_SUCCESS)
1612                         return status;
1613
1614                 scic_sds_stp_request_udma_complete_request(ireq,
1615                                                            SCU_TASK_DONE_CHECK_RESPONSE,
1616                                                            SCI_FAILURE_IO_RESPONSE_VALID);
1617
1618                 return SCI_SUCCESS;
1619
1620         case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1621                 struct dev_to_host_fis *frame_header;
1622                 u32 *frame_buffer;
1623
1624                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1625                                                                        frame_index,
1626                                                                        (void **)&frame_header);
1627
1628                 if (status != SCI_SUCCESS) {
1629                         dev_err(scic_to_dev(scic),
1630                                 "%s: SCIC IO Request 0x%p could not get frame "
1631                                 "header for frame index %d, status %x\n",
1632                                 __func__,
1633                                 stp_req,
1634                                 frame_index,
1635                                 status);
1636
1637                         return status;
1638                 }
1639
1640                 switch (frame_header->fis_type) {
1641                 case FIS_REGD2H:
1642                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1643                                                                       frame_index,
1644                                                                       (void **)&frame_buffer);
1645
1646                         scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1647                                                                frame_header,
1648                                                                frame_buffer);
1649
1650                         /* The command has completed with error */
1651                         scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE,
1652                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1653                         break;
1654
1655                 default:
1656                         dev_warn(scic_to_dev(scic),
1657                                  "%s: IO Request:0x%p Frame Id:%d protocol "
1658                                   "violation occurred\n", __func__, stp_req,
1659                                   frame_index);
1660
1661                         scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS,
1662                                                     SCI_FAILURE_PROTOCOL_VIOLATION);
1663                         break;
1664                 }
1665
1666                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1667
1668                 /* Frame has been decoded return it to the controller */
1669                 scic_sds_controller_release_frame(scic, frame_index);
1670
1671                 return status;
1672         }
1673
1674         case SCI_REQ_STP_PIO_WAIT_FRAME: {
1675                 struct sas_task *task = isci_request_access_task(ireq);
1676                 struct dev_to_host_fis *frame_header;
1677                 u32 *frame_buffer;
1678
1679                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1680                                                                        frame_index,
1681                                                                        (void **)&frame_header);
1682
1683                 if (status != SCI_SUCCESS) {
1684                         dev_err(scic_to_dev(scic),
1685                                 "%s: SCIC IO Request 0x%p could not get frame "
1686                                 "header for frame index %d, status %x\n",
1687                                 __func__, stp_req, frame_index, status);
1688                         return status;
1689                 }
1690
1691                 switch (frame_header->fis_type) {
1692                 case FIS_PIO_SETUP:
1693                         /* Get from the frame buffer the PIO Setup Data */
1694                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1695                                                                       frame_index,
1696                                                                       (void **)&frame_buffer);
1697
1698                         /* Get the data from the PIO Setup The SCU Hardware
1699                          * returns first word in the frame_header and the rest
1700                          * of the data is in the frame buffer so we need to
1701                          * back up one dword
1702                          */
1703
1704                         /* transfer_count: first 16bits in the 4th dword */
1705                         stp_req->pio_len = frame_buffer[3] & 0xffff;
1706
1707                         /* status: 4th byte in the 3rd dword */
1708                         stp_req->status = (frame_buffer[2] >> 24) & 0xff;
1709
1710                         scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1711                                                                frame_header,
1712                                                                frame_buffer);
1713
1714                         ireq->stp.rsp.status = stp_req->status;
1715
1716                         /* The next state is dependent on whether the
1717                          * request was PIO Data-in or Data out
1718                          */
1719                         if (task->data_dir == DMA_FROM_DEVICE) {
1720                                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1721                         } else if (task->data_dir == DMA_TO_DEVICE) {
1722                                 /* Transmit data */
1723                                 status = scic_sds_stp_request_pio_data_out_transmit_data(ireq);
1724                                 if (status != SCI_SUCCESS)
1725                                         break;
1726                                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1727                         }
1728                         break;
1729
1730                 case FIS_SETDEVBITS:
1731                         sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1732                         break;
1733
1734                 case FIS_REGD2H:
1735                         if (frame_header->status & ATA_BUSY) {
1736                                 /*
1737                                  * Now why is the drive sending a D2H Register
1738                                  * FIS when it is still busy?  Do nothing since
1739                                  * we are still in the right state.
1740                                  */
1741                                 dev_dbg(scic_to_dev(scic),
1742                                         "%s: SCIC PIO Request 0x%p received "
1743                                         "D2H Register FIS with BSY status "
1744                                         "0x%x\n",
1745                                         __func__,
1746                                         stp_req,
1747                                         frame_header->status);
1748                                 break;
1749                         }
1750
1751                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1752                                                                       frame_index,
1753                                                                       (void **)&frame_buffer);
1754
1755                         scic_sds_controller_copy_sata_response(&ireq->stp.req,
1756                                                                frame_header,
1757                                                                frame_buffer);
1758
1759                         scic_sds_request_set_status(ireq,
1760                                                     SCU_TASK_DONE_CHECK_RESPONSE,
1761                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1762
1763                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1764                         break;
1765
1766                 default:
1767                         /* FIXME: what do we do here? */
1768                         break;
1769                 }
1770
1771                 /* Frame is decoded return it to the controller */
1772                 scic_sds_controller_release_frame(scic, frame_index);
1773
1774                 return status;
1775         }
1776
1777         case SCI_REQ_STP_PIO_DATA_IN: {
1778                 struct dev_to_host_fis *frame_header;
1779                 struct sata_fis_data *frame_buffer;
1780
1781                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1782                                                                        frame_index,
1783                                                                        (void **)&frame_header);
1784
1785                 if (status != SCI_SUCCESS) {
1786                         dev_err(scic_to_dev(scic),
1787                                 "%s: SCIC IO Request 0x%p could not get frame "
1788                                 "header for frame index %d, status %x\n",
1789                                 __func__,
1790                                 stp_req,
1791                                 frame_index,
1792                                 status);
1793                         return status;
1794                 }
1795
1796                 if (frame_header->fis_type != FIS_DATA) {
1797                         dev_err(scic_to_dev(scic),
1798                                 "%s: SCIC PIO Request 0x%p received frame %d "
1799                                 "with fis type 0x%02x when expecting a data "
1800                                 "fis.\n",
1801                                 __func__,
1802                                 stp_req,
1803                                 frame_index,
1804                                 frame_header->fis_type);
1805
1806                         scic_sds_request_set_status(ireq,
1807                                                     SCU_TASK_DONE_GOOD,
1808                                                     SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
1809
1810                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1811
1812                         /* Frame is decoded return it to the controller */
1813                         scic_sds_controller_release_frame(scic, frame_index);
1814                         return status;
1815                 }
1816
1817                 if (stp_req->sgl.index < 0) {
1818                         ireq->saved_rx_frame_index = frame_index;
1819                         stp_req->pio_len = 0;
1820                 } else {
1821                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1822                                                                       frame_index,
1823                                                                       (void **)&frame_buffer);
1824
1825                         status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
1826                                                                             (u8 *)frame_buffer);
1827
1828                         /* Frame is decoded return it to the controller */
1829                         scic_sds_controller_release_frame(scic, frame_index);
1830                 }
1831
1832                 /* Check for the end of the transfer, are there more
1833                  * bytes remaining for this data transfer
1834                  */
1835                 if (status != SCI_SUCCESS || stp_req->pio_len != 0)
1836                         return status;
1837
1838                 if ((stp_req->status & ATA_BUSY) == 0) {
1839                         scic_sds_request_set_status(ireq,
1840                                                     SCU_TASK_DONE_CHECK_RESPONSE,
1841                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1842
1843                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1844                 } else {
1845                         sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1846                 }
1847                 return status;
1848         }
1849
1850         case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1851                 struct dev_to_host_fis *frame_header;
1852                 u32 *frame_buffer;
1853
1854                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1855                                                                        frame_index,
1856                                                                        (void **)&frame_header);
1857                 if (status != SCI_SUCCESS) {
1858                         dev_err(scic_to_dev(scic),
1859                                 "%s: SCIC IO Request 0x%p could not get frame "
1860                                 "header for frame index %d, status %x\n",
1861                                 __func__,
1862                                 stp_req,
1863                                 frame_index,
1864                                 status);
1865                         return status;
1866                 }
1867
1868                 switch (frame_header->fis_type) {
1869                 case FIS_REGD2H:
1870                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1871                                                                       frame_index,
1872                                                                       (void **)&frame_buffer);
1873
1874                         scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1875                                                                frame_header,
1876                                                                frame_buffer);
1877
1878                         /* The command has completed with error */
1879                         scic_sds_request_set_status(ireq,
1880                                                     SCU_TASK_DONE_CHECK_RESPONSE,
1881                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1882                         break;
1883
1884                 default:
1885                         dev_warn(scic_to_dev(scic),
1886                                  "%s: IO Request:0x%p Frame Id:%d protocol "
1887                                  "violation occurred\n",
1888                                  __func__,
1889                                  stp_req,
1890                                  frame_index);
1891
1892                         scic_sds_request_set_status(ireq,
1893                                                     SCU_TASK_DONE_UNEXP_FIS,
1894                                                     SCI_FAILURE_PROTOCOL_VIOLATION);
1895                         break;
1896                 }
1897
1898                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1899
1900                 /* Frame has been decoded return it to the controller */
1901                 scic_sds_controller_release_frame(scic, frame_index);
1902
1903                 return status;
1904         }
1905         case SCI_REQ_ABORTING:
1906                 /*
1907                  * TODO: Is it even possible to get an unsolicited frame in the
1908                  * aborting state?
1909                  */
1910                 scic_sds_controller_release_frame(scic, frame_index);
1911                 return SCI_SUCCESS;
1912
1913         default:
1914                 dev_warn(scic_to_dev(scic),
1915                          "%s: SCIC IO Request given unexpected frame %x while "
1916                          "in state %d\n",
1917                          __func__,
1918                          frame_index,
1919                          state);
1920
1921                 scic_sds_controller_release_frame(scic, frame_index);
1922                 return SCI_FAILURE_INVALID_STATE;
1923         }
1924 }
1925
1926 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
1927                                                        u32 completion_code)
1928 {
1929         enum sci_status status = SCI_SUCCESS;
1930
1931         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1932         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1933                 scic_sds_stp_request_udma_complete_request(ireq,
1934                                                            SCU_TASK_DONE_GOOD,
1935                                                            SCI_SUCCESS);
1936                 break;
1937         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
1938         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1939                 /* We must check ther response buffer to see if the D2H
1940                  * Register FIS was received before we got the TC
1941                  * completion.
1942                  */
1943                 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
1944                         scic_sds_remote_device_suspend(ireq->target_device,
1945                                 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1946
1947                         scic_sds_stp_request_udma_complete_request(ireq,
1948                                                                    SCU_TASK_DONE_CHECK_RESPONSE,
1949                                                                    SCI_FAILURE_IO_RESPONSE_VALID);
1950                 } else {
1951                         /* If we have an error completion status for the
1952                          * TC then we can expect a D2H register FIS from
1953                          * the device so we must change state to wait
1954                          * for it
1955                          */
1956                         sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
1957                 }
1958                 break;
1959
1960         /* TODO Check to see if any of these completion status need to
1961          * wait for the device to host register fis.
1962          */
1963         /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
1964          * - this comes only for B0
1965          */
1966         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
1967         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1968         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1969         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1970         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
1971                 scic_sds_remote_device_suspend(ireq->target_device,
1972                         SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1973         /* Fall through to the default case */
1974         default:
1975                 /* All other completion status cause the IO to be complete. */
1976                 scic_sds_stp_request_udma_complete_request(ireq,
1977                                         SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1978                                         SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1979                 break;
1980         }
1981
1982         return status;
1983 }
1984
1985 static enum sci_status
1986 stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
1987                                                    u32 completion_code)
1988 {
1989         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1990         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1991                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1992                                             SCI_SUCCESS);
1993
1994                 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
1995                 break;
1996
1997         default:
1998                 /*
1999                  * All other completion status cause the IO to be complete.
2000                  * If a NAK was received, then it is up to the user to retry
2001                  * the request.
2002                  */
2003                 scic_sds_request_set_status(ireq,
2004                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2005                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2006
2007                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2008                 break;
2009         }
2010
2011         return SCI_SUCCESS;
2012 }
2013
2014 static enum sci_status
2015 stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2016                                                      u32 completion_code)
2017 {
2018         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2019         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2020                 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
2021                                             SCI_SUCCESS);
2022
2023                 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2024                 break;
2025
2026         default:
2027                 /* All other completion status cause the IO to be complete.  If
2028                  * a NAK was received, then it is up to the user to retry the
2029                  * request.
2030                  */
2031                 scic_sds_request_set_status(ireq,
2032                         SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2033                         SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2034
2035                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2036                 break;
2037         }
2038
2039         return SCI_SUCCESS;
2040 }
2041
2042 enum sci_status
2043 scic_sds_io_request_tc_completion(struct isci_request *ireq,
2044                                   u32 completion_code)
2045 {
2046         enum sci_base_request_states state;
2047         struct scic_sds_controller *scic = ireq->owning_controller;
2048
2049         state = ireq->sm.current_state_id;
2050
2051         switch (state) {
2052         case SCI_REQ_STARTED:
2053                 return request_started_state_tc_event(ireq, completion_code);
2054
2055         case SCI_REQ_TASK_WAIT_TC_COMP:
2056                 return ssp_task_request_await_tc_event(ireq,
2057                                                        completion_code);
2058
2059         case SCI_REQ_SMP_WAIT_RESP:
2060                 return smp_request_await_response_tc_event(ireq,
2061                                                            completion_code);
2062
2063         case SCI_REQ_SMP_WAIT_TC_COMP:
2064                 return smp_request_await_tc_event(ireq, completion_code);
2065
2066         case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2067                 return stp_request_udma_await_tc_event(ireq,
2068                                                        completion_code);
2069
2070         case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2071                 return stp_request_non_data_await_h2d_tc_event(ireq,
2072                                                                completion_code);
2073
2074         case SCI_REQ_STP_PIO_WAIT_H2D:
2075                 return stp_request_pio_await_h2d_completion_tc_event(ireq,
2076                                                                      completion_code);
2077
2078         case SCI_REQ_STP_PIO_DATA_OUT:
2079                 return pio_data_out_tx_done_tc_event(ireq, completion_code);
2080
2081         case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2082                 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2083                                                                           completion_code);
2084
2085         case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2086                 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2087                                                                             completion_code);
2088
2089         case SCI_REQ_ABORTING:
2090                 return request_aborting_state_tc_event(ireq,
2091                                                        completion_code);
2092
2093         default:
2094                 dev_warn(scic_to_dev(scic),
2095                          "%s: SCIC IO Request given task completion "
2096                          "notification %x while in wrong state %d\n",
2097                          __func__,
2098                          completion_code,
2099                          state);
2100                 return SCI_FAILURE_INVALID_STATE;
2101         }
2102 }
2103
2104 /**
2105  * isci_request_process_response_iu() - This function sets the status and
2106  *    response iu, in the task struct, from the request object for the upper
2107  *    layer driver.
2108  * @sas_task: This parameter is the task struct from the upper layer driver.
2109  * @resp_iu: This parameter points to the response iu of the completed request.
2110  * @dev: This parameter specifies the linux device struct.
2111  *
2112  * none.
2113  */
2114 static void isci_request_process_response_iu(
2115         struct sas_task *task,
2116         struct ssp_response_iu *resp_iu,
2117         struct device *dev)
2118 {
2119         dev_dbg(dev,
2120                 "%s: resp_iu = %p "
2121                 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2122                 "resp_iu->response_data_len = %x, "
2123                 "resp_iu->sense_data_len = %x\nrepsonse data: ",
2124                 __func__,
2125                 resp_iu,
2126                 resp_iu->status,
2127                 resp_iu->datapres,
2128                 resp_iu->response_data_len,
2129                 resp_iu->sense_data_len);
2130
2131         task->task_status.stat = resp_iu->status;
2132
2133         /* libsas updates the task status fields based on the response iu. */
2134         sas_ssp_task_response(dev, task, resp_iu);
2135 }
2136
2137 /**
2138  * isci_request_set_open_reject_status() - This function prepares the I/O
2139  *    completion for OPEN_REJECT conditions.
2140  * @request: This parameter is the completed isci_request object.
2141  * @response_ptr: This parameter specifies the service response for the I/O.
2142  * @status_ptr: This parameter specifies the exec status for the I/O.
2143  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2144  *    the LLDD with respect to completing this request or forcing an abort
2145  *    condition on the I/O.
2146  * @open_rej_reason: This parameter specifies the encoded reason for the
2147  *    abandon-class reject.
2148  *
2149  * none.
2150  */
2151 static void isci_request_set_open_reject_status(
2152         struct isci_request *request,
2153         struct sas_task *task,
2154         enum service_response *response_ptr,
2155         enum exec_status *status_ptr,
2156         enum isci_completion_selection *complete_to_host_ptr,
2157         enum sas_open_rej_reason open_rej_reason)
2158 {
2159         /* Task in the target is done. */
2160         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2161         *response_ptr                     = SAS_TASK_UNDELIVERED;
2162         *status_ptr                       = SAS_OPEN_REJECT;
2163         *complete_to_host_ptr             = isci_perform_normal_io_completion;
2164         task->task_status.open_rej_reason = open_rej_reason;
2165 }
2166
2167 /**
2168  * isci_request_handle_controller_specific_errors() - This function decodes
2169  *    controller-specific I/O completion error conditions.
2170  * @request: This parameter is the completed isci_request object.
2171  * @response_ptr: This parameter specifies the service response for the I/O.
2172  * @status_ptr: This parameter specifies the exec status for the I/O.
2173  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2174  *    the LLDD with respect to completing this request or forcing an abort
2175  *    condition on the I/O.
2176  *
2177  * none.
2178  */
2179 static void isci_request_handle_controller_specific_errors(
2180         struct isci_remote_device *idev,
2181         struct isci_request *request,
2182         struct sas_task *task,
2183         enum service_response *response_ptr,
2184         enum exec_status *status_ptr,
2185         enum isci_completion_selection *complete_to_host_ptr)
2186 {
2187         unsigned int cstatus;
2188
2189         cstatus = request->scu_status;
2190
2191         dev_dbg(&request->isci_host->pdev->dev,
2192                 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2193                 "- controller status = 0x%x\n",
2194                 __func__, request, cstatus);
2195
2196         /* Decode the controller-specific errors; most
2197          * important is to recognize those conditions in which
2198          * the target may still have a task outstanding that
2199          * must be aborted.
2200          *
2201          * Note that there are SCU completion codes being
2202          * named in the decode below for which SCIC has already
2203          * done work to handle them in a way other than as
2204          * a controller-specific completion code; these are left
2205          * in the decode below for completeness sake.
2206          */
2207         switch (cstatus) {
2208         case SCU_TASK_DONE_DMASETUP_DIRERR:
2209         /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2210         case SCU_TASK_DONE_XFERCNT_ERR:
2211                 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2212                 if (task->task_proto == SAS_PROTOCOL_SMP) {
2213                         /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2214                         *response_ptr = SAS_TASK_COMPLETE;
2215
2216                         /* See if the device has been/is being stopped. Note
2217                          * that we ignore the quiesce state, since we are
2218                          * concerned about the actual device state.
2219                          */
2220                         if (!idev)
2221                                 *status_ptr = SAS_DEVICE_UNKNOWN;
2222                         else
2223                                 *status_ptr = SAS_ABORTED_TASK;
2224
2225                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2226
2227                         *complete_to_host_ptr =
2228                                 isci_perform_normal_io_completion;
2229                 } else {
2230                         /* Task in the target is not done. */
2231                         *response_ptr = SAS_TASK_UNDELIVERED;
2232
2233                         if (!idev)
2234                                 *status_ptr = SAS_DEVICE_UNKNOWN;
2235                         else
2236                                 *status_ptr = SAM_STAT_TASK_ABORTED;
2237
2238                         clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2239
2240                         *complete_to_host_ptr =
2241                                 isci_perform_error_io_completion;
2242                 }
2243
2244                 break;
2245
2246         case SCU_TASK_DONE_CRC_ERR:
2247         case SCU_TASK_DONE_NAK_CMD_ERR:
2248         case SCU_TASK_DONE_EXCESS_DATA:
2249         case SCU_TASK_DONE_UNEXP_FIS:
2250         /* Also SCU_TASK_DONE_UNEXP_RESP: */
2251         case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
2252         case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
2253         case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
2254                 /* These are conditions in which the target
2255                  * has completed the task, so that no cleanup
2256                  * is necessary.
2257                  */
2258                 *response_ptr = SAS_TASK_COMPLETE;
2259
2260                 /* See if the device has been/is being stopped. Note
2261                  * that we ignore the quiesce state, since we are
2262                  * concerned about the actual device state.
2263                  */
2264                 if (!idev)
2265                         *status_ptr = SAS_DEVICE_UNKNOWN;
2266                 else
2267                         *status_ptr = SAS_ABORTED_TASK;
2268
2269                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2270
2271                 *complete_to_host_ptr = isci_perform_normal_io_completion;
2272                 break;
2273
2274
2275         /* Note that the only open reject completion codes seen here will be
2276          * abandon-class codes; all others are automatically retried in the SCU.
2277          */
2278         case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2279
2280                 isci_request_set_open_reject_status(
2281                         request, task, response_ptr, status_ptr,
2282                         complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2283                 break;
2284
2285         case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2286
2287                 /* Note - the return of AB0 will change when
2288                  * libsas implements detection of zone violations.
2289                  */
2290                 isci_request_set_open_reject_status(
2291                         request, task, response_ptr, status_ptr,
2292                         complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2293                 break;
2294
2295         case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2296
2297                 isci_request_set_open_reject_status(
2298                         request, task, response_ptr, status_ptr,
2299                         complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2300                 break;
2301
2302         case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2303
2304                 isci_request_set_open_reject_status(
2305                         request, task, response_ptr, status_ptr,
2306                         complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2307                 break;
2308
2309         case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2310
2311                 isci_request_set_open_reject_status(
2312                         request, task, response_ptr, status_ptr,
2313                         complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2314                 break;
2315
2316         case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2317
2318                 isci_request_set_open_reject_status(
2319                         request, task, response_ptr, status_ptr,
2320                         complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2321                 break;
2322
2323         case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2324
2325                 isci_request_set_open_reject_status(
2326                         request, task, response_ptr, status_ptr,
2327                         complete_to_host_ptr, SAS_OREJ_STP_NORES);
2328                 break;
2329
2330         case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2331
2332                 isci_request_set_open_reject_status(
2333                         request, task, response_ptr, status_ptr,
2334                         complete_to_host_ptr, SAS_OREJ_EPROTO);
2335                 break;
2336
2337         case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2338
2339                 isci_request_set_open_reject_status(
2340                         request, task, response_ptr, status_ptr,
2341                         complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2342                 break;
2343
2344         case SCU_TASK_DONE_LL_R_ERR:
2345         /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2346         case SCU_TASK_DONE_LL_PERR:
2347         case SCU_TASK_DONE_LL_SY_TERM:
2348         /* Also SCU_TASK_DONE_NAK_ERR:*/
2349         case SCU_TASK_DONE_LL_LF_TERM:
2350         /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2351         case SCU_TASK_DONE_LL_ABORT_ERR:
2352         case SCU_TASK_DONE_SEQ_INV_TYPE:
2353         /* Also SCU_TASK_DONE_UNEXP_XR: */
2354         case SCU_TASK_DONE_XR_IU_LEN_ERR:
2355         case SCU_TASK_DONE_INV_FIS_LEN:
2356         /* Also SCU_TASK_DONE_XR_WD_LEN: */
2357         case SCU_TASK_DONE_SDMA_ERR:
2358         case SCU_TASK_DONE_OFFSET_ERR:
2359         case SCU_TASK_DONE_MAX_PLD_ERR:
2360         case SCU_TASK_DONE_LF_ERR:
2361         case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
2362         case SCU_TASK_DONE_SMP_LL_RX_ERR:
2363         case SCU_TASK_DONE_UNEXP_DATA:
2364         case SCU_TASK_DONE_UNEXP_SDBFIS:
2365         case SCU_TASK_DONE_REG_ERR:
2366         case SCU_TASK_DONE_SDB_ERR:
2367         case SCU_TASK_DONE_TASK_ABORT:
2368         default:
2369                 /* Task in the target is not done. */
2370                 *response_ptr = SAS_TASK_UNDELIVERED;
2371                 *status_ptr = SAM_STAT_TASK_ABORTED;
2372
2373                 if (task->task_proto == SAS_PROTOCOL_SMP) {
2374                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2375
2376                         *complete_to_host_ptr = isci_perform_normal_io_completion;
2377                 } else {
2378                         clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2379
2380                         *complete_to_host_ptr = isci_perform_error_io_completion;
2381                 }
2382                 break;
2383         }
2384 }
2385
2386 /**
2387  * isci_task_save_for_upper_layer_completion() - This function saves the
2388  *    request for later completion to the upper layer driver.
2389  * @host: This parameter is a pointer to the host on which the the request
2390  *    should be queued (either as an error or success).
2391  * @request: This parameter is the completed request.
2392  * @response: This parameter is the response code for the completed task.
2393  * @status: This parameter is the status code for the completed task.
2394  *
2395  * none.
2396  */
2397 static void isci_task_save_for_upper_layer_completion(
2398         struct isci_host *host,
2399         struct isci_request *request,
2400         enum service_response response,
2401         enum exec_status status,
2402         enum isci_completion_selection task_notification_selection)
2403 {
2404         struct sas_task *task = isci_request_access_task(request);
2405
2406         task_notification_selection
2407                 = isci_task_set_completion_status(task, response, status,
2408                                                   task_notification_selection);
2409
2410         /* Tasks aborted specifically by a call to the lldd_abort_task
2411          * function should not be completed to the host in the regular path.
2412          */
2413         switch (task_notification_selection) {
2414
2415         case isci_perform_normal_io_completion:
2416
2417                 /* Normal notification (task_done) */
2418                 dev_dbg(&host->pdev->dev,
2419                         "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2420                         __func__,
2421                         task,
2422                         task->task_status.resp, response,
2423                         task->task_status.stat, status);
2424                 /* Add to the completed list. */
2425                 list_add(&request->completed_node,
2426                          &host->requests_to_complete);
2427
2428                 /* Take the request off the device's pending request list. */
2429                 list_del_init(&request->dev_node);
2430                 break;
2431
2432         case isci_perform_aborted_io_completion:
2433                 /* No notification to libsas because this request is
2434                  * already in the abort path.
2435                  */
2436                 dev_warn(&host->pdev->dev,
2437                          "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2438                          __func__,
2439                          task,
2440                          task->task_status.resp, response,
2441                          task->task_status.stat, status);
2442
2443                 /* Wake up whatever process was waiting for this
2444                  * request to complete.
2445                  */
2446                 WARN_ON(request->io_request_completion == NULL);
2447
2448                 if (request->io_request_completion != NULL) {
2449
2450                         /* Signal whoever is waiting that this
2451                         * request is complete.
2452                         */
2453                         complete(request->io_request_completion);
2454                 }
2455                 break;
2456
2457         case isci_perform_error_io_completion:
2458                 /* Use sas_task_abort */
2459                 dev_warn(&host->pdev->dev,
2460                          "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2461                          __func__,
2462                          task,
2463                          task->task_status.resp, response,
2464                          task->task_status.stat, status);
2465                 /* Add to the aborted list. */
2466                 list_add(&request->completed_node,
2467                          &host->requests_to_errorback);
2468                 break;
2469
2470         default:
2471                 dev_warn(&host->pdev->dev,
2472                          "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2473                          __func__,
2474                          task,
2475                          task->task_status.resp, response,
2476                          task->task_status.stat, status);
2477
2478                 /* Add to the error to libsas list. */
2479                 list_add(&request->completed_node,
2480                          &host->requests_to_errorback);
2481                 break;
2482         }
2483 }
2484
2485 static void isci_request_io_request_complete(struct isci_host *isci_host,
2486                                              struct isci_request *request,
2487                                              enum sci_io_status completion_status)
2488 {
2489         struct sas_task *task = isci_request_access_task(request);
2490         struct ssp_response_iu *resp_iu;
2491         void *resp_buf;
2492         unsigned long task_flags;
2493         struct isci_remote_device *idev = isci_lookup_device(task->dev);
2494         enum service_response response       = SAS_TASK_UNDELIVERED;
2495         enum exec_status status         = SAS_ABORTED_TASK;
2496         enum isci_request_status request_status;
2497         enum isci_completion_selection complete_to_host
2498                 = isci_perform_normal_io_completion;
2499
2500         dev_dbg(&isci_host->pdev->dev,
2501                 "%s: request = %p, task = %p,\n"
2502                 "task->data_dir = %d completion_status = 0x%x\n",
2503                 __func__,
2504                 request,
2505                 task,
2506                 task->data_dir,
2507                 completion_status);
2508
2509         spin_lock(&request->state_lock);
2510         request_status = isci_request_get_state(request);
2511
2512         /* Decode the request status.  Note that if the request has been
2513          * aborted by a task management function, we don't care
2514          * what the status is.
2515          */
2516         switch (request_status) {
2517
2518         case aborted:
2519                 /* "aborted" indicates that the request was aborted by a task
2520                  * management function, since once a task management request is
2521                  * perfomed by the device, the request only completes because
2522                  * of the subsequent driver terminate.
2523                  *
2524                  * Aborted also means an external thread is explicitly managing
2525                  * this request, so that we do not complete it up the stack.
2526                  *
2527                  * The target is still there (since the TMF was successful).
2528                  */
2529                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2530                 response = SAS_TASK_COMPLETE;
2531
2532                 /* See if the device has been/is being stopped. Note
2533                  * that we ignore the quiesce state, since we are
2534                  * concerned about the actual device state.
2535                  */
2536                 if (!idev)
2537                         status = SAS_DEVICE_UNKNOWN;
2538                 else
2539                         status = SAS_ABORTED_TASK;
2540
2541                 complete_to_host = isci_perform_aborted_io_completion;
2542                 /* This was an aborted request. */
2543
2544                 spin_unlock(&request->state_lock);
2545                 break;
2546
2547         case aborting:
2548                 /* aborting means that the task management function tried and
2549                  * failed to abort the request. We need to note the request
2550                  * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2551                  * target as down.
2552                  *
2553                  * Aborting also means an external thread is explicitly managing
2554                  * this request, so that we do not complete it up the stack.
2555                  */
2556                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2557                 response = SAS_TASK_UNDELIVERED;
2558
2559                 if (!idev)
2560                         /* The device has been /is being stopped. Note that
2561                          * we ignore the quiesce state, since we are
2562                          * concerned about the actual device state.
2563                          */
2564                         status = SAS_DEVICE_UNKNOWN;
2565                 else
2566                         status = SAS_PHY_DOWN;
2567
2568                 complete_to_host = isci_perform_aborted_io_completion;
2569
2570                 /* This was an aborted request. */
2571
2572                 spin_unlock(&request->state_lock);
2573                 break;
2574
2575         case terminating:
2576
2577                 /* This was an terminated request.  This happens when
2578                  * the I/O is being terminated because of an action on
2579                  * the device (reset, tear down, etc.), and the I/O needs
2580                  * to be completed up the stack.
2581                  */
2582                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2583                 response = SAS_TASK_UNDELIVERED;
2584
2585                 /* See if the device has been/is being stopped. Note
2586                  * that we ignore the quiesce state, since we are
2587                  * concerned about the actual device state.
2588                  */
2589                 if (!idev)
2590                         status = SAS_DEVICE_UNKNOWN;
2591                 else
2592                         status = SAS_ABORTED_TASK;
2593
2594                 complete_to_host = isci_perform_aborted_io_completion;
2595
2596                 /* This was a terminated request. */
2597
2598                 spin_unlock(&request->state_lock);
2599                 break;
2600
2601         case dead:
2602                 /* This was a terminated request that timed-out during the
2603                  * termination process.  There is no task to complete to
2604                  * libsas.
2605                  */
2606                 complete_to_host = isci_perform_normal_io_completion;
2607                 spin_unlock(&request->state_lock);
2608                 break;
2609
2610         default:
2611
2612                 /* The request is done from an SCU HW perspective. */
2613                 request->status = completed;
2614
2615                 spin_unlock(&request->state_lock);
2616
2617                 /* This is an active request being completed from the core. */
2618                 switch (completion_status) {
2619
2620                 case SCI_IO_FAILURE_RESPONSE_VALID:
2621                         dev_dbg(&isci_host->pdev->dev,
2622                                 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2623                                 __func__,
2624                                 request,
2625                                 task);
2626
2627                         if (sas_protocol_ata(task->task_proto)) {
2628                                 resp_buf = &request->stp.rsp;
2629                                 isci_request_process_stp_response(task,
2630                                                                   resp_buf);
2631                         } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2632
2633                                 /* crack the iu response buffer. */
2634                                 resp_iu = &request->ssp.rsp;
2635                                 isci_request_process_response_iu(task, resp_iu,
2636                                                                  &isci_host->pdev->dev);
2637
2638                         } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2639
2640                                 dev_err(&isci_host->pdev->dev,
2641                                         "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2642                                         "SAS_PROTOCOL_SMP protocol\n",
2643                                         __func__);
2644
2645                         } else
2646                                 dev_err(&isci_host->pdev->dev,
2647                                         "%s: unknown protocol\n", __func__);
2648
2649                         /* use the task status set in the task struct by the
2650                          * isci_request_process_response_iu call.
2651                          */
2652                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2653                         response = task->task_status.resp;
2654                         status = task->task_status.stat;
2655                         break;
2656
2657                 case SCI_IO_SUCCESS:
2658                 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2659
2660                         response = SAS_TASK_COMPLETE;
2661                         status   = SAM_STAT_GOOD;
2662                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2663
2664                         if (task->task_proto == SAS_PROTOCOL_SMP) {
2665                                 void *rsp = &request->smp.rsp;
2666
2667                                 dev_dbg(&isci_host->pdev->dev,
2668                                         "%s: SMP protocol completion\n",
2669                                         __func__);
2670
2671                                 sg_copy_from_buffer(
2672                                         &task->smp_task.smp_resp, 1,
2673                                         rsp, sizeof(struct smp_resp));
2674                         } else if (completion_status
2675                                    == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2676
2677                                 /* This was an SSP / STP / SATA transfer.
2678                                  * There is a possibility that less data than
2679                                  * the maximum was transferred.
2680                                  */
2681                                 u32 transferred_length = sci_req_tx_bytes(request);
2682
2683                                 task->task_status.residual
2684                                         = task->total_xfer_len - transferred_length;
2685
2686                                 /* If there were residual bytes, call this an
2687                                  * underrun.
2688                                  */
2689                                 if (task->task_status.residual != 0)
2690                                         status = SAS_DATA_UNDERRUN;
2691
2692                                 dev_dbg(&isci_host->pdev->dev,
2693                                         "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2694                                         __func__,
2695                                         status);
2696
2697                         } else
2698                                 dev_dbg(&isci_host->pdev->dev,
2699                                         "%s: SCI_IO_SUCCESS\n",
2700                                         __func__);
2701
2702                         break;
2703
2704                 case SCI_IO_FAILURE_TERMINATED:
2705                         dev_dbg(&isci_host->pdev->dev,
2706                                 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2707                                 __func__,
2708                                 request,
2709                                 task);
2710
2711                         /* The request was terminated explicitly.  No handling
2712                          * is needed in the SCSI error handler path.
2713                          */
2714                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2715                         response = SAS_TASK_UNDELIVERED;
2716
2717                         /* See if the device has been/is being stopped. Note
2718                          * that we ignore the quiesce state, since we are
2719                          * concerned about the actual device state.
2720                          */
2721                         if (!idev)
2722                                 status = SAS_DEVICE_UNKNOWN;
2723                         else
2724                                 status = SAS_ABORTED_TASK;
2725
2726                         complete_to_host = isci_perform_normal_io_completion;
2727                         break;
2728
2729                 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2730
2731                         isci_request_handle_controller_specific_errors(
2732                                 idev, request, task, &response, &status,
2733                                 &complete_to_host);
2734
2735                         break;
2736
2737                 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2738                         /* This is a special case, in that the I/O completion
2739                          * is telling us that the device needs a reset.
2740                          * In order for the device reset condition to be
2741                          * noticed, the I/O has to be handled in the error
2742                          * handler.  Set the reset flag and cause the
2743                          * SCSI error thread to be scheduled.
2744                          */
2745                         spin_lock_irqsave(&task->task_state_lock, task_flags);
2746                         task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2747                         spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2748
2749                         /* Fail the I/O. */
2750                         response = SAS_TASK_UNDELIVERED;
2751                         status = SAM_STAT_TASK_ABORTED;
2752
2753                         complete_to_host = isci_perform_error_io_completion;
2754                         clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2755                         break;
2756
2757                 case SCI_FAILURE_RETRY_REQUIRED:
2758
2759                         /* Fail the I/O so it can be retried. */
2760                         response = SAS_TASK_UNDELIVERED;
2761                         if (!idev)
2762                                 status = SAS_DEVICE_UNKNOWN;
2763                         else
2764                                 status = SAS_ABORTED_TASK;
2765
2766                         complete_to_host = isci_perform_normal_io_completion;
2767                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2768                         break;
2769
2770
2771                 default:
2772                         /* Catch any otherwise unhandled error codes here. */
2773                         dev_warn(&isci_host->pdev->dev,
2774                                  "%s: invalid completion code: 0x%x - "
2775                                  "isci_request = %p\n",
2776                                  __func__, completion_status, request);
2777
2778                         response = SAS_TASK_UNDELIVERED;
2779
2780                         /* See if the device has been/is being stopped. Note
2781                          * that we ignore the quiesce state, since we are
2782                          * concerned about the actual device state.
2783                          */
2784                         if (!idev)
2785                                 status = SAS_DEVICE_UNKNOWN;
2786                         else
2787                                 status = SAS_ABORTED_TASK;
2788
2789                         if (SAS_PROTOCOL_SMP == task->task_proto) {
2790                                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2791                                 complete_to_host = isci_perform_normal_io_completion;
2792                         } else {
2793                                 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2794                                 complete_to_host = isci_perform_error_io_completion;
2795                         }
2796                         break;
2797                 }
2798                 break;
2799         }
2800
2801         switch (task->task_proto) {
2802         case SAS_PROTOCOL_SSP:
2803                 if (task->data_dir == DMA_NONE)
2804                         break;
2805                 if (task->num_scatter == 0)
2806                         /* 0 indicates a single dma address */
2807                         dma_unmap_single(&isci_host->pdev->dev,
2808                                          request->zero_scatter_daddr,
2809                                          task->total_xfer_len, task->data_dir);
2810                 else  /* unmap the sgl dma addresses */
2811                         dma_unmap_sg(&isci_host->pdev->dev, task->scatter,
2812                                      request->num_sg_entries, task->data_dir);
2813                 break;
2814         case SAS_PROTOCOL_SMP: {
2815                 struct scatterlist *sg = &task->smp_task.smp_req;
2816                 struct smp_req *smp_req;
2817                 void *kaddr;
2818
2819                 dma_unmap_sg(&isci_host->pdev->dev, sg, 1, DMA_TO_DEVICE);
2820
2821                 /* need to swab it back in case the command buffer is re-used */
2822                 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
2823                 smp_req = kaddr + sg->offset;
2824                 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2825                 kunmap_atomic(kaddr, KM_IRQ0);
2826                 break;
2827         }
2828         default:
2829                 break;
2830         }
2831
2832         /* Put the completed request on the correct list */
2833         isci_task_save_for_upper_layer_completion(isci_host, request, response,
2834                                                   status, complete_to_host
2835                                                   );
2836
2837         /* complete the io request to the core. */
2838         scic_controller_complete_io(&isci_host->sci,
2839                                     request->target_device,
2840                                     request);
2841         isci_put_device(idev);
2842
2843         /* set terminated handle so it cannot be completed or
2844          * terminated again, and to cause any calls into abort
2845          * task to recognize the already completed case.
2846          */
2847         set_bit(IREQ_TERMINATED, &request->flags);
2848 }
2849
2850 static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
2851 {
2852         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2853         struct domain_device *dev = sci_dev_to_domain(ireq->target_device);
2854         struct sas_task *task;
2855
2856         /* XXX as hch said always creating an internal sas_task for tmf
2857          * requests would simplify the driver
2858          */
2859         task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
2860
2861         /* all unaccelerated request types (non ssp or ncq) handled with
2862          * substates
2863          */
2864         if (!task && dev->dev_type == SAS_END_DEV) {
2865                 sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
2866         } else if (!task &&
2867                    (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
2868                     isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
2869                 sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
2870         } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2871                 sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
2872         } else if (task && sas_protocol_ata(task->task_proto) &&
2873                    !task->ata_task.use_ncq) {
2874                 u32 state;
2875
2876                 if (task->data_dir == DMA_NONE)
2877                         state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2878                 else if (task->ata_task.dma_xfer)
2879                         state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2880                 else /* PIO */
2881                         state = SCI_REQ_STP_PIO_WAIT_H2D;
2882
2883                 sci_change_state(sm, state);
2884         }
2885 }
2886
2887 static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
2888 {
2889         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2890         struct scic_sds_controller *scic = ireq->owning_controller;
2891         struct isci_host *ihost = scic_to_ihost(scic);
2892
2893         /* Tell the SCI_USER that the IO request is complete */
2894         if (!test_bit(IREQ_TMF, &ireq->flags))
2895                 isci_request_io_request_complete(ihost, ireq,
2896                                                  ireq->sci_status);
2897         else
2898                 isci_task_request_complete(ihost, ireq, ireq->sci_status);
2899 }
2900
2901 static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
2902 {
2903         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2904
2905         /* Setting the abort bit in the Task Context is required by the silicon. */
2906         ireq->tc->abort = 1;
2907 }
2908
2909 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2910 {
2911         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2912
2913         scic_sds_remote_device_set_working_request(ireq->target_device,
2914                                                    ireq);
2915 }
2916
2917 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2918 {
2919         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2920
2921         scic_sds_remote_device_set_working_request(ireq->target_device,
2922                                                    ireq);
2923 }
2924
2925 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
2926 {
2927         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2928
2929         scic_sds_remote_device_set_working_request(ireq->target_device,
2930                                                    ireq);
2931 }
2932
2933 static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
2934 {
2935         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2936         struct scu_task_context *tc = ireq->tc;
2937         struct host_to_dev_fis *h2d_fis;
2938         enum sci_status status;
2939
2940         /* Clear the SRST bit */
2941         h2d_fis = &ireq->stp.cmd;
2942         h2d_fis->control = 0;
2943
2944         /* Clear the TC control bit */
2945         tc->control_frame = 0;
2946
2947         status = scic_controller_continue_io(ireq);
2948         WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
2949 }
2950
/* Request state machine: maps each SCI_REQ_* state to its entry action.
 * States with an empty initializer have no entry-time side effects; the
 * interesting transitions are STARTED (substate selection), the STP wait
 * states (register the working request / continue the soft reset),
 * COMPLETED (notify the host) and ABORTING (set the TC abort bit).
 */
static const struct sci_base_state scic_sds_request_state_table[] = {
	[SCI_REQ_INIT] = { },
	[SCI_REQ_CONSTRUCTED] = { },
	[SCI_REQ_STARTED] = {
		.enter_state = scic_sds_request_started_state_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
	[SCI_REQ_STP_PIO_WAIT_H2D] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
	[SCI_REQ_STP_PIO_DATA_IN] = { },
	[SCI_REQ_STP_PIO_DATA_OUT] = { },
	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
	[SCI_REQ_SMP_WAIT_RESP] = { },
	[SCI_REQ_SMP_WAIT_TC_COMP] = { },
	[SCI_REQ_COMPLETED] = {
		.enter_state = scic_sds_request_completed_state_enter,
	},
	[SCI_REQ_ABORTING] = {
		.enter_state = scic_sds_request_aborting_state_enter,
	},
	[SCI_REQ_FINAL] = { },
};
2988
2989 static void
2990 scic_sds_general_request_construct(struct scic_sds_controller *scic,
2991                                    struct scic_sds_remote_device *sci_dev,
2992                                    struct isci_request *ireq)
2993 {
2994         sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT);
2995
2996         ireq->target_device = sci_dev;
2997         ireq->protocol = SCIC_NO_PROTOCOL;
2998         ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
2999
3000         ireq->sci_status   = SCI_SUCCESS;
3001         ireq->scu_status   = 0;
3002         ireq->post_context = 0xFFFFFFFF;
3003 }
3004
3005 static enum sci_status
3006 scic_io_request_construct(struct scic_sds_controller *scic,
3007                           struct scic_sds_remote_device *sci_dev,
3008                           struct isci_request *ireq)
3009 {
3010         struct domain_device *dev = sci_dev_to_domain(sci_dev);
3011         enum sci_status status = SCI_SUCCESS;
3012
3013         /* Build the common part of the request */
3014         scic_sds_general_request_construct(scic, sci_dev, ireq);
3015
3016         if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3017                 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3018
3019         if (dev->dev_type == SAS_END_DEV)
3020                 /* pass */;
3021         else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3022                 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
3023         else if (dev_is_expander(dev))
3024                 /* pass */;
3025         else
3026                 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3027
3028         memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
3029
3030         return status;
3031 }
3032
3033 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
3034                                             struct scic_sds_remote_device *sci_dev,
3035                                             u16 io_tag, struct isci_request *ireq)
3036 {
3037         struct domain_device *dev = sci_dev_to_domain(sci_dev);
3038         enum sci_status status = SCI_SUCCESS;
3039
3040         /* Build the common part of the request */
3041         scic_sds_general_request_construct(scic, sci_dev, ireq);
3042
3043         if (dev->dev_type == SAS_END_DEV ||
3044             dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3045                 set_bit(IREQ_TMF, &ireq->flags);
3046                 memset(ireq->tc, 0, sizeof(struct scu_task_context));
3047         } else
3048                 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3049
3050         return status;
3051 }
3052
3053 static enum sci_status isci_request_ssp_request_construct(
3054         struct isci_request *request)
3055 {
3056         enum sci_status status;
3057
3058         dev_dbg(&request->isci_host->pdev->dev,
3059                 "%s: request = %p\n",
3060                 __func__,
3061                 request);
3062         status = scic_io_request_construct_basic_ssp(request);
3063         return status;
3064 }
3065
3066 static enum sci_status isci_request_stp_request_construct(
3067         struct isci_request *request)
3068 {
3069         struct sas_task *task = isci_request_access_task(request);
3070         enum sci_status status;
3071         struct host_to_dev_fis *register_fis;
3072
3073         dev_dbg(&request->isci_host->pdev->dev,
3074                 "%s: request = %p\n",
3075                 __func__,
3076                 request);
3077
3078         /* Get the host_to_dev_fis from the core and copy
3079          * the fis from the task into it.
3080          */
3081         register_fis = isci_sata_task_to_fis_copy(task);
3082
3083         status = scic_io_request_construct_basic_sata(request);
3084
3085         /* Set the ncq tag in the fis, from the queue
3086          * command in the task.
3087          */
3088         if (isci_sata_is_task_ncq(task)) {
3089
3090                 isci_sata_set_ncq_tag(
3091                         register_fis,
3092                         task
3093                         );
3094         }
3095
3096         return status;
3097 }
3098
/* Build the SCU task context for an SMP request.  The request payload in
 * the task's single-entry sgl is byte-swapped in place (the SCU consumes
 * big-endian dwords), dma-mapped, and the task context is filled in so the
 * frame can be posted to the hardware.
 */
static enum sci_status
scic_io_request_construct_smp(struct device *dev,
			      struct isci_request *ireq,
			      struct sas_task *task)
{
	struct scatterlist *sg = &task->smp_task.smp_req;
	struct scic_sds_remote_device *sci_dev;
	struct scu_task_context *task_context;
	struct isci_port *iport;
	struct smp_req *smp_req;
	void *kaddr;
	u8 req_len;
	u32 cmd;

	kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
	smp_req = kaddr + sg->offset;
	/*
	 * Look at the SMP requests' header fields; for certain SAS 1.x SMP
	 * functions under SAS 2.0, a zero request length really indicates
	 * a non-zero default length.
	 */
	if (smp_req->req_len == 0) {
		switch (smp_req->func) {
		case SMP_DISCOVER:
		case SMP_REPORT_PHY_ERR_LOG:
		case SMP_REPORT_PHY_SATA:
		case SMP_REPORT_ROUTE_INFO:
			smp_req->req_len = 2;
			break;
		case SMP_CONF_ROUTE_INFO:
		case SMP_PHY_CONTROL:
		case SMP_PHY_TEST_FUNCTION:
			smp_req->req_len = 9;
			break;
			/* Default - zero is a valid default for 2.0. */
		}
	}
	/* Capture the (possibly fixed-up) length, byte swap the request in
	 * place, and keep the first dword of the command for the TC before
	 * dropping the atomic mapping.
	 */
	req_len = smp_req->req_len;
	sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
	cmd = *(u32 *) smp_req;
	kunmap_atomic(kaddr, KM_IRQ0);

	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
		return SCI_FAILURE;

	ireq->protocol = SCIC_SMP_PROTOCOL;

	/* The request was byte swapped above, before the dma mapping. */

	task_context = ireq->tc;

	sci_dev = scic_sds_request_get_device(ireq);
	iport = scic_sds_request_get_port(ireq);

	/*
	 * Fill in the TC with the its required data
	 * 00h
	 */
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = sci_dev->connection_rate;
	/* NOTE(review): scic_sds_controller_get_protocol_engine_group()
	 * presumably does not evaluate its argument ('scic' is not declared
	 * in this scope) - confirm it is a constant macro.
	 */
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(scic);
	task_context->logical_port_index = scic_sds_port_get_index(iport);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
	task_context->abort = 0;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	/* 04h */
	task_context->remote_node_index = sci_dev->rnc.remote_node_index;
	task_context->command_code = 0;
	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;

	/* 08h */
	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 1;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	/* 0ch */
	task_context->address_modifier = 0;

	/* 10h */
	task_context->ssp_command_iu_length = req_len;

	/* 14h */
	task_context->transfer_length_bytes = 0;

	/*
	 * 18h ~ 30h, protocol specific
	 * since commandIU has been built by framework at this point, we just
	 * copy the first DWord from command IU to this location. */
	memcpy(&task_context->type.smp, &cmd, sizeof(u32));

	/*
	 * 40h
	 * "For SMP you could program it to zero. We would prefer that way
	 * so that done code will be consistent." - Venki
	 */
	task_context->task_phase = 0;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
				 (scic_sds_controller_get_protocol_engine_group(scic) <<
				  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
				 (scic_sds_port_get_index(iport) <<
				  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
				 ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context command buffer should not contain command header.
	 */
	task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
	task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));

	/* SMP response comes as UF, so no need to set response IU address. */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}
3224
3225 /*
3226  * isci_smp_request_build() - This function builds the smp request.
3227  * @ireq: This parameter points to the isci_request allocated in the
3228  *    request construct function.
3229  *
3230  * SCI_SUCCESS on successfull completion, or specific failure code.
3231  */
3232 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3233 {
3234         struct sas_task *task = isci_request_access_task(ireq);
3235         struct device *dev = &ireq->isci_host->pdev->dev;
3236         enum sci_status status = SCI_FAILURE;
3237
3238         status = scic_io_request_construct_smp(dev, ireq, task);
3239         if (status != SCI_SUCCESS)
3240                 dev_warn(&ireq->isci_host->pdev->dev,
3241                          "%s: failed with status = %d\n",
3242                          __func__,
3243                          status);
3244
3245         return status;
3246 }
3247
3248 /**
3249  * isci_io_request_build() - This function builds the io request object.
3250  * @isci_host: This parameter specifies the ISCI host object
3251  * @request: This parameter points to the isci_request object allocated in the
3252  *    request construct function.
3253  * @sci_device: This parameter is the handle for the sci core's remote device
3254  *    object that is the destination for this request.
3255  *
3256  * SCI_SUCCESS on successfull completion, or specific failure code.
3257  */
3258 static enum sci_status isci_io_request_build(struct isci_host *isci_host,
3259                                              struct isci_request *request,
3260                                              struct isci_remote_device *isci_device)
3261 {
3262         enum sci_status status = SCI_SUCCESS;
3263         struct sas_task *task = isci_request_access_task(request);
3264         struct scic_sds_remote_device *sci_device = &isci_device->sci;
3265
3266         dev_dbg(&isci_host->pdev->dev,
3267                 "%s: isci_device = 0x%p; request = %p, "
3268                 "num_scatter = %d\n",
3269                 __func__,
3270                 isci_device,
3271                 request,
3272                 task->num_scatter);
3273
3274         /* map the sgl addresses, if present.
3275          * libata does the mapping for sata devices
3276          * before we get the request.
3277          */
3278         if (task->num_scatter &&
3279             !sas_protocol_ata(task->task_proto) &&
3280             !(SAS_PROTOCOL_SMP & task->task_proto)) {
3281
3282                 request->num_sg_entries = dma_map_sg(
3283                         &isci_host->pdev->dev,
3284                         task->scatter,
3285                         task->num_scatter,
3286                         task->data_dir
3287                         );
3288
3289                 if (request->num_sg_entries == 0)
3290                         return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3291         }
3292
3293         status = scic_io_request_construct(&isci_host->sci, sci_device,
3294                                            request);
3295
3296         if (status != SCI_SUCCESS) {
3297                 dev_warn(&isci_host->pdev->dev,
3298                          "%s: failed request construct\n",
3299                          __func__);
3300                 return SCI_FAILURE;
3301         }
3302
3303         switch (task->task_proto) {
3304         case SAS_PROTOCOL_SMP:
3305                 status = isci_smp_request_build(request);
3306                 break;
3307         case SAS_PROTOCOL_SSP:
3308                 status = isci_request_ssp_request_construct(request);
3309                 break;
3310         case SAS_PROTOCOL_SATA:
3311         case SAS_PROTOCOL_STP:
3312         case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3313                 status = isci_request_stp_request_construct(request);
3314                 break;
3315         default:
3316                 dev_warn(&isci_host->pdev->dev,
3317                          "%s: unknown protocol\n", __func__);
3318                 return SCI_FAILURE;
3319         }
3320
3321         return SCI_SUCCESS;
3322 }
3323
3324 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
3325 {
3326         struct isci_request *ireq;
3327
3328         ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3329         ireq->io_tag = tag;
3330         ireq->io_request_completion = NULL;
3331         ireq->flags = 0;
3332         ireq->num_sg_entries = 0;
3333         INIT_LIST_HEAD(&ireq->completed_node);
3334         INIT_LIST_HEAD(&ireq->dev_node);
3335         isci_request_change_state(ireq, allocated);
3336
3337         return ireq;
3338 }
3339
3340 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3341                                                      struct sas_task *task,
3342                                                      u16 tag)
3343 {
3344         struct isci_request *ireq;
3345
3346         ireq = isci_request_from_tag(ihost, tag);
3347         ireq->ttype_ptr.io_task_ptr = task;
3348         ireq->ttype = io_task;
3349         task->lldd_task = ireq;
3350
3351         return ireq;
3352 }
3353
3354 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3355                                                struct isci_tmf *isci_tmf,
3356                                                u16 tag)
3357 {
3358         struct isci_request *ireq;
3359
3360         ireq = isci_request_from_tag(ihost, tag);
3361         ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3362         ireq->ttype = tmf_task;
3363
3364         return ireq;
3365 }
3366
/* Build and start an I/O request for @task on @idev using hardware tag
 * @tag.  Returns 0 when the I/O was accepted (including the
 * device-reset-required case, which is converted to success after
 * scheduling the SCSI error handler), or a non-zero enum sci_status code
 * when the build or start failed.
 */
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
			 struct sas_task *task, u16 tag)
{
	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
	struct isci_request *ireq;
	unsigned long flags;
	int ret = 0;

	/* do common allocation and init of request object. */
	ireq = isci_io_request_from_tag(ihost, task, tag);

	status = isci_io_request_build(ihost, ireq, idev);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: request_construct failed - status = 0x%x\n",
			 __func__,
			 status);
		/* NOTE(review): failure paths return the enum sci_status
		 * value while the success path returns ret (0) - callers
		 * presumably treat any non-zero value as failure; confirm.
		 */
		return status;
	}

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {

		if (isci_task_is_ncq_recovery(task)) {

			/* The device is in an NCQ recovery state.  Issue the
			 * request on the task side.  Note that it will
			 * complete on the I/O request side because the
			 * request was built that way (ie.
			 * ireq->is_task_management_request is false).
			 */
			status = scic_controller_start_task(&ihost->sci,
							    &idev->sci,
							    ireq);
		} else {
			/* Only NCQ-recovery reads/logs may run while the
			 * device is in NCQ error recovery; reject the rest.
			 */
			status = SCI_FAILURE;
		}
	} else {
		/* send the request, let the core assign the IO TAG.    */
		status = scic_controller_start_io(&ihost->sci, &idev->sci,
						  ireq);
	}

	if (status != SCI_SUCCESS &&
	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		dev_warn(&ihost->pdev->dev,
			 "%s: failed request start (0x%x)\n",
			 __func__, status);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		return status;
	}

	/* Either I/O started OK, or the core has signaled that
	 * the device needs a target reset.
	 *
	 * In either case, hold onto the I/O for later.
	 *
	 * Update it's status and add it to the list in the
	 * remote device object.
	 */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	if (status == SCI_SUCCESS) {
		isci_request_change_state(ireq, started);
	} else {
		/* The request did not really start in the
		 * hardware, so clear the request handle
		 * here so no terminations will be done.
		 */
		set_bit(IREQ_TERMINATED, &ireq->flags);
		isci_request_change_state(ireq, completed);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status ==
	    SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		isci_execpath_callback(ihost, task,
				       sas_task_abort);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 */
		/* NOTE(review): this assignment is dead - the function
		 * returns ret (0) below, not status.
		 */
		status = SCI_SUCCESS;
	}

	return ret;
}