isci: unify isci_remote_device and scic_sds_remote_device
[linux-2.6.git] drivers/scsi/isci/task.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55
56 #include <linux/completion.h>
57 #include <linux/irqflags.h>
58 #include "sas.h"
59 #include <scsi/libsas.h>
60 #include "remote_device.h"
61 #include "remote_node_context.h"
62 #include "isci.h"
63 #include "request.h"
64 #include "sata.h"
65 #include "task.h"
66 #include "host.h"
67
68 /**
69 * isci_task_refuse() - complete the request to the upper layer driver in
70 *     the case where an I/O needs to be completed back in the submit path.
71 * @ihost: host on which the request was queued
72 * @task: request to complete
73 * @response: response code for the completed task.
74 * @status: status code for the completed task.
75 *
76 */
77 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
78                              enum service_response response,
79                              enum exec_status status)
80
81 {
82         enum isci_completion_selection disposition;
83
84         disposition = isci_perform_normal_io_completion;
85         disposition = isci_task_set_completion_status(task, response, status,
86                                                       disposition);
87
88         /* Tasks aborted specifically by a call to the lldd_abort_task
89          * function should not be completed to the host in the regular path.
90          */
91         switch (disposition) {
92                 case isci_perform_normal_io_completion:
93                         /* Normal notification (task_done) */
94                         dev_dbg(&ihost->pdev->dev,
95                                 "%s: Normal - task = %p, response=%d, "
96                                 "status=%d\n",
97                                 __func__, task, response, status);
98
99                         task->lldd_task = NULL;
100
101                         isci_execpath_callback(ihost, task, task->task_done);
102                         break;
103
104                 case isci_perform_aborted_io_completion:
105                         /* No notification because this request is already in the
106                         * abort path.
107                         */
108                         dev_warn(&ihost->pdev->dev,
109                                  "%s: Aborted - task = %p, response=%d, "
110                                 "status=%d\n",
111                                  __func__, task, response, status);
112                         break;
113
114                 case isci_perform_error_io_completion:
115                         /* Use sas_task_abort */
116                         dev_warn(&ihost->pdev->dev,
117                                  "%s: Error - task = %p, response=%d, "
118                                 "status=%d\n",
119                                  __func__, task, response, status);
120
121                         isci_execpath_callback(ihost, task, sas_task_abort);
122                         break;
123
124                 default:
125                         dev_warn(&ihost->pdev->dev,
126                                  "%s: isci task notification default case!",
127                                  __func__);
128                         sas_task_abort(task);
129                         break;
130         }
131 }
132
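/* Walk the chain of 'num' sas_tasks that libsas links together through
 * task->list when it hands more than one request to lldd_execute_task().
 */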
133 #define for_each_sas_task(num, task) \
134         for (; num > 0; num--,\
135              task = list_entry(task->list.next, struct sas_task, list))
136
137
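/* I/O may be sent to the device only if it is flagged ready, or if it is in
 * NCQ error recovery and this particular task is part of that recovery.
 */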
138 static inline int isci_device_io_ready(struct isci_remote_device *idev,
139                                        struct sas_task *task)
140 {
141         return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
142                       (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
143                        isci_task_is_ncq_recovery(task))
144                     : 0;
145 }
146 /**
147  * isci_task_execute_task() - This function is one of the SAS Domain Template
148  *    functions. This function is called by libsas to send a task down to
149  *    hardware.
150  * @task: This parameter specifies the SAS task to send.
151  * @num: This parameter specifies the number of tasks to queue.
152  * @gfp_flags: This parameter specifies the context of this call.
153  *
154  * status, zero indicates success.
155  */
156 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
157 {
158         struct isci_host *ihost = dev_to_ihost(task->dev);
159         struct isci_remote_device *idev;
160         unsigned long flags;
161         bool io_ready;
162         u16 tag;
163
164         dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
165
166         for_each_sas_task(num, task) {
167                 enum sci_status status = SCI_FAILURE;
168
169                 spin_lock_irqsave(&ihost->scic_lock, flags);
170                 idev = isci_lookup_device(task->dev);
171                 io_ready = isci_device_io_ready(idev, task);
172                 tag = isci_alloc_tag(ihost);
173                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
174
175                 dev_dbg(&ihost->pdev->dev,
176                         "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
177                         task, num, task->dev, idev, idev ? idev->flags : 0,
178                         task->uldd_task);
179
180                 if (!idev) {
181                         isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
182                                          SAS_DEVICE_UNKNOWN);
183                 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
184                         /* Indicate QUEUE_FULL so that the scsi midlayer
185                          * retries.
186                          */
187                         isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
188                                          SAS_QUEUE_FULL);
189                 } else {
190                         /* There is a device and it's ready for I/O. */
191                         spin_lock_irqsave(&task->task_state_lock, flags);
192
193                         if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
194                                 /* The I/O was aborted. */
195                                 spin_unlock_irqrestore(&task->task_state_lock,
196                                                        flags);
197
198                                 isci_task_refuse(ihost, task,
199                                                  SAS_TASK_UNDELIVERED,
200                                                  SAM_STAT_TASK_ABORTED);
201                         } else {
202                                 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
203                                 spin_unlock_irqrestore(&task->task_state_lock, flags);
204
205                                 /* build and send the request. */
206                                 status = isci_request_execute(ihost, idev, task, tag);
207
208                                 if (status != SCI_SUCCESS) {
209
210                                         spin_lock_irqsave(&task->task_state_lock, flags);
211                                         /* Did not really start this command. */
212                                         task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
213                                         spin_unlock_irqrestore(&task->task_state_lock, flags);
214
215                                         /* Indicate QUEUE_FULL so that the scsi
216                                         * midlayer retries. If the request
217                                         * failed for remote device reasons,
218                                         * it gets returned as
219                                         * SAS_TASK_UNDELIVERED next time
220                                         * through.
221                                         */
222                                         isci_task_refuse(ihost, task,
223                                                          SAS_TASK_COMPLETE,
224                                                          SAS_QUEUE_FULL);
225                                 }
226                         }
227                 }
228                 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
229                         spin_lock_irqsave(&ihost->scic_lock, flags);
230                         /* command never hit the device, so just free
231                          * the tci and skip the sequence increment
232                          */
233                         isci_tci_free(ihost, ISCI_TAG_TCI(tag));
234                         spin_unlock_irqrestore(&ihost->scic_lock, flags);
235                 }
236                 isci_put_device(idev);
237         }
238         return 0;
239 }
240
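/* Allocate an isci_request for the given TMF and have the core construct the
 * protocol-specific (SSP or SATA/STP) task request.
 */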
241 static struct isci_request *isci_task_request_build(struct isci_host *ihost,
242                                                     struct isci_remote_device *idev,
243                                                     u16 tag, struct isci_tmf *isci_tmf)
244 {
245         enum sci_status status = SCI_FAILURE;
246         struct isci_request *ireq = NULL;
247         struct domain_device *dev;
248
249         dev_dbg(&ihost->pdev->dev,
250                 "%s: isci_tmf = %p\n", __func__, isci_tmf);
251
252         dev = idev->domain_dev;
253
254         /* do common allocation and init of request object. */
255         ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
256         if (!ireq)
257                 return NULL;
258
259         /* let the core do its construct. */
260         status = scic_task_request_construct(&ihost->sci, idev, tag,
261                                              ireq);
262
263         if (status != SCI_SUCCESS) {
264                 dev_warn(&ihost->pdev->dev,
265                          "%s: scic_task_request_construct failed - "
266                          "status = 0x%x\n",
267                          __func__,
268                          status);
269                 return NULL;
270         }
271
272         /* XXX convert to get this from task->tproto like other drivers */
273         if (dev->dev_type == SAS_END_DEV) {
274                 isci_tmf->proto = SAS_PROTOCOL_SSP;
275                 status = scic_task_request_construct_ssp(ireq);
276                 if (status != SCI_SUCCESS)
277                         return NULL;
278         }
279
280         if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
281                 isci_tmf->proto = SAS_PROTOCOL_SATA;
282                 status = isci_sata_management_task_request_build(ireq);
283
284                 if (status != SCI_SUCCESS)
285                         return NULL;
286         }
287         return ireq;
288 }
289
290 int isci_task_execute_tmf(struct isci_host *ihost,
291                           struct isci_remote_device *idev,
292                           struct isci_tmf *tmf, unsigned long timeout_ms)
293 {
294         DECLARE_COMPLETION_ONSTACK(completion);
295         enum sci_task_status status = SCI_TASK_FAILURE;
296         struct isci_request *ireq;
297         int ret = TMF_RESP_FUNC_FAILED;
298         unsigned long flags;
299         unsigned long timeleft;
300         u16 tag;
301
302         spin_lock_irqsave(&ihost->scic_lock, flags);
303         tag = isci_alloc_tag(ihost);
304         spin_unlock_irqrestore(&ihost->scic_lock, flags);
305
306         if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
307                 return ret;
308
309         /* sanity check, return TMF_RESP_FUNC_FAILED
310          * if the device is not there or not ready.
311          */
312         if (!idev ||
313             (!test_bit(IDEV_IO_READY, &idev->flags) &&
314              !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
315                 dev_dbg(&ihost->pdev->dev,
316                         "%s: idev = %p not ready (%#lx)\n",
317                         __func__,
318                         idev, idev ? idev->flags : 0);
319                 goto err_tci;
320         } else
321                 dev_dbg(&ihost->pdev->dev,
322                         "%s: idev = %p\n",
323                         __func__, idev);
324
325         /* Assign the pointer to the TMF's completion kernel wait structure. */
326         tmf->complete = &completion;
327
328         ireq = isci_task_request_build(ihost, idev, tag, tmf);
329         if (!ireq)
330                 goto err_tci;
331
332         spin_lock_irqsave(&ihost->scic_lock, flags);
333
334         /* start the TMF io. */
335         status = scic_controller_start_task(&ihost->sci, idev, ireq);
336
337         if (status != SCI_TASK_SUCCESS) {
338                 dev_warn(&ihost->pdev->dev,
339                          "%s: start_io failed - status = 0x%x, request = %p\n",
340                          __func__,
341                          status,
342                          ireq);
343                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
344                 goto err_tci;
345         }
346
347         if (tmf->cb_state_func != NULL)
348                 tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
349
350         isci_request_change_state(ireq, started);
351
352         /* add the request to the remote device request list. */
353         list_add(&ireq->dev_node, &idev->reqs_in_process);
354
355         spin_unlock_irqrestore(&ihost->scic_lock, flags);
356
357         /* Wait for the TMF to complete, or a timeout. */
358         timeleft = wait_for_completion_timeout(&completion,
359                                                msecs_to_jiffies(timeout_ms));
360
361         if (timeleft == 0) {
362                 spin_lock_irqsave(&ihost->scic_lock, flags);
363
364                 if (tmf->cb_state_func != NULL)
365                         tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
366
367                 scic_controller_terminate_request(&ihost->sci,
368                                                   idev,
369                                                   ireq);
370
371                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
372
373                 wait_for_completion(tmf->complete);
374         }
375
376         isci_print_tmf(tmf);
377
378         if (tmf->status == SCI_SUCCESS)
379                 ret =  TMF_RESP_FUNC_COMPLETE;
380         else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
381                 dev_dbg(&ihost->pdev->dev,
382                         "%s: tmf.status == "
383                         "SCI_FAILURE_IO_RESPONSE_VALID\n",
384                         __func__);
385                 ret =  TMF_RESP_FUNC_COMPLETE;
386         }
387         /* Else - leave the default "failed" status alone. */
388
389         dev_dbg(&ihost->pdev->dev,
390                 "%s: completed request = %p\n",
391                 __func__,
392                 ireq);
393
394         return ret;
395
396  err_tci:
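        /* The TMF was never started, so just release the tag allocated at the
         * top of this function.
         */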
397         spin_lock_irqsave(&ihost->scic_lock, flags);
398         isci_tci_free(ihost, ISCI_TAG_TCI(tag));
399         spin_unlock_irqrestore(&ihost->scic_lock, flags);
400
401         return ret;
402 }
403
404 void isci_task_build_tmf(
405         struct isci_tmf *tmf,
406         enum isci_tmf_function_codes code,
407         void (*tmf_sent_cb)(enum isci_tmf_cb_state,
408                             struct isci_tmf *,
409                             void *),
410         void *cb_data)
411 {
412         memset(tmf, 0, sizeof(*tmf));
413
414         tmf->tmf_code      = code;
415         tmf->cb_state_func = tmf_sent_cb;
416         tmf->cb_data       = cb_data;
417 }
418
419 static void isci_task_build_abort_task_tmf(
420         struct isci_tmf *tmf,
421         enum isci_tmf_function_codes code,
422         void (*tmf_sent_cb)(enum isci_tmf_cb_state,
423                             struct isci_tmf *,
424                             void *),
425         struct isci_request *old_request)
426 {
427         isci_task_build_tmf(tmf, code, tmf_sent_cb,
428                             (void *)old_request);
429         tmf->io_tag = old_request->io_tag;
430 }
431
432 /**
433  * isci_task_validate_request_to_abort() - This function checks the given I/O
434  *    against the "started" state.  If the request is still "started", its
435  *    state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
436  *    BEFORE CALLING THIS FUNCTION.
437  * @isci_request: This parameter specifies the request object to control.
438  * @isci_host: This parameter specifies the ISCI host object
439  * @isci_device: This is the device to which the request is pending.
440  * @aborted_io_completion: This is a completion structure that will be added to
441  *    the request in case it is changed to aborting; this completion is
442  *    triggered when the request is fully completed.
443  *
444  * Either "started" on successful change of the task status to "aborted", or
445  * "unallocated" if the task cannot be controlled.
446  */
447 static enum isci_request_status isci_task_validate_request_to_abort(
448         struct isci_request *isci_request,
449         struct isci_host *isci_host,
450         struct isci_remote_device *isci_device,
451         struct completion *aborted_io_completion)
452 {
453         enum isci_request_status old_state = unallocated;
454
455         /* Only abort the task if it's in the
456          *  device's request_in_process list
457          */
458         if (isci_request && !list_empty(&isci_request->dev_node)) {
459                 old_state = isci_request_change_started_to_aborted(
460                         isci_request, aborted_io_completion);
461
462         }
463
464         return old_state;
465 }
466
467 /**
468 * isci_request_cleanup_completed_loiterer() - This function will take care of
469 *    the final cleanup on any request which has been explicitly terminated.
470 * @isci_host: This parameter specifies the ISCI host object
471 * @isci_device: This is the device to which the request is pending.
472 * @isci_request: This parameter specifies the terminated request object.
473 * @task: This parameter is the libsas I/O request.
474 */
475 static void isci_request_cleanup_completed_loiterer(
476         struct isci_host          *isci_host,
477         struct isci_remote_device *isci_device,
478         struct isci_request       *isci_request,
479         struct sas_task           *task)
480 {
481         unsigned long flags;
482
483         dev_dbg(&isci_host->pdev->dev,
484                 "%s: isci_device=%p, request=%p, task=%p\n",
485                 __func__, isci_device, isci_request, task);
486
487         if (task != NULL) {
488
489                 spin_lock_irqsave(&task->task_state_lock, flags);
490                 task->lldd_task = NULL;
491
492                 task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
493
494                 isci_set_task_doneflags(task);
495
496                 /* If this task is not in the abort path, call task_done. */
497                 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
498
499                         spin_unlock_irqrestore(&task->task_state_lock, flags);
500                         task->task_done(task);
501                 } else
502                         spin_unlock_irqrestore(&task->task_state_lock, flags);
503         }
504
505         if (isci_request != NULL) {
506                 spin_lock_irqsave(&isci_host->scic_lock, flags);
507                 list_del_init(&isci_request->dev_node);
508                 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
509         }
510 }
511
512 /**
513  * isci_terminate_request_core() - This function will terminate the given
514  *    request, and wait for it to complete.  This function must only be called
515  *    from a thread that can wait.  Note that the request is terminated and
516  *    completed (back to the host, if started there).
517  * @isci_host: This SCU.
518  * @idev: The target.
519  * @isci_request: The I/O request to be terminated.
520  *
521  */
522 static void isci_terminate_request_core(
523         struct isci_host *isci_host,
524         struct isci_remote_device *idev,
525         struct isci_request *isci_request)
526 {
527         enum sci_status status      = SCI_SUCCESS;
528         bool was_terminated         = false;
529         bool needs_cleanup_handling = false;
530         enum isci_request_status request_status;
531         unsigned long     flags;
532         unsigned long     termination_completed = 1;
533         struct completion *io_request_completion;
534         struct sas_task   *task;
535
536         dev_dbg(&isci_host->pdev->dev,
537                 "%s: device = %p; request = %p\n",
538                 __func__, idev, isci_request);
539
540         spin_lock_irqsave(&isci_host->scic_lock, flags);
541
542         io_request_completion = isci_request->io_request_completion;
543
544         task = (isci_request->ttype == io_task)
545                 ? isci_request_access_task(isci_request)
546                 : NULL;
547
548         /* Note that we are not going to control
549          * the target to abort the request.
550          */
551         set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
552
553         /* Make sure the request wasn't just sitting around signalling
554          * device condition (if the request handle is NULL, then the
555          * request completed but needed additional handling here).
556          */
557         if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
558                 was_terminated = true;
559                 needs_cleanup_handling = true;
560                 status = scic_controller_terminate_request(
561                         &isci_host->sci,
562                         idev,
563                         isci_request);
564         }
565         spin_unlock_irqrestore(&isci_host->scic_lock, flags);
566
567         /*
568          * The only time the request to terminate will
569          * fail is when the io request is completed and
570          * being aborted.
571          */
572         if (status != SCI_SUCCESS) {
573                 dev_err(&isci_host->pdev->dev,
574                         "%s: scic_controller_terminate_request"
575                         " returned = 0x%x\n",
576                         __func__, status);
577
578                 isci_request->io_request_completion = NULL;
579
580         } else {
581                 if (was_terminated) {
582                         dev_dbg(&isci_host->pdev->dev,
583                                 "%s: before completion wait (%p/%p)\n",
584                                 __func__, isci_request, io_request_completion);
585
586                         /* Wait here for the request to complete. */
587                         #define TERMINATION_TIMEOUT_MSEC 500
588                         termination_completed
589                                 = wait_for_completion_timeout(
590                                    io_request_completion,
591                                    msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
592
593                         if (!termination_completed) {
594
595                                 /* The request to terminate has timed out.  */
596                                 spin_lock_irqsave(&isci_host->scic_lock,
597                                                   flags);
598
599                                 /* Check for state changes. */
600                                 if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
601
602                                         /* The best we can do is to have the
603                                          * request die a silent death if it
604                                          * ever really completes.
605                                          *
606                                          * Set the request state to "dead",
607                                          * and clear the task pointer so that
608                                          * an actual completion event callback
609                                          * doesn't do anything.
610                                          */
611                                         isci_request->status = dead;
612                                         isci_request->io_request_completion
613                                                 = NULL;
614
615                                         if (isci_request->ttype == io_task) {
616
617                                                 /* Break links with the
618                                                 * sas_task.
619                                                 */
620                                                 isci_request->ttype_ptr.io_task_ptr
621                                                         = NULL;
622                                         }
623                                 } else
624                                         termination_completed = 1;
625
626                                 spin_unlock_irqrestore(&isci_host->scic_lock,
627                                                        flags);
628
629                                 if (!termination_completed) {
630
631                                         dev_err(&isci_host->pdev->dev,
632                                                 "%s: *** Timeout waiting for "
633                                                 "termination(%p/%p)\n",
634                                                 __func__, io_request_completion,
635                                                 isci_request);
636
637                                         /* The request can no longer be referenced
638                                          * safely since it may go away if the
639                                          * termination ever really does complete.
640                                          */
641                                         isci_request = NULL;
642                                 }
643                         }
644                         if (termination_completed)
645                                 dev_dbg(&isci_host->pdev->dev,
646                                         "%s: after completion wait (%p/%p)\n",
647                                         __func__, isci_request, io_request_completion);
648                 }
649
650                 if (termination_completed) {
651
652                         isci_request->io_request_completion = NULL;
653
654                         /* Peek at the status of the request.  This will tell
655                          * us if there was special handling on the request such that it
656                          * needs to be detached and freed here.
657                          */
658                         spin_lock_irqsave(&isci_request->state_lock, flags);
659                         request_status = isci_request_get_state(isci_request);
660
661                         if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
662                             && ((request_status == aborted)
663                                 || (request_status == aborting)
664                                 || (request_status == terminating)
665                                 || (request_status == completed)
666                                 || (request_status == dead)
667                                 )
668                             ) {
669
670                                 /* The completion routine won't free a request in
671                                  * the aborted/aborting/etc. states, so we do
672                                  * it here.
673                                  */
674                                 needs_cleanup_handling = true;
675                         }
676                         spin_unlock_irqrestore(&isci_request->state_lock, flags);
677
678                 }
679                 if (needs_cleanup_handling)
680                         isci_request_cleanup_completed_loiterer(
681                                 isci_host, idev, isci_request, task);
682         }
683 }
684
685 /**
686  * isci_terminate_pending_requests() - This function will change all of the
687  *    requests on the given device to the "terminating" state, terminate the
688  *    requests, and wait for them to complete.  This function must only be
689  *    called from a thread that can wait.  Note that the requests are all
690  *    terminated and completed (back to the host, if started there).
691  * @ihost: This parameter specifies the SCU.
692  * @idev: This parameter specifies the target.
693  *
694  */
695 void isci_terminate_pending_requests(struct isci_host *ihost,
696                                      struct isci_remote_device *idev)
697 {
698         struct completion request_completion;
699         enum isci_request_status old_state;
700         unsigned long flags;
701         LIST_HEAD(list);
702
703         spin_lock_irqsave(&ihost->scic_lock, flags);
704         list_splice_init(&idev->reqs_in_process, &list);
705
706         /* assumes that isci_terminate_request_core deletes from the list */
707         while (!list_empty(&list)) {
708                 struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
709
710                 /* Change state to "terminating" if it is currently
711                  * "started".
712                  */
713                 old_state = isci_request_change_started_to_newstate(ireq,
714                                                                     &request_completion,
715                                                                     terminating);
716                 switch (old_state) {
717                 case started:
718                 case completed:
719                 case aborting:
720                         break;
721                 default:
722                         /* termination in progress, or otherwise dispositioned.
723                          * We know the request was on 'list' so should be safe
724                          * to move it back to reqs_in_process
725                          */
726                         list_move(&ireq->dev_node, &idev->reqs_in_process);
727                         ireq = NULL;
728                         break;
729                 }
730
731                 if (!ireq)
732                         continue;
733                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
734
735                 init_completion(&request_completion);
736
737                 dev_dbg(&ihost->pdev->dev,
738                          "%s: idev=%p request=%p; task=%p old_state=%d\n",
739                          __func__, idev, ireq,
740                         ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
741                         old_state);
742
743                 /* If the old_state is started:
744                  * This request was not already being aborted. If it had been,
745                  * then the aborting I/O (ie. the TMF request) would not be in
746                  * the aborting state, and thus would be terminated here.  Note
747                  * that since the TMF completion's call to the kernel function
748                  * "complete()" does not happen until the pending I/O request
749                  * terminate fully completes, we do not have to implement a
750                  * special wait here for already aborting requests - the
751                  * termination of the TMF request will force the request
752                  * to finish its already-started terminate.
753                  *
754                  * If old_state == completed:
755                  * This request completed from the SCU hardware perspective
756                  * and now just needs cleaning up in terms of freeing the
757                  * request and potentially calling up to libsas.
758                  *
759                  * If old_state == aborting:
760                  * This request has already gone through a TMF timeout, but may
761                  * not have been terminated; needs cleaning up at least.
762                  */
763                 isci_terminate_request_core(ihost, idev, ireq);
764                 spin_lock_irqsave(&ihost->scic_lock, flags);
765         }
766         spin_unlock_irqrestore(&ihost->scic_lock, flags);
767 }
768
769 /**
770  * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain
771  *    Template functions.
772  * @lun: This parameter specifies the lun to be reset.
773  *
774  * status, zero indicates success.
775  */
776 static int isci_task_send_lu_reset_sas(
777         struct isci_host *isci_host,
778         struct isci_remote_device *isci_device,
779         u8 *lun)
780 {
781         struct isci_tmf tmf;
782         int ret = TMF_RESP_FUNC_FAILED;
783
784         dev_dbg(&isci_host->pdev->dev,
785                 "%s: isci_host = %p, isci_device = %p\n",
786                 __func__, isci_host, isci_device);
787         /* Send the LUN reset to the target.  By the time the call returns,
788          * the TMF has fully executed in the target (return value
789          * "TMF_RESP_FUNC_COMPLETE"), or the request timed out or could not
790          * be executed (return value "TMF_RESP_FUNC_FAILED").
791          */
792         isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
793
794         #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
795         ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
796
797         if (ret == TMF_RESP_FUNC_COMPLETE)
798                 dev_dbg(&isci_host->pdev->dev,
799                         "%s: %p: TMF_LU_RESET passed\n",
800                         __func__, isci_device);
801         else
802                 dev_dbg(&isci_host->pdev->dev,
803                         "%s: %p: TMF_LU_RESET failed (%x)\n",
804                         __func__, isci_device, ret);
805
806         return ret;
807 }
808
809 /**
810  * isci_task_lu_reset() - This function is one of the SAS Domain Template
811  *    functions. This is one of the Task Management functions called by libsas,
812  *    to reset the given lun. Note the assumption that while this call is
813  *    executing, no I/O will be sent by the host to the device.
814  * @lun: This parameter specifies the lun to be reset.
815  *
816  * status, zero indicates success.
817  */
818 int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
819 {
820         struct isci_host *isci_host = dev_to_ihost(domain_device);
821         struct isci_remote_device *isci_device;
822         unsigned long flags;
823         int ret;
824
825         spin_lock_irqsave(&isci_host->scic_lock, flags);
826         isci_device = isci_lookup_device(domain_device);
827         spin_unlock_irqrestore(&isci_host->scic_lock, flags);
828
829         dev_dbg(&isci_host->pdev->dev,
830                 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
831                  __func__, domain_device, isci_host, isci_device);
832
833         if (isci_device)
834                 set_bit(IDEV_EH, &isci_device->flags);
835
836         /* If there is a device reset pending on any request in the
837          * device's list, fail this LUN reset request in order to
838          * escalate to the device reset.
839          */
840         if (!isci_device ||
841             isci_device_is_reset_pending(isci_host, isci_device)) {
842                 dev_warn(&isci_host->pdev->dev,
843                          "%s: No dev (%p), or "
844                          "RESET PENDING: domain_device=%p\n",
845                          __func__, isci_device, domain_device);
846                 ret = TMF_RESP_FUNC_FAILED;
847                 goto out;
848         }
849
850         /* Send the task management part of the reset. */
851         if (sas_protocol_ata(domain_device->tproto)) {
852                 ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
853         } else
854                 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
855
856         /* If the LUN reset worked, all the I/O can now be terminated. */
857         if (ret == TMF_RESP_FUNC_COMPLETE)
858                 /* Terminate all I/O now. */
859                 isci_terminate_pending_requests(isci_host,
860                                                 isci_device);
861
862  out:
863         isci_put_device(isci_device);
864         return ret;
865 }
866
867
868 /*       int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
869 int isci_task_clear_nexus_port(struct asd_sas_port *port)
870 {
871         return TMF_RESP_FUNC_FAILED;
872 }
873
874
875
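/* Neither port-level nor HA-level I_T nexus clearing is implemented; both
 * handlers simply report TMF_RESP_FUNC_FAILED to libsas.
 */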
876 int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
877 {
878         return TMF_RESP_FUNC_FAILED;
879 }
880
881 /* Task Management Functions. Must be called from process context.       */
882
883 /**
884  * isci_abort_task_process_cb() - This is a helper function for the abort task
885  *    TMF command.  It manages the request state with respect to the successful
886  *    transmission / completion of the abort task request.
887  * @cb_state: This parameter specifies when this function was called - after
888  *    the TMF request has been started and after it has timed-out.
889  * @tmf: This parameter specifies the TMF in progress.
890  *
891  *
892  */
893 static void isci_abort_task_process_cb(
894         enum isci_tmf_cb_state cb_state,
895         struct isci_tmf *tmf,
896         void *cb_data)
897 {
898         struct isci_request *old_request;
899
900         old_request = (struct isci_request *)cb_data;
901
902         dev_dbg(&old_request->isci_host->pdev->dev,
903                 "%s: tmf=%p, old_request=%p\n",
904                 __func__, tmf, old_request);
905
906         switch (cb_state) {
907
908         case isci_tmf_started:
909                 /* The TMF has been started.  Nothing to do here, since the
910                  * request state was already set to "aborted" by the abort
911                  * task function.
912                  */
913                 if ((old_request->status != aborted)
914                         && (old_request->status != completed))
915                         dev_err(&old_request->isci_host->pdev->dev,
916                                 "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
917                                 __func__, old_request->status, tmf, old_request);
918                 break;
919
920         case isci_tmf_timed_out:
921
922                 /* Set the task's state to "aborting", since the abort task
923                  * function thread set it to "aborted" (above) in anticipation
924                  * of the task management request working correctly.  Since the
925                  * timeout has now fired, the TMF request failed.  We set the
926                  * state such that the request completion will indicate the
927                  * device is no longer present.
928                  */
929                 isci_request_change_state(old_request, aborting);
930                 break;
931
932         default:
933                 dev_err(&old_request->isci_host->pdev->dev,
934                         "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
935                         __func__, cb_state, tmf, old_request);
936                 break;
937         }
938 }
939
940 /**
941  * isci_task_abort_task() - This function is one of the SAS Domain Template
942  *    functions. This function is called by libsas to abort a specified task.
943  * @task: This parameter specifies the SAS task to abort.
944  *
945  * status, zero indicates success.
946  */
947 int isci_task_abort_task(struct sas_task *task)
948 {
949         struct isci_host *isci_host = dev_to_ihost(task->dev);
950         DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
951         struct isci_request       *old_request = NULL;
952         enum isci_request_status  old_state;
953         struct isci_remote_device *isci_device = NULL;
954         struct isci_tmf           tmf;
955         int                       ret = TMF_RESP_FUNC_FAILED;
956         unsigned long             flags;
957         bool                      any_dev_reset = false;
958
959         /* Get the isci_request reference from the task.  Note that
960          * this check does not depend on the pending request list
961          * in the device, because tasks driving resets may land here
962          * after completion in the core.
963          */
964         spin_lock_irqsave(&isci_host->scic_lock, flags);
965         spin_lock(&task->task_state_lock);
966
967         old_request = task->lldd_task;
968
969         /* If task is already done, the request isn't valid */
970         if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
971             (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
972             old_request)
973                 isci_device = isci_lookup_device(task->dev);
974
975         spin_unlock(&task->task_state_lock);
976         spin_unlock_irqrestore(&isci_host->scic_lock, flags);
977
978         dev_dbg(&isci_host->pdev->dev,
979                 "%s: task = %p\n", __func__, task);
980
981         if (!isci_device || !old_request)
982                 goto out;
983
984         set_bit(IDEV_EH, &isci_device->flags);
985
986         /* This version of the driver will fail abort requests for
987          * SATA/STP.  Failing the abort request this way will cause the
988          * SCSI error handler thread to escalate to LUN reset
989          */
990         if (sas_protocol_ata(task->task_proto)) {
991                 dev_warn(&isci_host->pdev->dev,
992                             " task %p is for a STP/SATA device;"
993                             " returning TMF_RESP_FUNC_FAILED\n"
994                             " to cause a LUN reset...\n", task);
995                 goto out;
996         }
997
998         dev_dbg(&isci_host->pdev->dev,
999                 "%s: old_request == %p\n", __func__, old_request);
1000
1001         any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
1002
1003         spin_lock_irqsave(&task->task_state_lock, flags);
1004
1005         any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
1006
1007         /* If the extraction of the request reference from the task
1008          * failed, then the request has been completed (or if there is a
1009          * pending reset then this abort request function must be failed
1010          * in order to escalate to the target reset).
1011          */
1012         if ((old_request == NULL) || any_dev_reset) {
1013
1014                 /* If the device reset task flag is set, fail the task
1015                  * management request.  Otherwise, the original request
1016                  * has completed.
1017                  */
1018                 if (any_dev_reset) {
1019
1020                         /* Turn off the task's DONE to make sure this
1021                          * task is escalated to a target reset.
1022                          */
1023                         task->task_state_flags &= ~SAS_TASK_STATE_DONE;
1024
1025                         /* Make the reset happen as soon as possible. */
1026                         task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
1027
1028                         spin_unlock_irqrestore(&task->task_state_lock, flags);
1029
1030                         /* Fail the task management request in order to
1031                          * escalate to the target reset.
1032                          */
1033                         ret = TMF_RESP_FUNC_FAILED;
1034
1035                         dev_dbg(&isci_host->pdev->dev,
1036                                 "%s: Failing task abort in order to "
1037                                 "escalate to target reset because\n"
1038                                 "SAS_TASK_NEED_DEV_RESET is set for "
1039                                 "task %p on dev %p\n",
1040                                 __func__, task, isci_device);
1041
1042
1043                 } else {
1044                         /* The request has already completed and there
1045                          * is nothing to do here other than to set the task
1046                          * done bit, and indicate that the task abort function
1047                          * was successful.
1048                          */
1049                         isci_set_task_doneflags(task);
1050
1051                         spin_unlock_irqrestore(&task->task_state_lock, flags);
1052
1053                         ret = TMF_RESP_FUNC_COMPLETE;
1054
1055                         dev_dbg(&isci_host->pdev->dev,
1056                                 "%s: abort task not needed for %p\n",
1057                                 __func__, task);
1058                 }
1059                 goto out;
1060         }
1061         else
1062                 spin_unlock_irqrestore(&task->task_state_lock, flags);
1063
1064         spin_lock_irqsave(&isci_host->scic_lock, flags);
1065
1066         /* Check the request status and change to "aborted" if currently
1067          * "starting"; if true then set the I/O kernel completion
1068          * struct that will be triggered when the request completes.
1069          */
1070         old_state = isci_task_validate_request_to_abort(
1071                                 old_request, isci_host, isci_device,
1072                                 &aborted_io_completion);
1073         if ((old_state != started) &&
1074             (old_state != completed) &&
1075             (old_state != aborting)) {
1076
1077                 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1078
1079                 /* The request was already being handled by someone else (because
1080                 * they got to set the state away from started).
1081                 */
1082                 dev_dbg(&isci_host->pdev->dev,
1083                         "%s:  device = %p; old_request %p already being aborted\n",
1084                         __func__,
1085                         isci_device, old_request);
1086                 ret = TMF_RESP_FUNC_COMPLETE;
1087                 goto out;
1088         }
1089         if (task->task_proto == SAS_PROTOCOL_SMP ||
1090             test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
1091
1092                 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1093
1094                 dev_dbg(&isci_host->pdev->dev,
1095                         "%s: SMP request (%d)"
1096                         " or complete_in_target (%d), thus no TMF\n",
1097                         __func__, (task->task_proto == SAS_PROTOCOL_SMP),
1098                         test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
1099
1100                 /* Set the state on the task. */
1101                 isci_task_all_done(task);
1102
1103                 ret = TMF_RESP_FUNC_COMPLETE;
1104
1105                 /* Stopping and SMP devices are not sent a TMF, and are not
1106                  * reset, but the outstanding I/O request is terminated below.
1107                  */
1108         } else {
1109                 /* Fill in the tmf structure */
1110                 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
1111                                                isci_abort_task_process_cb,
1112                                                old_request);
1113
1114                 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1115
1116                 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
1117                 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
1118                                             ISCI_ABORT_TASK_TIMEOUT_MS);
1119
1120                 if (ret != TMF_RESP_FUNC_COMPLETE)
1121                         dev_err(&isci_host->pdev->dev,
1122                                 "%s: isci_task_send_tmf failed\n",
1123                                 __func__);
1124         }
1125         if (ret == TMF_RESP_FUNC_COMPLETE) {
1126                 set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
1127
1128                 /* Clean up the request on our side, and wait for the aborted
1129                  * I/O to complete.
1130                  */
1131                 isci_terminate_request_core(isci_host, isci_device, old_request);
1132         }
1133
1134         /* Make sure we do not leave a reference to aborted_io_completion */
1135         old_request->io_request_completion = NULL;
1136  out:
1137         isci_put_device(isci_device);
1138         return ret;
1139 }
1140
1141 /**
1142  * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1143  *    functions. This is one of the Task Management functions called by libsas,
1144  *    to abort all task for the given lun.
1145  * @d_device: This parameter specifies the domain device associated with this
1146  *    request.
1147  * @lun: This parameter specifies the lun associated with this request.
1148  *
1149  * status, zero indicates success.
1150  */
1151 int isci_task_abort_task_set(
1152         struct domain_device *d_device,
1153         u8 *lun)
1154 {
1155         return TMF_RESP_FUNC_FAILED;
1156 }
1157
1158
1159 /**
1160  * isci_task_clear_aca() - This function is one of the SAS Domain Template
1161  *    functions. This is one of the Task Management functions called by libsas.
1162  * @d_device: This parameter specifies the domain device associated with this
1163  *    request.
1164  * @lun: This parameter specifies the lun associated with this request.
1165  *
1166  * status, zero indicates success.
1167  */
1168 int isci_task_clear_aca(
1169         struct domain_device *d_device,
1170         u8 *lun)
1171 {
1172         return TMF_RESP_FUNC_FAILED;
1173 }
1174
1175
1176
1177 /**
1178  * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1179  *    functions. This is one of the Task Management functions called by libsas.
1180  * @d_device: This parameter specifies the domain device associated with this
1181  *    request.
1182  * @lun: This parameter specifies the lun associated with this request.
1183  *
1184  * status, zero indicates success.
1185  */
1186 int isci_task_clear_task_set(
1187         struct domain_device *d_device,
1188         u8 *lun)
1189 {
1190         return TMF_RESP_FUNC_FAILED;
1191 }
1192
1193
1194 /**
1195  * isci_task_query_task() - This function is implemented to cause libsas to
1196  *    correctly escalate the failed abort to a LUN or target reset (this is
1197  *    because the libsas function sas_scsi_find_task does not correctly interpret
1198  *    all return codes from the abort task call).  When TMF_RESP_FUNC_SUCC is
1199  *    returned, libsas turns this into a LUN reset; when FUNC_FAILED is
1200  *    returned, libsas will turn this into a target reset
1201  * @task: This parameter specifies the sas task being queried.
1202  * @lun: This parameter specifies the lun associated with this request.
1203  *
1204  * status, zero indicates success.
1205  */
1206 int isci_task_query_task(
1207         struct sas_task *task)
1208 {
1209         /* See if there is a pending device reset for this device. */
1210         if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1211                 return TMF_RESP_FUNC_FAILED;
1212         else
1213                 return TMF_RESP_FUNC_SUCC;
1214 }
1215
1216 /*
1217  * isci_task_request_complete() - This function is called by the sci core when
1218  *    a task request completes.
1219  * @ihost: This parameter specifies the ISCI host object
1220  * @ireq: This parameter is the completed isci_request object.
1221  * @completion_status: This parameter specifies the completion status from the
1222  *    sci core.
1223  *
1224  * none.
1225  */
1226 void
1227 isci_task_request_complete(struct isci_host *ihost,
1228                            struct isci_request *ireq,
1229                            enum sci_task_status completion_status)
1230 {
1231         struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1232         struct completion *tmf_complete;
1233
1234         dev_dbg(&ihost->pdev->dev,
1235                 "%s: request = %p, status=%d\n",
1236                 __func__, ireq, completion_status);
1237
1238         isci_request_change_state(ireq, completed);
1239
1240         tmf->status = completion_status;
1241         set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
1242
1243         if (tmf->proto == SAS_PROTOCOL_SSP) {
1244                 memcpy(&tmf->resp.resp_iu,
1245                        &ireq->ssp.rsp,
1246                        SSP_RESP_IU_MAX_SIZE);
1247         } else if (tmf->proto == SAS_PROTOCOL_SATA) {
1248                 memcpy(&tmf->resp.d2h_fis,
1249                        &ireq->stp.rsp,
1250                        sizeof(struct dev_to_host_fis));
1251         }
1252
1253         /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1254         tmf_complete = tmf->complete;
1255
1256         scic_controller_complete_io(&ihost->sci, ireq->target_device, ireq);
1257         /* Set the 'terminated' flag to make sure it cannot be terminated
1258          *  or completed again.
1259          */
1260         set_bit(IREQ_TERMINATED, &ireq->flags);
1261
1262         isci_request_change_state(ireq, unallocated);
1263         list_del_init(&ireq->dev_node);
1264
1265         /* The task management part completes last. */
1266         complete(tmf_complete);
1267 }
1268
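/* Timer callback for driver-internal SMP requests: if the task has not
 * completed by the deadline, mark it aborted and wake the waiter below.
 */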
1269 static void isci_smp_task_timedout(unsigned long _task)
1270 {
1271         struct sas_task *task = (void *) _task;
1272         unsigned long flags;
1273
1274         spin_lock_irqsave(&task->task_state_lock, flags);
1275         if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
1276                 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1277         spin_unlock_irqrestore(&task->task_state_lock, flags);
1278
1279         complete(&task->completion);
1280 }
1281
1282 static void isci_smp_task_done(struct sas_task *task)
1283 {
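        /* If del_timer() fails the timer has already fired, and the timeout
         * handler owns the completion; return here to avoid completing the
         * task twice.
         */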
1284         if (!del_timer(&task->timer))
1285                 return;
1286         complete(&task->completion);
1287 }
1288
1289 static struct sas_task *isci_alloc_task(void)
1290 {
1291         struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
1292
1293         if (task) {
1294                 INIT_LIST_HEAD(&task->list);
1295                 spin_lock_init(&task->task_state_lock);
1296                 task->task_state_flags = SAS_TASK_STATE_PENDING;
1297                 init_timer(&task->timer);
1298                 init_completion(&task->completion);
1299         }
1300
1301         return task;
1302 }
1303
1304 static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
1305 {
1306         if (task) {
1307                 BUG_ON(!list_empty(&task->list));
1308                 kfree(task);
1309         }
1310 }
1311
1312 static int isci_smp_execute_task(struct isci_host *ihost,
1313                                  struct domain_device *dev, void *req,
1314                                  int req_size, void *resp, int resp_size)
1315 {
1316         int res, retry;
1317         struct sas_task *task = NULL;
1318
1319         for (retry = 0; retry < 3; retry++) {
1320                 task = isci_alloc_task();
1321                 if (!task)
1322                         return -ENOMEM;
1323
1324                 task->dev = dev;
1325                 task->task_proto = dev->tproto;
1326                 sg_init_one(&task->smp_task.smp_req, req, req_size);
1327                 sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
1328
1329                 task->task_done = isci_smp_task_done;
1330
1331                 task->timer.data = (unsigned long) task;
1332                 task->timer.function = isci_smp_task_timedout;
1333                 task->timer.expires = jiffies + 10*HZ;
1334                 add_timer(&task->timer);
1335
1336                 res = isci_task_execute_task(task, 1, GFP_KERNEL);
1337
1338                 if (res) {
1339                         del_timer(&task->timer);
1340                         dev_err(&ihost->pdev->dev,
1341                                 "%s: executing SMP task failed:%d\n",
1342                                 __func__, res);
1343                         goto ex_err;
1344                 }
1345
1346                 wait_for_completion(&task->completion);
1347                 res = -ECOMM;
1348                 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1349                         dev_err(&ihost->pdev->dev,
1350                                 "%s: smp task timed out or aborted\n",
1351                                 __func__);
1352                         isci_task_abort_task(task);
1353                         if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1354                                 dev_err(&ihost->pdev->dev,
1355                                         "%s: SMP task aborted and not done\n",
1356                                         __func__);
1357                                 goto ex_err;
1358                         }
1359                 }
1360                 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1361                     task->task_status.stat == SAM_STAT_GOOD) {
1362                         res = 0;
1363                         break;
1364                 }
1365                 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1366                       task->task_status.stat == SAS_DATA_UNDERRUN) {
1367                         /* no error, but return the number of bytes of
1368                          * underrun */
1369                         res = task->task_status.residual;
1370                         break;
1371                 }
1372                 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1373                       task->task_status.stat == SAS_DATA_OVERRUN) {
1374                         res = -EMSGSIZE;
1375                         break;
1376                 } else {
1377                         dev_err(&ihost->pdev->dev,
1378                                 "%s: task to dev %016llx response: 0x%x "
1379                                 "status 0x%x\n", __func__,
1380                                 SAS_ADDR(dev->sas_addr),
1381                                 task->task_status.resp,
1382                                 task->task_status.stat);
1383                         isci_free_task(ihost, task);
1384                         task = NULL;
1385                 }
1386         }
1387 ex_err:
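        /* Exhausting all three retries is only possible via the response-error
         * path above, which frees the task and sets it to NULL on each pass.
         */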
1388         BUG_ON(retry == 3 && task != NULL);
1389         isci_free_task(ihost, task);
1390         return res;
1391 }
1392
1393 #define DISCOVER_REQ_SIZE  16
1394 #define DISCOVER_RESP_SIZE 56
1395
1396 int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
1397                                        struct domain_device *dev,
1398                                        int phy_id, int *adt)
1399 {
1400         struct smp_resp *disc_resp;
1401         u8 *disc_req;
1402         int res;
1403
1404         disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
1405         if (!disc_resp)
1406                 return -ENOMEM;
1407
1408         disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
1409         if (disc_req) {
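                /* Build an SMP DISCOVER request: byte 0 is the SMP frame
                 * type, byte 1 the function code, and byte 9 the phy being
                 * queried.
                 */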
1410                 disc_req[0] = SMP_REQUEST;
1411                 disc_req[1] = SMP_DISCOVER;
1412                 disc_req[9] = phy_id;
1413         } else {
1414                 kfree(disc_resp);
1415                 return -ENOMEM;
1416         }
1417         res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
1418                                     disc_resp, DISCOVER_RESP_SIZE);
1419         if (!res) {
1420                 if (disc_resp->result != SMP_RESP_FUNC_ACC)
1421                         res = disc_resp->result;
1422                 else
1423                         *adt = disc_resp->disc.attached_dev_type;
1424         }
1425         kfree(disc_req);
1426         kfree(disc_resp);
1427
1428         return res;
1429 }
1430
1431 static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
1432 {
1433         struct domain_device *dev = idev->domain_dev;
1434         struct isci_port *iport = idev->isci_port;
1435         struct isci_host *ihost = iport->isci_host;
1436         int res, iteration = 0, attached_device_type;
1437         #define STP_WAIT_MSECS 25000
1438         unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
1439         unsigned long deadline = jiffies + tmo;
1440         enum {
1441                 SMP_PHYWAIT_PHYDOWN,
1442                 SMP_PHYWAIT_PHYUP,
1443                 SMP_PHYWAIT_DONE
1444         } phy_state = SMP_PHYWAIT_PHYDOWN;
1445
1446         /* While there is time, wait for the phy to go away and come back */
1447         while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
1448                 int event = atomic_read(&iport->event);
1449
1450                 ++iteration;
1451
1452                 tmo = wait_event_timeout(ihost->eventq,
1453                                          event != atomic_read(&iport->event) ||
1454                                          !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
1455                                          tmo);
1456                 /* link down, stop polling */
1457                 if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
1458                         break;
1459
1460                 dev_dbg(&ihost->pdev->dev,
1461                         "%s: iport %p, iteration %d,"
1462                         " phase %d: time_remaining %lu, bcns = %d\n",
1463                         __func__, iport, iteration, phy_state,
1464                         tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
1465
1466                 res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
1467                                                          &attached_device_type);
1468                 tmo = deadline - jiffies;
1469
1470                 if (res) {
1471                         dev_warn(&ihost->pdev->dev,
1472                                  "%s: iteration %d, phase %d:"
1473                                  " SMP error=%d, time_remaining=%lu\n",
1474                                  __func__, iteration, phy_state, res, tmo);
1475                         break;
1476                 }
1477                 dev_dbg(&ihost->pdev->dev,
1478                         "%s: iport %p, iteration %d,"
1479                         " phase %d: time_remaining %lu, bcns = %d, "
1480                         "attdevtype = %x\n",
1481                         __func__, iport, iteration, phy_state,
1482                         tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
1483                         attached_device_type);
1484
1485                 switch (phy_state) {
1486                 case SMP_PHYWAIT_PHYDOWN:
1487                         /* Has the device gone away? */
1488                         if (!attached_device_type)
1489                                 phy_state = SMP_PHYWAIT_PHYUP;
1490
1491                         break;
1492
1493                 case SMP_PHYWAIT_PHYUP:
1494                         /* Has the device come back? */
1495                         if (attached_device_type)
1496                                 phy_state = SMP_PHYWAIT_DONE;
1497                         break;
1498
1499                 case SMP_PHYWAIT_DONE:
1500                         break;
1501                 }
1502
1503         }
1504         dev_dbg(&ihost->pdev->dev, "%s: done\n",  __func__);
1505 }
1506
1507 static int isci_reset_device(struct isci_host *ihost,
1508                              struct isci_remote_device *idev, int hard_reset)
1509 {
1510         struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
1511         struct isci_port *iport = idev->isci_port;
1512         enum sci_status status;
1513         unsigned long flags;
1514         int rc;
1515
1516         dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1517
1518         spin_lock_irqsave(&ihost->scic_lock, flags);
1519         status = scic_remote_device_reset(idev);
1520         if (status != SCI_SUCCESS) {
1521                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1522
1523                 dev_warn(&ihost->pdev->dev,
1524                          "%s: scic_remote_device_reset(%p) returned %d!\n",
1525                          __func__, idev, status);
1526
1527                 return TMF_RESP_FUNC_FAILED;
1528         }
1529         spin_unlock_irqrestore(&ihost->scic_lock, flags);
1530
1531         /* Make sure all pending requests can be fully terminated. */
1532         isci_device_clear_reset_pending(ihost, idev);
1533
1534         /* If this is a device on an expander, disable BCN processing. */
1535         if (!scsi_is_sas_phy_local(phy))
1536                 set_bit(IPORT_BCN_BLOCKED, &iport->flags);
1537
1538         rc = sas_phy_reset(phy, hard_reset);
1539
1540         /* Terminate in-progress I/O now. */
1541         isci_remote_device_nuke_requests(ihost, idev);
1542
1543         /* Since all pending TCs have been cleaned, resume the RNC. */
1544         spin_lock_irqsave(&ihost->scic_lock, flags);
1545         status = scic_remote_device_reset_complete(idev);
1546         spin_unlock_irqrestore(&ihost->scic_lock, flags);
1547
1548         /* If this is a device on an expander, bring the phy back up. */
1549         if (!scsi_is_sas_phy_local(phy)) {
1550                 /* A phy reset will cause the device to go away then reappear.
1551                  * Since libsas will take action on incoming BCNs (eg. remove
1552                  * a device going through an SMP phy-control driven reset),
1553                  * we need to wait until the phy comes back up before letting
1554                  * discovery proceed in libsas.
1555                  */
1556                 isci_wait_for_smp_phy_reset(idev, phy->number);
1557
1558                 spin_lock_irqsave(&ihost->scic_lock, flags);
1559                 isci_port_bcn_enable(ihost, idev->isci_port);
1560                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1561         }
1562
1563         if (status != SCI_SUCCESS) {
1564                 dev_warn(&ihost->pdev->dev,
1565                          "%s: scic_remote_device_reset_complete(%p) "
1566                          "returned %d!\n", __func__, idev, status);
1567         }
1568
1569         dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1570
1571         return rc;
1572 }
1573
1574 int isci_task_I_T_nexus_reset(struct domain_device *dev)
1575 {
1576         struct isci_host *ihost = dev_to_ihost(dev);
1577         struct isci_remote_device *idev;
1578         int ret, hard_reset = 1;
1579         unsigned long flags;
1580
1581         spin_lock_irqsave(&ihost->scic_lock, flags);
1582         idev = isci_lookup_device(dev);
1583         spin_unlock_irqrestore(&ihost->scic_lock, flags);
1584
1585         if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
1586                 ret = TMF_RESP_FUNC_COMPLETE;
1587                 goto out;
1588         }
1589
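        /* SATA/STP targets are recovered with a link reset; sas_phy_reset()
         * performs a hard reset only when hard_reset is nonzero.
         */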
1590         if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
1591                 hard_reset = 0;
1592
1593         ret = isci_reset_device(ihost, idev, hard_reset);
1594  out:
1595         isci_put_device(idev);
1596         return ret;
1597 }
1598
1599 int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1600 {
1601         struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1602         struct isci_host *ihost = dev_to_ihost(dev);
1603         struct isci_remote_device *idev;
1604         int ret, hard_reset = 1;
1605         unsigned long flags;
1606
1607         if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
1608                 hard_reset = 0;
1609
1610         spin_lock_irqsave(&ihost->scic_lock, flags);
1611         idev = isci_lookup_device(dev);
1612         spin_unlock_irqrestore(&ihost->scic_lock, flags);
1613
1614         if (!idev) {
1615                 ret = TMF_RESP_FUNC_COMPLETE;
1616                 goto out;
1617         }
1618
1619         ret = isci_reset_device(ihost, idev, hard_reset);
1620  out:
1621         isci_put_device(idev);
1622         return ret;
1623 }