/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR           (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE         32

struct scsi_host_sg_pool {
        size_t          size;
        char            *name;
        kmem_cache_t    *slab;
        mempool_t       *pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
        SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP


/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq    - request that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
        /*
         * Because users of this function are apt to reuse requests with no
         * modification, we have to sanitise the request flags here
         */
        sreq->sr_request->flags &= ~REQ_DONTPREP;
        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
                           at_head, sreq);
        return 0;
}

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_unprep_request()
 *
 * Purpose:     Remove all preparation done for a request, including its
 *              associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:   req     - request to unprepare
 *
 * Lock status: Assumed that no locks are held upon entry.
 *
 * Returns:     Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
        struct scsi_cmnd *cmd = req->special;

        req->flags &= ~REQ_DONTPREP;
        req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;

        scsi_put_command(cmd);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        if (reason == SCSI_MLQUEUE_HOST_BUSY)
                host->host_blocked = host->max_host_blocked;
        else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
                device->device_blocked = device->max_device_blocked;

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue.
         *
         * NOTE: there is magic here about the way the queue is plugged if
         * we have no outstanding commands.
         *
         * Although we *don't* plug the queue, we call the request
         * function.  The SCSI request function detects the blocked condition
         * and plugs the queue appropriately.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        return 0;
}
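
/*
 * Illustrative sketch (not part of this file): a low-level driver
 * normally triggers scsi_queue_insert() indirectly, by returning one
 * of the SCSI_MLQUEUE_* codes from its queuecommand() method when it
 * cannot take the command right now.  The adapter check below is a
 * hypothetical name; only the return value is part of the contract:
 *
 *      static int my_queuecommand(struct scsi_cmnd *cmd,
 *                                 void (*done)(struct scsi_cmnd *))
 *      {
 *              if (my_adapter_saturated())
 *                      return SCSI_MLQUEUE_HOST_BUSY;
 *              ...
 *              return 0;
 *      }
 *
 * The midlayer then requeues the command and applies the blocked-stall
 * logic described above.
 */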

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq      - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
                 void *buffer, unsigned bufflen,
                 void (*done)(struct scsi_cmnd *),
                 int timeout, int retries)
{
        /*
         * If the upper level driver is reusing these things, then
         * we should release the low-level block now.  Another one will
         * be allocated later when this request is getting queued.
         */
        __scsi_release_request(sreq);

        /*
         * Our own function scsi_done (which marks the host as not busy,
         * disables the timeout counter, etc) will be called by us or by
         * the scsi_hosts[host].queuecommand() function, which in turn
         * needs to call the completion function for the high level driver.
         */
        memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
        sreq->sr_bufflen = bufflen;
        sreq->sr_buffer = buffer;
        sreq->sr_allowed = retries;
        sreq->sr_done = done;
        sreq->sr_timeout_per_command = timeout;

        if (sreq->sr_cmd_len == 0)
                sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);
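
/*
 * Minimal usage sketch for scsi_do_req(), assuming the
 * scsi_allocate_request()/scsi_release_request() helpers exported by
 * this midlayer; my_done() and the timeout/retry values are
 * hypothetical:
 *
 *      unsigned char scmd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };
 *      struct scsi_request *sreq;
 *
 *      sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *      if (sreq) {
 *              sreq->sr_data_direction = DMA_NONE;
 *              scsi_do_req(sreq, scmd, NULL, 0, my_done, 30 * HZ, 3);
 *      }
 *
 * my_done() inspects sreq->sr_result and releases the request with
 * scsi_release_request() when it is finished.
 */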

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:       scsi device
 * @cmd:        scsi command
 * @data_direction: data direction
 * @buffer:     data buffer
 * @bufflen:    len of buffer
 * @sense:      optional sense buffer
 * @timeout:    request timeout in jiffies (assigned directly to
 *              req->timeout)
 * @retries:    number of times to retry request
 * @flags:      flags to OR into the request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, int flags)
{
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

        if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                        buffer, bufflen, __GFP_WAIT))
                goto out;

        req->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sense;
        req->sense_len = 0;
        req->timeout = timeout;
        req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        blk_execute_rq(req->q, NULL, req, 1);

        ret = req->errors;
 out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(scsi_execute);
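
/*
 * Usage sketch (illustrative only): issuing a TEST UNIT READY through
 * scsi_execute() and checking the status byte of the result:
 *
 *      unsigned char scmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *      int result;
 *
 *      result = scsi_execute(sdev, scmd, DMA_NONE, NULL, 0, NULL,
 *                            30 * HZ, 3, 0);
 *      if (status_byte(result) != GOOD)
 *              ...
 */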

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
        char *sense = NULL;
        int result;

        if (sshdr) {
                sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
                if (!sense)
                        return DRIVER_ERROR << 24;
                memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
                                  sense, timeout, retries, 0);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

        kfree(sense);
        return result;
}
EXPORT_SYMBOL(scsi_execute_req);
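
/*
 * Usage sketch (illustrative only): as above, but letting
 * scsi_execute_req() decode any sense data into a scsi_sense_hdr:
 *
 *      struct scsi_sense_hdr sshdr;
 *      unsigned char scmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *      int result;
 *
 *      result = scsi_execute_req(sdev, scmd, DMA_NONE, NULL, 0, &sshdr,
 *                                30 * HZ, 3);
 *      if (result && scsi_sense_valid(&sshdr) &&
 *          sshdr.sense_key == UNIT_ATTENTION)
 *              ...
 */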

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;

        memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

        /*
         * We need saved copies of a number of fields - this is because
         * error handling may need to overwrite these with different values
         * to run different commands, and once error handling is complete,
         * we will need to restore these values prior to running the actual
         * command.
         */
        cmd->old_use_sg = cmd->use_sg;
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;
        memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
        cmd->buffer = cmd->request_buffer;
        cmd->bufflen = cmd->request_bufflen;

        return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd      - command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
        memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
        cmd->request_buffer = cmd->buffer;
        cmd->request_bufflen = cmd->bufflen;
        cmd->use_sg = cmd->old_use_sg;
        cmd->cmd_len = cmd->old_cmd_len;
        cmd->sc_data_direction = cmd->sc_old_data_direction;
        cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        if (unlikely(scsi_host_in_recovery(shost) &&
                     shost->host_failed))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        if (sdev->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        while (!list_empty(&shost->starved_list) &&
               !shost->host_blocked && !shost->host_self_blocked &&
                !((shost->can_queue > 0) &&
                  (shost->host_busy >= shost->can_queue))) {
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                sdev = list_entry(shost->starved_list.next,
                                          struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                spin_unlock_irqrestore(shost->host_lock, flags);

                blk_run_queue(sdev->request_queue);

                spin_lock_irqsave(shost->host_lock, flags);
                if (unlikely(!list_empty(&sdev->starved_entry)))
                        /*
                         * sdev lost a race, and was put back on the
                         * starved list. This is unlikely but without this
                         * in theory we could loop forever.
                         */
                        break;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can be for a number of reasons - the main one is
 *              I/O errors in the middle of the request, in which case
 *              we need to request the blocks that come after the bad
 *              sector.
 * Notes:       Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        unsigned long flags;

        scsi_unprep_request(req);
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct request_queue *q = cmd->device->request_queue;

        scsi_put_command(cmd);
        scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 * Notes:       If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
                                          int bytes, int requeue)
{
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        unsigned long flags;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (end_that_request_chunk(req, uptodate, bytes)) {
                int leftover = (req->hard_nr_sectors << 9);

                if (blk_pc_request(req))
                        leftover = req->data_len;

                /* kill remainder if no retries */
                if (!uptodate && blk_noretry_request(req))
                        end_that_request_chunk(req, 0, leftover);
                else {
                        if (requeue) {
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_requeue_command(q, cmd);
                                cmd = NULL;
                        }
                        return cmd;
                }
        }

        add_disk_randomness(req->rq_disk);

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        end_that_request_last(req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        scsi_next_command(cmd);
        return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
        struct scsi_host_sg_pool *sgp;
        struct scatterlist *sgl;

        BUG_ON(!cmd->use_sg);

        switch (cmd->use_sg) {
        case 1 ... 8:
                cmd->sglist_len = 0;
                break;
        case 9 ... 16:
                cmd->sglist_len = 1;
                break;
        case 17 ... 32:
                cmd->sglist_len = 2;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        case 33 ... 64:
                cmd->sglist_len = 3;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        case 65 ... 128:
                cmd->sglist_len = 4;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        case 129 ... 256:
                cmd->sglist_len = 5;
                break;
#endif
#endif
#endif
        default:
                return NULL;
        }

        sgp = scsi_sg_pools + cmd->sglist_len;
        sgl = mempool_alloc(sgp->pool, gfp_mask);
        return sgl;
}
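
/*
 * Worked example: a command with use_sg == 24 falls into the
 * "17 ... 32" case above, so sglist_len becomes 2 and the scatterlist
 * is drawn from the "sgpool-32" mempool.  scsi_free_sgtable() below
 * must be handed that same index (cmd->sglist_len) so the table goes
 * back to the pool it came from.
 */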

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
        struct scsi_host_sg_pool *sgp;

        BUG_ON(index >= SG_MEMPOOL_NR);

        sgp = scsi_sg_pools + index;
        mempool_free(sgl, sgp->pool);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
        else if (cmd->request_buffer != req->buffer)
                kfree(cmd->request_buffer);

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must do one of several things here:
 *
 *              a) Call scsi_end_request.  This will finish off the
 *                 specified number of sectors.  If we are done, the
 *                 command block will be released, and the queue
 *                 function will be goosed.  If we are not done, then
 *                 scsi_end_request will directly goose the queue.
 *
 *              b) We can just use scsi_requeue_command() here.  This would
 *                 be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                        unsigned int block_bytes)
{
        int result = cmd->result;
        int this_count = cmd->bufflen;
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int clear_errors = 1;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;

        if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
                return;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         * For the case of a READ, we need to copy the data out of the
         * bounce buffer and into the real buffer.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
        else if (cmd->buffer != req->buffer) {
                if (rq_data_dir(req) == READ) {
                        unsigned long flags;
                        char *to = bio_kmap_irq(req->bio, &flags);
                        memcpy(to, cmd->buffer, cmd->bufflen);
                        bio_kunmap_irq(to, &flags);
                }
                kfree(cmd->buffer);
        }

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }
        if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        clear_errors = 0;
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer, len);
                                req->sense_len = len;
                        }
                } else
                        req->data_len = cmd->resid;
        }

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        if (good_bytes >= 0) {
                SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
                                              req->nr_sectors, good_bytes));
                SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

                if (clear_errors)
                        req->errors = 0;
                /*
                 * If multiple sectors are requested in one buffer, then
                 * they will have been finished off by the first command.
                 * If not, then we have a multi-buffer command.
                 *
                 * If block_bytes != 0, it means we had a medium error
                 * of some sort, and that we want to mark some number of
                 * sectors as not uptodate.  Thus we want to inhibit
                 * requeueing right here - we will requeue down below
                 * when we handle the bad sectors.
                 */

                /*
                 * If the command completed without error, then either
                 * finish off the rest of the command, or start a new one.
                 */
                if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
                        return;
        }
        /*
         * Now, if we were good little boys and girls, Santa left us a request
         * sense buffer.  We can extract information from this, so we
         * can choose a block to remap, etc.
         */
        if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* detected disc change.  set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                scsi_end_request(cmd, 0,
                                                this_count, 1);
                                return;
                        } else {
                                /*
                                 * Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * request and see what happens.
                                 */
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /*
                         * If we had an ILLEGAL REQUEST returned, then we may
                         * have performed an unsupported command.  The only
                         * thing this should be would be a ten byte read where
                         * only a six byte read was supported.  Also, on a
                         * system where READ CAPACITY failed, we may have read
                         * past the end of the disk.
                         */
                        if (cmd->device->use_10_for_rw &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                cmd->device->use_10_for_rw = 0;
                                /*
                                 * This will cause a retry with a 6-byte
                                 * command.
                                 */
                                scsi_requeue_command(q, cmd);
                                result = 0;
                        } else {
                                scsi_end_request(cmd, 0, this_count, 1);
                                return;
                        }
                        break;
                case NOT_READY:
                        /*
                         * If the device is in the process of becoming ready,
                         * retry.
                         */
                        if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        if (!(req->flags & REQ_QUIET))
                                scmd_printk(KERN_INFO, cmd,
                                           "Device not ready.\n");
                        scsi_end_request(cmd, 0, this_count, 1);
                        return;
                case VOLUME_OVERFLOW:
                        if (!(req->flags & REQ_QUIET)) {
                                scmd_printk(KERN_INFO, cmd,
                                           "Volume overflow, CDB: ");
                                __scsi_print_command(cmd->data_cmnd);
                                scsi_print_sense("", cmd);
                        }
                        scsi_end_request(cmd, 0, block_bytes, 1);
                        return;
                default:
                        break;
                }
        }                       /* driver byte != 0 */
        if (host_byte(result) == DID_RESET) {
                /*
                 * Third party bus reset or reset for error
                 * recovery reasons.  Just retry the request
                 * and see what happens.
                 */
                scsi_requeue_command(q, cmd);
                return;
        }
        if (result) {
                if (!(req->flags & REQ_QUIET)) {
                        scmd_printk(KERN_INFO, cmd,
                                   "SCSI error: return code = 0x%x\n", result);

                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
                }
                /*
                 * Mark a single buffer as not uptodate.  Queue the remainder.
                 * We sometimes get this cruft in the event that a medium error
                 * isn't properly reported.
                 */
                block_bytes = req->hard_cur_sectors << 9;
                if (!block_bytes)
                        block_bytes = req->data_len;
                scsi_end_request(cmd, 0, block_bytes, 1);
        }
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
        struct request     *req = cmd->request;
        struct scatterlist *sgpnt;
        int                count;

        /*
         * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
         */
        if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
                cmd->request_bufflen = req->data_len;
                cmd->request_buffer = req->data;
                req->buffer = req->data;
                cmd->use_sg = 0;
                return 0;
        }

        /*
         * we used to not use scatter-gather for single segment request,
         * but now we do (it makes highmem I/O easier to support without
         * kmapping pages)
         */
        cmd->use_sg = req->nr_phys_segments;

        /*
         * if sg table allocation fails, requeue request later.
         */
        sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (unlikely(!sgpnt)) {
                scsi_unprep_request(req);
                return BLKPREP_DEFER;
        }

        cmd->request_buffer = (char *) sgpnt;
        cmd->request_bufflen = req->nr_sectors << 9;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

        /*
         * mapped well, send it off
         */
        if (likely(count <= cmd->use_sg)) {
                cmd->use_sg = count;
                return 0;
        }

        printk(KERN_ERR "Incorrect number of segments after building list\n");
        printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
        printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
                        req->current_nr_sectors);

        /* release the command and kill it */
        scsi_release_buffers(cmd);
        scsi_put_command(cmd);
        return BLKPREP_KILL;
}

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;

                if (drv->prepare_flush)
                        return drv->prepare_flush(q, rq);
        }

        return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct request *flush_rq = rq->end_io_data;
        struct scsi_driver *drv;

        if (flush_rq->errors) {
                printk("scsi: barrier error, disabling flush support\n");
                blk_queue_ordered(q, QUEUE_ORDERED_NONE);
        }

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;
                drv->end_flush(q, rq);
        }
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
                               sector_t *error_sector)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state != SDEV_RUNNING)
                return -ENXIO;

        drv = *(struct scsi_driver **) disk->private_data;
        if (drv->issue_flush)
                return drv->issue_flush(&sdev->sdev_gendev, error_sector);

        return -EOPNOTSUPP;
}

static void scsi_generic_done(struct scsi_cmnd *cmd)
{
        BUG_ON(!blk_pc_request(cmd->request));
        scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}

void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries)
{
        struct request *req = cmd->request;

        BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
        memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
        cmd->cmd_len = req->cmd_len;
        if (!req->data_len)
                cmd->sc_data_direction = DMA_NONE;
        else if (rq_data_dir(req) == WRITE)
                cmd->sc_data_direction = DMA_TO_DEVICE;
        else
                cmd->sc_data_direction = DMA_FROM_DEVICE;

        cmd->transfersize = req->data_len;
        cmd->allowed = retries;
        cmd->timeout_per_command = req->timeout;
}
EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
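
/*
 * Usage sketch (illustrative): an upper-level driver's init_command()
 * can hand REQ_BLOCK_PC requests straight to this helper and only
 * supply its own completion; my_init_command()/my_done() are
 * hypothetical names:
 *
 *      static int my_init_command(struct scsi_cmnd *cmd)
 *      {
 *              if (blk_pc_request(cmd->request)) {
 *                      scsi_setup_blk_pc_cmnd(cmd, 3);
 *                      cmd->done = my_done;
 *                      return 1;
 *              }
 *              ...
 *      }
 *
 * Compare the scsi_generic_done() path that scsi_prep_fn() below uses
 * for requests without an associated rq_disk driver.
 */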

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_cmnd *cmd;
        int specials_only = 0;

        /*
         * Just check to see if the device is online.  If it isn't, we
         * refuse to process any commands.  The device must be brought
         * online before trying any recovery commands
         */
        if (unlikely(!scsi_device_online(sdev))) {
                sdev_printk(KERN_ERR, sdev,
                            "rejecting I/O to offline device\n");
                goto kill;
        }
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                /* OK, we're not in a running state don't prep
                 * user commands */
                if (sdev->sdev_state == SDEV_DEL) {
                        /* Device is fully deleted, no commands
                         * at all allowed down */
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to dead device\n");
                        goto kill;
                }
                /* OK, we only allow special commands (i.e. not
                 * user initiated ones */
                specials_only = sdev->sdev_state;
        }

        /*
         * Find the actual device driver associated with this command.
         * The SPECIAL requests are things like character device or
         * ioctls, which did not originate from ll_rw_blk.  Note that
         * the special field is also used to indicate the cmd for
         * the remainder of a partially fulfilled request that can
         * come up when there is a medium error.  We have to treat
         * these two cases differently.  We differentiate by looking
         * at request->cmd, as this tells us the real story.
         */
        if (req->flags & REQ_SPECIAL && req->special) {
                struct scsi_request *sreq = req->special;

                if (sreq->sr_magic == SCSI_REQ_MAGIC) {
                        cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                        scsi_init_cmd_from_req(cmd, sreq);
                } else
                        cmd = req->special;
        } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

                if (unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
                        if (specials_only == SDEV_QUIESCE ||
                                        specials_only == SDEV_BLOCK)
                                goto defer;

                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to device being removed\n");
                        goto kill;
                }

                /*
                 * Now try and find a command block that we can use.
                 */
                if (!req->special) {
                        cmd = scsi_get_command(sdev, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                } else
                        cmd = req->special;

                /* pull a tag out of the request if we have one */
                cmd->tag = req->tag;
        } else {
                blk_dump_rq_flags(req, "SCSI bad req");
                goto kill;
        }

        /* note the overloading of req->special.  When the tag
         * is active it always means cmd.  If the tag goes
         * back for re-queueing, it may be reset */
        req->special = cmd;
        cmd->request = req;

        /*
         * FIXME: drop the lock here because the functions below
         * expect to be called without the queue lock held.  Also,
         * previously, we dequeued the request before dropping the
         * lock.  We hope REQ_STARTED prevents anything untoward from
         * happening now.
         */
        if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
                struct scsi_driver *drv;
                int ret;

                /*
                 * This will do a couple of things:
                 *  1) Fill in the actual SCSI command.
                 *  2) Fill in any other upper-level specific fields
                 * (timeout).
                 *
                 * If this returns 0, it means that the request failed
                 * (reading past end of disk, reading offline device,
                 * etc).  This won't actually talk to the device, but
                 * some kinds of consistency checking may cause the
                 * request to be rejected immediately.
                 */

                /*
                 * This sets up the scatter-gather table (allocating if
                 * required).
                 */
                ret = scsi_init_io(cmd);
                switch (ret) {
                        /* For BLKPREP_KILL/DEFER the cmd was released */
                case BLKPREP_KILL:
                        goto kill;
                case BLKPREP_DEFER:
                        goto defer;
                }

                /*
                 * Initialize the actual SCSI command for this request.
                 */
                if (req->rq_disk) {
                        drv = *(struct scsi_driver **)req->rq_disk->private_data;
                        if (unlikely(!drv->init_command(cmd))) {
                                scsi_release_buffers(cmd);
                                scsi_put_command(cmd);
                                goto kill;
                        }
                } else {
                        scsi_setup_blk_pc_cmnd(cmd, 3);
                        cmd->done = scsi_generic_done;
                }
        }

        /*
         * The request is now prepped, no need to come back here
         */
        req->flags |= REQ_DONTPREP;
        return BLKPREP_OK;

 defer:
        /* If we defer, the elv_next_request() returns NULL, but the
         * queue must be restarted, so we plug here if no returning
         * command will automatically do that. */
        if (sdev->device_busy == 0)
                blk_plug_device(q);
        return BLKPREP_DEFER;
 kill:
        req->errors = DID_NO_CONNECT << 16;
        return BLKPREP_KILL;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
{
        if (sdev->device_busy >= sdev->queue_depth)
                return 0;
        if (sdev->device_busy == 0 && sdev->device_blocked) {
                /*
                 * unblock after device_blocked iterates to zero
                 */
                if (--sdev->device_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                   sdev_printk(KERN_INFO, sdev,
                                   "unblocking device at zero depth\n"));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if (sdev->device_blocked)
                return 0;

        return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
                                   struct Scsi_Host *shost,
                                   struct scsi_device *sdev)
{
        if (scsi_host_in_recovery(shost))
                return 0;
        if (shost->host_busy == 0 && shost->host_blocked) {
                /*
                 * unblock after host_blocked iterates to zero
                 */
                if (--shost->host_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d unblocking host at zero depth\n",
                                        shost->host_no));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
            shost->host_blocked || shost->host_self_blocked) {
                if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
        }

        /* We're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry))
                list_del_init(&sdev->starved_entry);

        return 1;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, request_queue_t *q)
{
        struct scsi_cmnd *cmd = req->special;

        blkdev_dequeue_request(req);

        if (unlikely(cmd == NULL)) {
                printk(KERN_CRIT "impossible request in %s.\n",
                                 __FUNCTION__);
                BUG();
        }

        scsi_init_cmd_errh(cmd);
        cmd->result = DID_NO_CONNECT << 16;
        atomic_inc(&cmd->device->iorequest_cnt);
        __scsi_done(cmd);
}

1350 /*
1351  * Function:    scsi_request_fn()
1352  *
1353  * Purpose:     Main strategy routine for SCSI.
1354  *
1355  * Arguments:   q       - Pointer to actual queue.
1356  *
1357  * Returns:     Nothing
1358  *
1359  * Lock status: IO request lock assumed to be held when called.
1360  */
1361 static void scsi_request_fn(struct request_queue *q)
1362 {
1363         struct scsi_device *sdev = q->queuedata;
1364         struct Scsi_Host *shost;
1365         struct scsi_cmnd *cmd;
1366         struct request *req;
1367
1368         if (!sdev) {
1369                 printk("scsi: killing requests for dead queue\n");
1370                 while ((req = elv_next_request(q)) != NULL)
1371                         scsi_kill_request(req, q);
1372                 return;
1373         }
1374
1375         if(!get_device(&sdev->sdev_gendev))
1376                 /* We must be tearing the block queue down already */
1377                 return;
1378
1379         /*
1380          * To start with, we keep looping until the queue is empty, or until
1381          * the host is no longer able to accept any more requests.
1382          */
1383         shost = sdev->host;
1384         while (!blk_queue_plugged(q)) {
1385                 int rtn;
1386                 /*
1387                  * get next queueable request.  We do this early to make sure
1388                  * that the request is fully prepared even if we cannot 
1389                  * accept it.
1390                  */
1391                 req = elv_next_request(q);
1392                 if (!req || !scsi_dev_queue_ready(q, sdev))
1393                         break;
1394
1395                 if (unlikely(!scsi_device_online(sdev))) {
1396                         sdev_printk(KERN_ERR, sdev,
1397                                     "rejecting I/O to offline device\n");
1398                         scsi_kill_request(req, q);
1399                         continue;
1400                 }
1401
1402
1403                 /*
1404                  * Remove the request from the request list.
1405                  */
1406                 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1407                         blkdev_dequeue_request(req);
1408                 sdev->device_busy++;
1409
1410                 spin_unlock(q->queue_lock);
1411                 cmd = req->special;
1412                 if (unlikely(cmd == NULL)) {
1413                         printk(KERN_CRIT "impossible request in %s.\n"
1414                                          "please mail a stack trace to "
1415                                          "linux-scsi@vger.kernel.org",
1416                                          __FUNCTION__);
1417                         BUG();
1418                 }
1419                 spin_lock(shost->host_lock);
1420
1421                 if (!scsi_host_queue_ready(q, shost, sdev))
1422                         goto not_ready;
1423                 if (sdev->single_lun) {
1424                         if (scsi_target(sdev)->starget_sdev_user &&
1425                             scsi_target(sdev)->starget_sdev_user != sdev)
1426                                 goto not_ready;
1427                         scsi_target(sdev)->starget_sdev_user = sdev;
1428                 }
1429                 shost->host_busy++;
1430
1431                 /*
1432                  * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1433                  *              take the lock again.
1434                  */
1435                 spin_unlock_irq(shost->host_lock);
1436
1437                 /*
1438                  * Finally, initialize any error handling parameters, and set up
1439                  * the timers for timeouts.
1440                  */
1441                 scsi_init_cmd_errh(cmd);
1442
1443                 /*
1444                  * Dispatch the command to the low-level driver.
1445                  */
1446                 rtn = scsi_dispatch_cmd(cmd);
1447                 spin_lock_irq(q->queue_lock);
                if (rtn) {
                        /* we're refusing the command; because of
                         * the way locks get dropped, we need to
                         * check here if plugging is required */
                        if (sdev->device_busy == 0)
                                blk_plug_device(q);

1455                         break;
1456                 }
1457         }
1458
1459         goto out;
1460
1461  not_ready:
1462         spin_unlock_irq(shost->host_lock);
1463
1464         /*
1465          * lock q, handle tag, requeue req, and decrement device_busy. We
1466          * must return with queue_lock held.
1467          *
1468          * Decrementing device_busy without checking it is OK, as all such
1469          * cases (host limits or settings) should run the queue at some
1470          * later time.
1471          */
1472         spin_lock_irq(q->queue_lock);
1473         blk_requeue_request(q, req);
1474         sdev->device_busy--;
        if (sdev->device_busy == 0)
1476                 blk_plug_device(q);
1477  out:
1478         /* must be careful here...if we trigger the ->remove() function
1479          * we cannot be holding the q lock */
1480         spin_unlock_irq(q->queue_lock);
1481         put_device(&sdev->sdev_gendev);
1482         spin_lock_irq(q->queue_lock);
1483 }
1484
1485 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1486 {
1487         struct device *host_dev;
1488         u64 bounce_limit = 0xffffffff;
1489
1490         if (shost->unchecked_isa_dma)
1491                 return BLK_BOUNCE_ISA;
1492         /*
1493          * Platforms with virtual-DMA translation
1494          * hardware have no practical limit.
1495          */
1496         if (!PCI_DMA_BUS_IS_PHYS)
1497                 return BLK_BOUNCE_ANY;
1498
1499         host_dev = scsi_get_device(shost);
1500         if (host_dev && host_dev->dma_mask)
1501                 bounce_limit = *host_dev->dma_mask;
1502
1503         return bounce_limit;
1504 }
1505 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
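
/*
 * Example (illustrative sketch, not part of this file): a PCI LLD that
 * can DMA to any address widens its device's dma_mask before the host
 * is added, and scsi_calculate_bounce_limit() then returns that full
 * mask.  "pdev" is a placeholder for the LLD's struct pci_dev.
 *
 *      if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL))
 *              return -ENODEV;
 */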
1506
1507 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1508 {
1509         struct Scsi_Host *shost = sdev->host;
1510         struct request_queue *q;
1511
1512         q = blk_init_queue(scsi_request_fn, NULL);
1513         if (!q)
1514                 return NULL;
1515
1516         blk_queue_prep_rq(q, scsi_prep_fn);
1517
1518         blk_queue_max_hw_segments(q, shost->sg_tablesize);
1519         blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1520         blk_queue_max_sectors(q, shost->max_sectors);
1521         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1522         blk_queue_segment_boundary(q, shost->dma_boundary);
1523         blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1524
1525         /*
1526          * ordered tags are superior to flush ordering
1527          */
1528         if (shost->ordered_tag)
1529                 blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1530         else if (shost->ordered_flush) {
1531                 blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1532                 q->prepare_flush_fn = scsi_prepare_flush_fn;
1533                 q->end_flush_fn = scsi_end_flush_fn;
1534         }
1535
1536         if (!shost->use_clustering)
1537                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1538         return q;
1539 }
1540
1541 void scsi_free_queue(struct request_queue *q)
1542 {
1543         blk_cleanup_queue(q);
1544 }
1545
1546 /*
1547  * Function:    scsi_block_requests()
1548  *
1549  * Purpose:     Utility function used by low-level drivers to prevent further
1550  *              commands from being queued to the device.
1551  *
1552  * Arguments:   shost       - Host in question
1553  *
1554  * Returns:     Nothing
1555  *
1556  * Lock status: No locks are assumed held.
1557  *
1558  * Notes:       There is no timer nor any other means by which the requests
1559  *              get unblocked other than the low-level driver calling
1560  *              scsi_unblock_requests().
1561  */
1562 void scsi_block_requests(struct Scsi_Host *shost)
1563 {
1564         shost->host_self_blocked = 1;
1565 }
1566 EXPORT_SYMBOL(scsi_block_requests);
1567
1568 /*
1569  * Function:    scsi_unblock_requests()
1570  *
 * Purpose:     Utility function used by low-level drivers to allow further
 *              commands to be queued to the device.
1573  *
1574  * Arguments:   shost       - Host in question
1575  *
1576  * Returns:     Nothing
1577  *
1578  * Lock status: No locks are assumed held.
1579  *
1580  * Notes:       There is no timer nor any other means by which the requests
1581  *              get unblocked other than the low-level driver calling
1582  *              scsi_unblock_requests().
1583  *
1584  *              This is done as an API function so that changes to the
1585  *              internals of the scsi mid-layer won't require wholesale
1586  *              changes to drivers that use this feature.
1587  */
1588 void scsi_unblock_requests(struct Scsi_Host *shost)
1589 {
1590         shost->host_self_blocked = 0;
1591         scsi_run_host_queues(shost);
1592 }
1593 EXPORT_SYMBOL(scsi_unblock_requests);
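
/*
 * Example (illustrative sketch, not part of this file): a low-level
 * driver holding off new commands across an internal adapter reset.
 * "shost" is a placeholder for the driver's struct Scsi_Host and
 * my_hardware_reset() is a hypothetical LLD routine.
 *
 *      scsi_block_requests(shost);
 *      my_hardware_reset(shost);
 *      scsi_unblock_requests(shost);
 */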
1594
1595 int __init scsi_init_queue(void)
1596 {
1597         int i;
1598
1599         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1600                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1601                 int size = sgp->size * sizeof(struct scatterlist);
1602
1603                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1604                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
                if (!sgp->slab) {
                        printk(KERN_ERR "SCSI: can't init sg slab %s\n",
                                        sgp->name);
                        /* a NULL slab would oops on first allocation */
                        return -ENOMEM;
                }

                sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
                                mempool_alloc_slab, mempool_free_slab,
                                sgp->slab);
                if (!sgp->pool) {
                        printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
                                        sgp->name);
                        return -ENOMEM;
                }
1617         }
1618
1619         return 0;
1620 }
1621
1622 void scsi_exit_queue(void)
1623 {
1624         int i;
1625
        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                /* tolerate pools scsi_init_queue() never finished setting up */
                if (sgp->pool)
                        mempool_destroy(sgp->pool);
                if (sgp->slab)
                        kmem_cache_destroy(sgp->slab);
        }
1631 }

/**
 *      scsi_mode_sense - issue a MODE SENSE, falling back from a ten-byte
 *              to a six-byte command if necessary.
1635  *      @sdev:  SCSI device to be queried
1636  *      @dbd:   set if mode sense will allow block descriptors to be returned
1637  *      @modepage: mode page being requested
1638  *      @buffer: request buffer (may not be smaller than eight bytes)
1639  *      @len:   length of request buffer.
1640  *      @timeout: command timeout
1641  *      @retries: number of retries before failing
1642  *      @data: returns a structure abstracting the mode header data
 *      @sshdr: place to put the decoded sense header (or NULL if no sense
 *              data is to be collected).
1645  *
 *      Returns the command's status.  On success the mode data is decoded
 *      into @data, with @data->header_length set to 4 or 8 depending on
 *      whether a six- or ten-byte command was issued.
1649  **/
1650 int
1651 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1652                   unsigned char *buffer, int len, int timeout, int retries,
                  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
1654         unsigned char cmd[12];
1655         int use_10_for_ms;
1656         int header_length;
1657         int result;
1658         struct scsi_sense_hdr my_sshdr;
1659
1660         memset(data, 0, sizeof(*data));
1661         memset(&cmd[0], 0, 12);
        cmd[1] = dbd & 0x18;    /* allows DBD and LLBAA bits */
1663         cmd[2] = modepage;
1664
1665         /* caller might not be interested in sense, but we need it */
1666         if (!sshdr)
1667                 sshdr = &my_sshdr;
1668
1669  retry:
1670         use_10_for_ms = sdev->use_10_for_ms;
1671
1672         if (use_10_for_ms) {
1673                 if (len < 8)
1674                         len = 8;
1675
1676                 cmd[0] = MODE_SENSE_10;
1677                 cmd[8] = len;
1678                 header_length = 8;
1679         } else {
1680                 if (len < 4)
1681                         len = 4;
1682
1683                 cmd[0] = MODE_SENSE;
1684                 cmd[4] = len;
1685                 header_length = 4;
1686         }
1687
1688         memset(buffer, 0, len);
1689
1690         result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1691                                   sshdr, timeout, retries);
1692
        /* This code looks awful: what it's doing is making sure an
         * ILLEGAL REQUEST sense return identifies the actual command
         * byte as the problem.  MODE SENSE commands can return
         * ILLEGAL REQUEST if the mode page isn't supported. */
1697
1698         if (use_10_for_ms && !scsi_status_is_good(result) &&
1699             (driver_byte(result) & DRIVER_SENSE)) {
1700                 if (scsi_sense_valid(sshdr)) {
1701                         if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1702                             (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1703                                 /* 
1704                                  * Invalid command operation code
1705                                  */
1706                                 sdev->use_10_for_ms = 0;
1707                                 goto retry;
1708                         }
1709                 }
1710         }
1711
        if (scsi_status_is_good(result)) {
                data->header_length = header_length;
                if (use_10_for_ms) {
1715                         data->length = buffer[0]*256 + buffer[1] + 2;
1716                         data->medium_type = buffer[2];
1717                         data->device_specific = buffer[3];
1718                         data->longlba = buffer[4] & 0x01;
1719                         data->block_descriptor_length = buffer[6]*256
1720                                 + buffer[7];
1721                 } else {
1722                         data->length = buffer[0] + 1;
1723                         data->medium_type = buffer[1];
1724                         data->device_specific = buffer[2];
1725                         data->block_descriptor_length = buffer[3];
1726                 }
1727         }
1728
1729         return result;
1730 }
1731 EXPORT_SYMBOL(scsi_mode_sense);
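
/*
 * Example (illustrative sketch, not part of this file): reading the
 * caching mode page (0x08) and locating its payload.  "sdev" is a
 * placeholder for a valid scsi_device; the timeout and retry counts
 * are arbitrary.
 *
 *      unsigned char buffer[64];
 *      struct scsi_mode_data data;
 *      struct scsi_sense_hdr sshdr;
 *
 *      if (scsi_status_is_good(scsi_mode_sense(sdev, 0, 0x08, buffer,
 *                      sizeof(buffer), 10 * HZ, 3, &data, &sshdr))) {
 *              unsigned char *page = buffer + data.header_length +
 *                                    data.block_descriptor_length;
 *              ...
 *      }
 */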
1732
1733 int
1734 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1735 {
1736         char cmd[] = {
1737                 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1738         };
1739         struct scsi_sense_hdr sshdr;
1740         int result;
1741         
1742         result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1743                                   timeout, retries);
1744
1745         if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1746
1747                 if ((scsi_sense_valid(&sshdr)) &&
1748                     ((sshdr.sense_key == UNIT_ATTENTION) ||
1749                      (sshdr.sense_key == NOT_READY))) {
1750                         sdev->changed = 1;
1751                         result = 0;
1752                 }
1753         }
1754         return result;
1755 }
1756 EXPORT_SYMBOL(scsi_test_unit_ready);
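
/*
 * Example (illustrative sketch, not part of this file): polling a
 * removable device for readiness; on UNIT ATTENTION/NOT READY sense the
 * function above flags a media change instead of failing.  "sdev" is a
 * placeholder; the timeout and retry counts are arbitrary.
 *
 *      if (scsi_test_unit_ready(sdev, 10 * HZ, 3) == 0 && sdev->changed)
 *              ...     the medium was replaced, revalidate it  ...
 */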
1757
1758 /**
1759  *      scsi_device_set_state - Take the given device through the device
1760  *              state model.
1761  *      @sdev:  scsi device to change the state of.
1762  *      @state: state to change to.
1763  *
 *      Returns zero if successful, or an error if the requested
 *      transition is illegal.
1766  **/
1767 int
1768 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1769 {
1770         enum scsi_device_state oldstate = sdev->sdev_state;
1771
1772         if (state == oldstate)
1773                 return 0;
1774
1775         switch (state) {
1776         case SDEV_CREATED:
1777                 /* There are no legal states that come back to
1778                  * created.  This is the manually initialised start
1779                  * state */
1780                 goto illegal;
1781                         
1782         case SDEV_RUNNING:
1783                 switch (oldstate) {
1784                 case SDEV_CREATED:
1785                 case SDEV_OFFLINE:
1786                 case SDEV_QUIESCE:
1787                 case SDEV_BLOCK:
1788                         break;
1789                 default:
1790                         goto illegal;
1791                 }
1792                 break;
1793
1794         case SDEV_QUIESCE:
1795                 switch (oldstate) {
1796                 case SDEV_RUNNING:
1797                 case SDEV_OFFLINE:
1798                         break;
1799                 default:
1800                         goto illegal;
1801                 }
1802                 break;
1803
1804         case SDEV_OFFLINE:
1805                 switch (oldstate) {
1806                 case SDEV_CREATED:
1807                 case SDEV_RUNNING:
1808                 case SDEV_QUIESCE:
1809                 case SDEV_BLOCK:
1810                         break;
1811                 default:
1812                         goto illegal;
1813                 }
1814                 break;
1815
1816         case SDEV_BLOCK:
1817                 switch (oldstate) {
1818                 case SDEV_CREATED:
1819                 case SDEV_RUNNING:
1820                         break;
1821                 default:
1822                         goto illegal;
1823                 }
1824                 break;
1825
1826         case SDEV_CANCEL:
1827                 switch (oldstate) {
1828                 case SDEV_CREATED:
1829                 case SDEV_RUNNING:
1830                 case SDEV_OFFLINE:
1831                 case SDEV_BLOCK:
1832                         break;
1833                 default:
1834                         goto illegal;
1835                 }
1836                 break;
1837
1838         case SDEV_DEL:
1839                 switch (oldstate) {
1840                 case SDEV_CANCEL:
1841                         break;
1842                 default:
1843                         goto illegal;
1844                 }
1845                 break;
1846
1847         }
1848         sdev->sdev_state = state;
1849         return 0;
1850
1851  illegal:
1852         SCSI_LOG_ERROR_RECOVERY(1, 
1853                                 sdev_printk(KERN_ERR, sdev,
1854                                             "Illegal state transition %s->%s\n",
1855                                             scsi_device_state_name(oldstate),
1856                                             scsi_device_state_name(state))
1857                                 );
1858         return -EINVAL;
1859 }
1860 EXPORT_SYMBOL(scsi_device_set_state);
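
/*
 * Summary of the legal transitions implemented above (each row lists
 * the states reachable from the state on the left):
 *
 *      SDEV_CREATED -> RUNNING, OFFLINE, BLOCK, CANCEL
 *      SDEV_RUNNING -> QUIESCE, OFFLINE, BLOCK, CANCEL
 *      SDEV_QUIESCE -> RUNNING, OFFLINE
 *      SDEV_OFFLINE -> RUNNING, QUIESCE, CANCEL
 *      SDEV_BLOCK   -> RUNNING, OFFLINE, CANCEL
 *      SDEV_CANCEL  -> DEL
 */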
1861
1862 /**
1863  *      scsi_device_quiesce - Block user issued commands.
1864  *      @sdev:  scsi device to quiesce.
1865  *
1866  *      This works by trying to transition to the SDEV_QUIESCE state
1867  *      (which must be a legal transition).  When the device is in this
1868  *      state, only special requests will be accepted, all others will
1869  *      be deferred.  Since special requests may also be requeued requests,
1870  *      a successful return doesn't guarantee the device will be 
1871  *      totally quiescent.
1872  *
1873  *      Must be called with user context, may sleep.
1874  *
 *      Returns zero if successful, or an error if not.
1876  **/
1877 int
1878 scsi_device_quiesce(struct scsi_device *sdev)
1879 {
1880         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1881         if (err)
1882                 return err;
1883
1884         scsi_run_queue(sdev->request_queue);
1885         while (sdev->device_busy) {
1886                 msleep_interruptible(200);
1887                 scsi_run_queue(sdev->request_queue);
1888         }
1889         return 0;
1890 }
1891 EXPORT_SYMBOL(scsi_device_quiesce);
1892
1893 /**
1894  *      scsi_device_resume - Restart user issued commands to a quiesced device.
1895  *      @sdev:  scsi device to resume.
1896  *
1897  *      Moves the device from quiesced back to running and restarts the
1898  *      queues.
1899  *
1900  *      Must be called with user context, may sleep.
1901  **/
1902 void
1903 scsi_device_resume(struct scsi_device *sdev)
1904 {
        if (scsi_device_set_state(sdev, SDEV_RUNNING))
1906                 return;
1907         scsi_run_queue(sdev->request_queue);
1908 }
1909 EXPORT_SYMBOL(scsi_device_resume);
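
/*
 * Example (illustrative sketch, not part of this file): the usual
 * quiesce/resume pairing around work that needs a quiet device.
 * "sdev" is a placeholder for a valid scsi_device.
 *
 *      if (scsi_device_quiesce(sdev) == 0) {
 *              ...     issue internal commands, e.g. via
 *                      scsi_execute_req()      ...
 *              scsi_device_resume(sdev);
 *      }
 */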
1910
1911 static void
1912 device_quiesce_fn(struct scsi_device *sdev, void *data)
1913 {
1914         scsi_device_quiesce(sdev);
1915 }
1916
1917 void
1918 scsi_target_quiesce(struct scsi_target *starget)
1919 {
1920         starget_for_each_device(starget, NULL, device_quiesce_fn);
1921 }
1922 EXPORT_SYMBOL(scsi_target_quiesce);
1923
1924 static void
1925 device_resume_fn(struct scsi_device *sdev, void *data)
1926 {
1927         scsi_device_resume(sdev);
1928 }
1929
1930 void
1931 scsi_target_resume(struct scsi_target *starget)
1932 {
1933         starget_for_each_device(starget, NULL, device_resume_fn);
1934 }
1935 EXPORT_SYMBOL(scsi_target_resume);
1936
1937 /**
1938  * scsi_internal_device_block - internal function to put a device
1939  *                              temporarily into the SDEV_BLOCK state
1940  * @sdev:       device to block
1941  *
1942  * Block request made by scsi lld's to temporarily stop all
1943  * scsi commands on the specified device.  Called from interrupt
1944  * or normal process context.
1945  *
 * Returns zero if successful, or an error if not.
1947  *
1948  * Notes:       
1949  *      This routine transitions the device to the SDEV_BLOCK state
1950  *      (which must be a legal transition).  When the device is in this
 *      state, all commands are deferred until the scsi lld reenables
 *      the device with scsi_internal_device_unblock() or
 *      device_block_tmo fires.  This routine assumes the host_lock
 *      is held on entry.
1954  **/
1955 int
1956 scsi_internal_device_block(struct scsi_device *sdev)
1957 {
1958         request_queue_t *q = sdev->request_queue;
1959         unsigned long flags;
1960         int err = 0;
1961
1962         err = scsi_device_set_state(sdev, SDEV_BLOCK);
1963         if (err)
1964                 return err;
1965
1966         /* 
1967          * The device has transitioned to SDEV_BLOCK.  Stop the
1968          * block layer from calling the midlayer with this device's
1969          * request queue. 
1970          */
1971         spin_lock_irqsave(q->queue_lock, flags);
1972         blk_stop_queue(q);
1973         spin_unlock_irqrestore(q->queue_lock, flags);
1974
1975         return 0;
1976 }
1977 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
1978  
1979 /**
1980  * scsi_internal_device_unblock - resume a device after a block request
1981  * @sdev:       device to resume
1982  *
1983  * Called by scsi lld's or the midlayer to restart the device queue
1984  * for the previously suspended scsi device.  Called from interrupt or
1985  * normal process context.
1986  *
 * Returns zero if successful, or an error if not.
1988  *
1989  * Notes:       
1990  *      This routine transitions the device to the SDEV_RUNNING state
1991  *      (which must be a legal transition) allowing the midlayer to
1992  *      goose the queue for this device.  This routine assumes the 
1993  *      host_lock is held upon entry.
1994  **/
1995 int
1996 scsi_internal_device_unblock(struct scsi_device *sdev)
1997 {
1998         request_queue_t *q = sdev->request_queue; 
1999         int err;
2000         unsigned long flags;
2001         
2002         /* 
2003          * Try to transition the scsi device to SDEV_RUNNING
2004          * and goose the device queue if successful.  
2005          */
2006         err = scsi_device_set_state(sdev, SDEV_RUNNING);
2007         if (err)
2008                 return err;
2009
2010         spin_lock_irqsave(q->queue_lock, flags);
2011         blk_start_queue(q);
2012         spin_unlock_irqrestore(q->queue_lock, flags);
2013
2014         return 0;
2015 }
2016 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
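
/*
 * Example (illustrative sketch, not part of this file): an LLD pausing
 * and later resuming a single device.  Per the notes above, both calls
 * assume the host_lock is held; "shost", "sdev" and "flags" are
 * placeholders.
 *
 *      spin_lock_irqsave(shost->host_lock, flags);
 *      scsi_internal_device_block(sdev);
 *      spin_unlock_irqrestore(shost->host_lock, flags);
 *
 *      ...     later, when the device may run again   ...
 *
 *      spin_lock_irqsave(shost->host_lock, flags);
 *      scsi_internal_device_unblock(sdev);
 *      spin_unlock_irqrestore(shost->host_lock, flags);
 */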
2017
2018 static void
2019 device_block(struct scsi_device *sdev, void *data)
2020 {
2021         scsi_internal_device_block(sdev);
2022 }
2023
2024 static int
2025 target_block(struct device *dev, void *data)
2026 {
2027         if (scsi_is_target_device(dev))
2028                 starget_for_each_device(to_scsi_target(dev), NULL,
2029                                         device_block);
2030         return 0;
2031 }
2032
2033 void
2034 scsi_target_block(struct device *dev)
2035 {
2036         if (scsi_is_target_device(dev))
2037                 starget_for_each_device(to_scsi_target(dev), NULL,
2038                                         device_block);
2039         else
2040                 device_for_each_child(dev, NULL, target_block);
2041 }
2042 EXPORT_SYMBOL_GPL(scsi_target_block);
2043
2044 static void
2045 device_unblock(struct scsi_device *sdev, void *data)
2046 {
2047         scsi_internal_device_unblock(sdev);
2048 }
2049
2050 static int
2051 target_unblock(struct device *dev, void *data)
2052 {
2053         if (scsi_is_target_device(dev))
2054                 starget_for_each_device(to_scsi_target(dev), NULL,
2055                                         device_unblock);
2056         return 0;
2057 }
2058
2059 void
2060 scsi_target_unblock(struct device *dev)
2061 {
2062         if (scsi_is_target_device(dev))
2063                 starget_for_each_device(to_scsi_target(dev), NULL,
2064                                         device_unblock);
2065         else
2066                 device_for_each_child(dev, NULL, target_unblock);
2067 }
2068 EXPORT_SYMBOL_GPL(scsi_target_unblock);
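
/*
 * Example (illustrative sketch, not part of this file): a transport
 * class suspending all I/O to a target while its port is temporarily
 * gone, then resuming it.  "starget_dev" is a placeholder for the
 * target's struct device.
 *
 *      scsi_target_block(starget_dev);
 *      ...     wait for the port to return, or give up ...
 *      scsi_target_unblock(starget_dev);
 */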