PM: Remove CONFIG_PM_OPS
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index dd3f9d2..fb2bb35 100644
@@ -85,7 +85,7 @@ static void scsi_unprep_request(struct request *req)
 {
        struct scsi_cmnd *cmd = req->special;
 
-       req->cmd_flags &= ~REQ_DONTPREP;
+       blk_unprep_request(req);
        req->special = NULL;
 
        scsi_put_command(cmd);
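
A note on this hunk: blk_unprep_request() is a block-layer helper that clears REQ_DONTPREP and invokes the queue's unprep_rq_fn if one is registered, so the driver no longer clears the flag by hand. A minimal user-space sketch of the flag pairing, with made-up types (fake_request and fake_unprep_request are illustrative, not kernel API):

#include <assert.h>
#include <stddef.h>

#define REQ_DONTPREP (1u << 0)          /* stand-in for the real flag */

struct fake_request {
        unsigned int cmd_flags;
        void *special;                  /* driver-private payload */
};

/* What the unprep step does: undo the prep marking so a requeued
 * request gets prepared again from scratch. */
static void fake_unprep_request(struct fake_request *req)
{
        req->cmd_flags &= ~REQ_DONTPREP;
}

int main(void)
{
        struct fake_request req = { .cmd_flags = REQ_DONTPREP };

        fake_unprep_request(&req);      /* mirrors the new call above */
        req.special = NULL;             /* mirrors scsi_unprep_request() */
        assert(!(req.cmd_flags & REQ_DONTPREP));
        return 0;
}
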
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
                                        &sdev->request_queue->queue_flags);
                if (flagset)
                        queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-               __blk_run_queue(sdev->request_queue);
+               __blk_run_queue(sdev->request_queue, false);
                if (flagset)
                        queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
                spin_unlock(sdev->request_queue->queue_lock);
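
__blk_run_queue() grew a second argument in this era; as I read it, the bool selects whether the queue run is punted to the kblockd worker, and false preserves the old run-in-caller-context behaviour. A toy model of that run-now-or-defer idiom (all names are stand-ins, not block-layer API):

#include <stdbool.h>
#include <stdio.h>

struct toy_queue { const char *name; };

static void toy_run_inline(struct toy_queue *q) { printf("run %s now\n", q->name); }
static void toy_defer(struct toy_queue *q)      { printf("hand %s to worker\n", q->name); }

/* Models an API where a bool chooses between running the queue in the
 * caller's context or deferring it to a worker thread. */
static void toy_run_queue(struct toy_queue *q, bool force_worker)
{
        if (force_worker)
                toy_defer(q);
        else
                toy_run_inline(q);
}

int main(void)
{
        struct toy_queue q = { "sdev" };

        toy_run_queue(&q, false);       /* matches the 'false' in the hunk above */
        return 0;
}
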
@@ -722,7 +722,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }
 
-       if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
+       if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        if (sense_valid && req->sense) {
@@ -749,15 +749,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                         */
                        req->next_rq->resid_len = scsi_in(cmd)->resid;
 
+                       scsi_release_buffers(cmd);
                        blk_end_request_all(req, 0);
 
-                       scsi_release_buffers(cmd);
                        scsi_next_command(cmd);
                        return;
                }
        }
 
-       BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
+       /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
+       BUG_ON(blk_bidi_rq(req));
 
        /*
         * Next deal with any sectors which we were able to correctly
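
The reorder above presumably ensures the command's scatter-gather buffers are released while the request still belongs to the command, before blk_end_request_all() lets the block layer recycle it. The general shape, with illustrative types:

#include <stdio.h>

struct toy_req { int done; };
struct toy_cmd { struct toy_req *req; int buffers; };

static void release_buffers(struct toy_cmd *c) { c->buffers = 0; }
static void end_request(struct toy_req *r)     { r->done = 1; }
static void start_next(struct toy_cmd *c)      { (void)c; }

/* Free per-command buffers first, complete the request second, only
 * then recycle the command for the next request. */
static void complete_bidi(struct toy_cmd *c)
{
        release_buffers(c);
        end_request(c->req);
        start_next(c);
}

int main(void)
{
        struct toy_req r = { 0 };
        struct toy_cmd c = { &r, 1 };

        complete_bidi(&c);
        printf("done=%d buffers=%d\n", r.done, c.buffers);
        return 0;
}
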
@@ -773,8 +774,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
         * we already took a copy of the original into rq->errors which
         * is what gets returned to the user
         */
-       if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
-               if (!(req->cmd_flags & REQ_QUIET))
+       if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
+               /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
+                * print since caller wants ATA registers. Only occurs on
+                * SCSI ATA PASS_THROUGH commands when CK_COND=1
+                */
+               if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
+                       ;
+               else if (!(req->cmd_flags & REQ_QUIET))
                        scsi_print_sense("", cmd);
                result = 0;
                /* BLOCK_PC may have set error */
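
Background for the new special case: an ATA PASS-THROUGH command issued with CK_COND=1 deliberately returns sense data (ASC/ASCQ 0x00/0x1d, ATA PASS-THROUGH INFORMATION AVAILABLE) whose descriptor carries the ATA registers the caller asked for, so logging it as a recovered error would be noise. The filter in isolation, as a stand-alone sketch (function and parameter names are ours):

#include <stdbool.h>
#include <stdio.h>

static bool should_print_recovered_sense(unsigned char asc,
                                         unsigned char ascq, bool quiet)
{
        if (asc == 0x00 && ascq == 0x1d)
                return false;   /* caller wanted ATA registers, not a log line */
        return !quiet;
}

int main(void)
{
        printf("%d\n", should_print_recovered_sense(0x00, 0x1d, false)); /* 0 */
        printf("%d\n", should_print_recovered_sense(0x04, 0x01, false)); /* 1 */
        return 0;
}
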
@@ -859,6 +866,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                                case 0x07: /* operation in progress */
                                case 0x08: /* Long write in progress */
                                case 0x09: /* self test in progress */
+                               case 0x14: /* space allocation in progress */
                                        action = ACTION_DELAYED_RETRY;
                                        break;
                                default:
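
ASC 0x04 with ASCQ 0x14 is SBC's LOGICAL UNIT NOT READY, SPACE ALLOCATION IN PROGRESS, reported by thin-provisioned devices; it joins the other transient not-ready states that get a delayed retry. The dispatch in isolation (enum names are ours; the default mirrors the surrounding kernel switch, which falls back to failing):

#include <stdio.h>

enum toy_action { ACT_DELAYED_RETRY, ACT_FAIL };

/* ascq dispatch for ASC 0x04 (LOGICAL UNIT NOT READY); cases mirror
 * the hunk above. */
static enum toy_action not_ready_action(unsigned char ascq)
{
        switch (ascq) {
        case 0x07:      /* operation in progress */
        case 0x08:      /* long write in progress */
        case 0x09:      /* self test in progress */
        case 0x14:      /* space allocation in progress */
                return ACT_DELAYED_RETRY;
        default:
                return ACT_FAIL;
        }
}

int main(void)
{
        printf("0x14 -> %s\n", not_ready_action(0x14) == ACT_DELAYED_RETRY ?
               "delayed retry" : "fail");
        return 0;
}
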
@@ -896,9 +904,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        scsi_print_result(cmd);
                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
+                       scsi_print_command(cmd);
                }
-               blk_end_request_all(req, -EIO);
-               scsi_next_command(cmd);
+               if (blk_end_request_err(req, error))
+                       scsi_requeue_command(q, cmd);
+               else
+                       scsi_next_command(cmd);
                break;
        case ACTION_REPREP:
                /* Unprep the request and put it back at the head of the queue.
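
blk_end_request_err() completes the failing part of the request and returns true when bytes remain pending (possible with mixed merges), in which case the command is requeued for the remainder instead of being finished unconditionally as the old blk_end_request_all(req, -EIO) did. A toy of that control flow (toy_end_request_err is a stand-in, not the block-layer function):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in: returns true when bytes remain to be retried after the
 * erroring part of the request has been completed. */
static bool toy_end_request_err(int *remaining, int err_bytes)
{
        *remaining -= err_bytes;
        return *remaining > 0;
}

int main(void)
{
        int remaining = 4096;

        if (toy_end_request_err(&remaining, 4096))
                printf("requeue remainder\n");  /* scsi_requeue_command() */
        else
                printf("command finished\n");   /* scsi_next_command() */
        return 0;
}
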
@@ -957,11 +968,13 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
  */
 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
-       int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
+       struct request *rq = cmd->request;
+
+       int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
        if (error)
                goto err_exit;
 
-       if (blk_bidi_rq(cmd->request)) {
+       if (blk_bidi_rq(rq)) {
                struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
                        scsi_sdb_cache, GFP_ATOMIC);
                if (!bidi_sdb) {
@@ -969,28 +982,28 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
                        goto err_exit;
                }
 
-               cmd->request->next_rq->special = bidi_sdb;
-               error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
-                                                                   GFP_ATOMIC);
+               rq->next_rq->special = bidi_sdb;
+               error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
                if (error)
                        goto err_exit;
        }
 
-       if (blk_integrity_rq(cmd->request)) {
+       if (blk_integrity_rq(rq)) {
                struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
                int ivecs, count;
 
                BUG_ON(prot_sdb == NULL);
-               ivecs = blk_rq_count_integrity_sg(cmd->request);
+               ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
                if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
                }
 
-               count = blk_rq_map_integrity_sg(cmd->request,
+               count = blk_rq_map_integrity_sg(rq->q, rq->bio,
                                                prot_sdb->table.sgl);
                BUG_ON(unlikely(count > ivecs));
+               BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
 
                cmd->prot_sdb = prot_sdb;
                cmd->prot_sdb->table.nents = count;
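
The integrity path now passes the queue and bio explicitly, and the extra BUG_ON checks the mapped segment count against the queue's advertised integrity-segment limit (queue_max_integrity_segments() arrived with this interface). The invariants in miniature (values are made up):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        int ivecs = 4;          /* what counting the bio predicted */
        int queue_limit = 8;    /* queue_max_integrity_segments() analog */
        int count = 3;          /* what mapping actually produced */

        assert(count <= ivecs);         /* mapping may merge, never grow */
        assert(count <= queue_limit);   /* and must respect the queue limit */
        printf("mapped %d of %d predicted segments\n", count, ivecs);
        return 0;
}
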
@@ -1000,11 +1013,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 
 err_exit:
        scsi_release_buffers(cmd);
-       if (error == BLKPREP_KILL)
-               scsi_put_command(cmd);
-       else /* BLKPREP_DEFER */
-               scsi_unprep_request(cmd->request);
-
+       cmd->request->special = NULL;
+       scsi_put_command(cmd);
        return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
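
The new err_exit collapses the old two-way cleanup (put the command on BLKPREP_KILL, unprep on BLKPREP_DEFER) into one path: release buffers, detach the command from the request, and put it. The shape of that unified path (names are stand-ins):

#include <stdio.h>

struct toy_req { void *special; };
struct toy_cmd { struct toy_req *req; int buffers; };

static void release_buffers(struct toy_cmd *c) { c->buffers = 0; }
static void put_command(struct toy_cmd *c)     { (void)c; }

/* One cleanup path regardless of whether prep is killed or deferred. */
static int prep_error(struct toy_cmd *c, int error)
{
        release_buffers(c);
        c->req->special = NULL; /* request no longer owns the command */
        put_command(c);
        return error;
}

int main(void)
{
        struct toy_req r = { &r };
        struct toy_cmd c = { &r, 1 };

        printf("error=%d special=%p\n", prep_error(&c, -1), r.special);
        return 0;
}
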
@@ -1207,6 +1217,7 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
                ret = scsi_setup_blk_pc_cmnd(sdev, req);
        return scsi_prep_return(q, req, ret);
 }
+EXPORT_SYMBOL(scsi_prep_fn);
 
 /*
  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
@@ -1267,11 +1278,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
        }
 
        if (scsi_target_is_busy(starget)) {
-               if (list_empty(&sdev->starved_entry)) {
+               if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry,
                                      &shost->starved_list);
-                       return 0;
-               }
+               return 0;
        }
 
        /* We're OK to process the command, so we can't be starved */
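
The fix here: the old code returned 0 (back off) only when the device was newly added to the starved list, so a device already on the list would keep issuing to a busy target. Now any busy target means back off. Before/after in miniature (a bool stands in for list membership):

#include <stdbool.h>
#include <stdio.h>

static bool can_issue_fixed(bool target_busy, bool *on_starved_list)
{
        if (target_busy) {
                if (!*on_starved_list)
                        *on_starved_list = true;
                return false;           /* always back off while busy */
        }
        return true;
}

int main(void)
{
        bool starved = true;            /* already on the starved list */

        /* the buggy version returned true (issue!) in this case */
        printf("%d\n", can_issue_fixed(true, &starved));        /* 0 */
        return 0;
}
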
@@ -1355,18 +1365,15 @@ static int scsi_lld_busy(struct request_queue *q)
 static void scsi_kill_request(struct request *req, struct request_queue *q)
 {
        struct scsi_cmnd *cmd = req->special;
-       struct scsi_device *sdev = cmd->device;
-       struct scsi_target *starget = scsi_target(sdev);
-       struct Scsi_Host *shost = sdev->host;
+       struct scsi_device *sdev;
+       struct scsi_target *starget;
+       struct Scsi_Host *shost;
 
        blk_start_request(req);
 
-       if (unlikely(cmd == NULL)) {
-               printk(KERN_CRIT "impossible request in %s.\n",
-                                __func__);
-               BUG();
-       }
-
+       sdev = cmd->device;
+       starget = scsi_target(sdev);
+       shost = sdev->host;
        scsi_init_cmd_errh(cmd);
        cmd->result = DID_NO_CONNECT << 16;
        atomic_inc(&cmd->device->iorequest_cnt);
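
The deleted NULL check was dead code: the old version dereferenced cmd in the variable initializers before ever testing it, so a NULL cmd would already have oopsed. Moving the dereferences after blk_start_request() and dropping the check is the honest version. The anti-pattern in miniature:

struct dev;
struct cmd { struct dev *device; };

static struct dev *buggy(struct cmd *cmd)
{
        struct dev *sdev = cmd->device; /* crashes here if cmd == NULL... */

        if (cmd == NULL)                /* ...so this test is dead code */
                return (struct dev *)0;
        return sdev;
}
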
@@ -1395,11 +1402,6 @@ static void scsi_softirq_done(struct request *rq)
 
        INIT_LIST_HEAD(&cmd->eh_entry);
 
-       /*
-        * Set the serial numbers back to zero
-        */
-       cmd->serial_number = 0;
-
        atomic_inc(&cmd->device->iodone_cnt);
        if (cmd->result)
                atomic_inc(&cmd->device->ioerr_cnt);
@@ -1616,19 +1618,26 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
        /*
         * this limit is imposed by hardware restrictions
         */
-       blk_queue_max_hw_segments(q, shost->sg_tablesize);
-       blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+       blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
+                                       SCSI_MAX_SG_CHAIN_SEGMENTS));
+
+       if (scsi_host_prot_dma(shost)) {
+               shost->sg_prot_tablesize =
+                       min_not_zero(shost->sg_prot_tablesize,
+                                    (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
+               BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
+               blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
+       }
 
-       blk_queue_max_sectors(q, shost->max_sectors);
+       blk_queue_max_hw_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
        dma_set_seg_boundary(dev, shost->dma_boundary);
 
        blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
-       /* New queue, no concurrency on queue_flags */
        if (!shost->use_clustering)
-               queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+               q->limits.cluster = 0;
 
        /*
         * set a reasonable default alignment on word boundaries: the
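
Block-layer API churn visible here: blk_queue_max_hw_segments() and blk_queue_max_phys_segments() were consolidated into blk_queue_max_segments(), hence the min_t() clamp; blk_queue_max_sectors() was renamed blk_queue_max_hw_sectors(); and the cluster setting moved from a queue flag into struct queue_limits. The min_t() clamp in miniature (toy_min_t is a stand-in; the kernel macro also avoids double evaluation via typed temporaries):

#include <stdio.h>

/* Compare in the named type, like min_t(unsigned short, a, b). */
#define toy_min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        unsigned int sg_tablesize = 4096;
        unsigned int chain_max = 2048;  /* SCSI_MAX_SG_CHAIN_SEGMENTS analog */

        printf("max segments: %u\n",
               (unsigned)toy_min_t(unsigned short, sg_tablesize, chain_max));
        return 0;
}
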
@@ -1968,8 +1977,7 @@ EXPORT_SYMBOL(scsi_mode_sense);
  *             in.
  *
  *     Returns zero if unsuccessful or an error if TUR failed.  For
- *     removable media, a return of NOT_READY or UNIT_ATTENTION is
- *     translated to success, with the ->changed flag updated.
+ *     removable media, UNIT_ATTENTION sets ->changed flag.
  **/
 int
 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
@@ -1996,16 +2004,6 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
        } while (scsi_sense_valid(sshdr) &&
                 sshdr->sense_key == UNIT_ATTENTION && --retries);
 
-       if (!sshdr)
-               /* could not allocate sense buffer, so can't process it */
-               return result;
-
-       if (sdev->removable && scsi_sense_valid(sshdr) &&
-           (sshdr->sense_key == UNIT_ATTENTION ||
-            sshdr->sense_key == NOT_READY)) {
-               sdev->changed = 1;
-               result = 0;
-       }
        if (!sshdr_external)
                kfree(sshdr);
        return result;
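
With the removed block gone, scsi_test_unit_ready() simply retries while the device keeps reporting UNIT ATTENTION; the old translation of NOT_READY/UNIT_ATTENTION into success for removable media is dropped. The retry loop's shape as a stand-alone sketch (send_tur is a stand-in):

#include <stdio.h>

static int tries;

/* Stand-in for issuing TEST UNIT READY: reports UNIT ATTENTION twice,
 * then succeeds. */
static int send_tur(int *unit_attention)
{
        *unit_attention = (++tries <= 2);
        return *unit_attention ? -1 : 0;
}

int main(void)
{
        int retries = 5, ua, result;

        /* retry-while-UNIT-ATTENTION, as in the loop above */
        do {
                result = send_tur(&ua);
        } while (ua && --retries);

        printf("result=%d after %d tries\n", result, tries);
        return 0;
}
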
@@ -2412,20 +2410,19 @@ int
 scsi_internal_device_unblock(struct scsi_device *sdev)
 {
        struct request_queue *q = sdev->request_queue; 
-       int err;
        unsigned long flags;
        
        /* 
         * Try to transition the scsi device to SDEV_RUNNING
         * and goose the device queue if successful.  
         */
-       err = scsi_device_set_state(sdev, SDEV_RUNNING);
-       if (err) {
-               err = scsi_device_set_state(sdev, SDEV_CREATED);
-
-               if (err)
-                       return err;
-       }
+       if (sdev->sdev_state == SDEV_BLOCK)
+               sdev->sdev_state = SDEV_RUNNING;
+       else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
+               sdev->sdev_state = SDEV_CREATED;
+       else if (sdev->sdev_state != SDEV_CANCEL &&
+                sdev->sdev_state != SDEV_OFFLINE)
+               return -EINVAL;
 
        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
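
The open-coded state check replaces the two scsi_device_set_state() calls; as a table (enum values are stand-ins for the kernel's enum scsi_device_state; CANCEL and OFFLINE are left unchanged but still get their queue restarted):

#include <stdio.h>

enum toy_state { CREATED, RUNNING, BLOCK, CREATED_BLOCK, CANCEL, OFFLINE };

static int toy_unblock(enum toy_state *st)
{
        switch (*st) {
        case BLOCK:             *st = RUNNING; return 0;
        case CREATED_BLOCK:     *st = CREATED; return 0;
        case CANCEL:            /* fall through: state left as-is */
        case OFFLINE:           return 0;
        default:                return -22;     /* -EINVAL */
        }
}

int main(void)
{
        enum toy_state st = BLOCK;

        printf("ret=%d state=%d\n", toy_unblock(&st), (int)st);
        return 0;
}
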