virtio_blk: drop barrier support, use blk_queue_flush (REQ_FLUSH) for cache flushing
[linux-2.6.git] / drivers / block / virtio_blk.c
index 23b7c48..6ecf89c 100644 (file)
@@ -65,13 +65,18 @@ static void blk_done(struct virtqueue *vq)
                        break;
                }
 
-               if (blk_pc_request(vbr->req)) {
+               switch (vbr->req->cmd_type) {
+               case REQ_TYPE_BLOCK_PC:
                        vbr->req->resid_len = vbr->in_hdr.residual;
                        vbr->req->sense_len = vbr->in_hdr.sense_len;
                        vbr->req->errors = vbr->in_hdr.errors;
-               }
-               if (blk_special_request(vbr->req))
+                       break;
+               case REQ_TYPE_SPECIAL:
                        vbr->req->errors = (error != 0);
+                       break;
+               default:
+                       break;
+               }
 
                __blk_end_request_all(vbr->req, error);
                list_del(&vbr->list);
@@ -94,38 +99,34 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
                return false;
 
        vbr->req = req;
-       switch (req->cmd_type) {
-       case REQ_TYPE_FS:
-               vbr->out_hdr.type = 0;
-               vbr->out_hdr.sector = blk_rq_pos(vbr->req);
-               vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-               break;
-       case REQ_TYPE_BLOCK_PC:
-               vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
-               vbr->out_hdr.sector = 0;
-               vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-               break;
-       case REQ_TYPE_SPECIAL:
-               vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+
+       if (req->cmd_flags & REQ_FLUSH) {
+               vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
                vbr->out_hdr.sector = 0;
                vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-               break;
-       case REQ_TYPE_LINUX_BLOCK:
-               if (req->cmd[0] == REQ_LB_OP_FLUSH) {
-                       vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+       } else {
+               switch (req->cmd_type) {
+               case REQ_TYPE_FS:
+                       vbr->out_hdr.type = 0;
+                       vbr->out_hdr.sector = blk_rq_pos(vbr->req);
+                       vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+                       break;
+               case REQ_TYPE_BLOCK_PC:
+                       vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
                        vbr->out_hdr.sector = 0;
                        vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
                        break;
+               case REQ_TYPE_SPECIAL:
+                       vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+                       vbr->out_hdr.sector = 0;
+                       vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+                       break;
+               default:
+                       /* We don't put anything else in the queue. */
+                       BUG();
                }
-               /*FALLTHRU*/
-       default:
-               /* We don't put anything else in the queue. */
-               BUG();
        }
 
-       if (blk_barrier_rq(vbr->req))
-               vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
-
        sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
 
        /*
@@ -134,12 +135,12 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
         * block, and before the normal inhdr we put the sense data and the
         * inhdr with additional status information before the normal inhdr.
         */
-       if (blk_pc_request(vbr->req))
+       if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
                sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
 
        num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
 
-       if (blk_pc_request(vbr->req)) {
+       if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
                sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
                sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
                           sizeof(vbr->in_hdr));
@@ -190,12 +191,6 @@ static void do_virtblk_request(struct request_queue *q)
                virtqueue_kick(vblk->vq);
 }
 
-static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
-{
-       req->cmd_type = REQ_TYPE_LINUX_BLOCK;
-       req->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -203,6 +198,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
        struct virtio_blk *vblk = disk->private_data;
        struct request *req;
        struct bio *bio;
+       int err;
 
        bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
                           GFP_KERNEL);
@@ -216,11 +212,14 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
        }
 
        req->cmd_type = REQ_TYPE_SPECIAL;
-       return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+       err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+       blk_put_request(req);
+
+       return err;
 }
 
 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
-                        unsigned cmd, unsigned long data)
+                            unsigned int cmd, unsigned long data)
 {
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;
@@ -261,7 +260,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 }
 
 static const struct block_device_operations virtblk_fops = {
-       .locked_ioctl = virtblk_ioctl,
+       .ioctl  = virtblk_ioctl,
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
 };
@@ -377,32 +376,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
        vblk->disk->driverfs_dev = &vdev->dev;
        index++;
 
-       if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) {
-               /*
-                * If the FLUSH feature is supported we do have support for
-                * flushing a volatile write cache on the host.  Use that
-                * to implement write barrier support.
-                */
-               blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
-                                 virtblk_prepare_flush);
-       } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
-               /*
-                * If the BARRIER feature is supported the host expects us
-                * to order request by tags.  This implies there is not
-                * volatile write cache on the host, and that the host
-                * never re-orders outstanding I/O.  This feature is not
-                * useful for real life scenarious and deprecated.
-                */
-               blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
-       } else {
-               /*
-                * If the FLUSH feature is not supported we must assume that
-                * the host does not perform any kind of volatile write
-                * caching. We still need to drain the queue to provider
-                * proper barrier semantics.
-                */
-               blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);
-       }
+       /* configure queue flush support */
+       if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
+               blk_queue_flush(q, REQ_FLUSH);
 
        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
@@ -521,9 +497,9 @@ static const struct virtio_device_id id_table[] = {
 };
 
 static unsigned int features[] = {
-       VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
-       VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
-       VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
+       VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
+       VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
+       VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
 };
 
 /*