block: set the bounce_pfn to the actual DMA limit rather than to max memory
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 0ab81a0a7502303a3f9ae06382c6a958af1d9c1e..f0faefca032ff59d739460be5e7898a61d33cc48 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/gfp.h>
 
 #include "blk.h"
 
@@ -12,7 +13,6 @@
  * blk_queue_ordered - does this queue support ordered writes
  * @q:        the request queue
  * @ordered:  one of QUEUE_ORDERED_*
- * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
 *   For journalled file systems, doing ordered writes on a commit
@@ -21,15 +21,8 @@
 *   feature should call this function and indicate so.
  *
  **/
-int blk_queue_ordered(struct request_queue *q, unsigned ordered,
-                     prepare_flush_fn *prepare_flush_fn)
+int blk_queue_ordered(struct request_queue *q, unsigned ordered)
 {
-       if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
-                                            QUEUE_ORDERED_DO_POSTFLUSH))) {
-               printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
-               return -EINVAL;
-       }
-
        if (ordered != QUEUE_ORDERED_NONE &&
            ordered != QUEUE_ORDERED_DRAIN &&
            ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
@@ -43,7 +36,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
        q->ordered = ordered;
        q->next_ordered = ordered;
-       q->prepare_flush_fn = prepare_flush_fn;
 
        return 0;
 }
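
With prepare_flush_fn gone, a driver that supports ordered writes only selects an ordering mode; the block layer now constructs the flush requests itself (see queue_flush() below). A minimal sketch of the new call site, using a hypothetical driver's queue-setup function:

	/* Hypothetical driver init: advertise drain + cache-flush ordering.
	 * QUEUE_ORDERED_DRAIN_FLUSH is one of the modes accepted above; no
	 * flush-preparation callback is registered any more.
	 */
	static int mydrv_init_queue(struct request_queue *q)
	{
		return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
	}
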
@@ -78,7 +70,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
         *
         * http://thread.gmane.org/gmane.linux.kernel/537473
         */
-       if (!blk_fs_request(rq))
+       if (rq->cmd_type != REQ_TYPE_FS)
                return QUEUE_ORDSEQ_DRAIN;
 
        if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
@@ -142,10 +134,10 @@ static void queue_flush(struct request_queue *q, unsigned which)
        }
 
        blk_rq_init(q, rq);
-       rq->cmd_flags = REQ_HARDBARRIER;
-       rq->rq_disk = q->bar_rq.rq_disk;
+       rq->cmd_type = REQ_TYPE_FS;
+       rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
+       rq->rq_disk = q->orig_bar_rq->rq_disk;
        rq->end_io = end_io;
-       q->prepare_flush_fn(q, rq);
 
        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
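
Instead of having prepare_flush_fn fill in a device-specific flush command, the proxy request is now tagged REQ_FLUSH and handed to the driver like any other request. A hedged sketch of how a driver's request handler might recognise it (all driver-side names are hypothetical):

	/* Hypothetical request_fn: the flush proxy request arrives with
	 * REQ_FLUSH set, so the driver issues its cache-flush command here
	 * rather than in a prepare_flush_fn callback. The helpers are
	 * assumed to issue the command and complete the request.
	 */
	static void mydrv_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = blk_fetch_request(q)) != NULL) {
			if (rq->cmd_flags & REQ_FLUSH)
				mydrv_issue_cache_flush(rq);	/* hypothetical */
			else
				mydrv_issue_rw(rq);		/* hypothetical */
		}
	}
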
@@ -202,7 +194,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
                /* initialize proxy request and queue it */
                blk_rq_init(q, rq);
                if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-                       rq->cmd_flags |= REQ_RW;
+                       rq->cmd_flags |= REQ_WRITE;
                if (q->ordered & QUEUE_ORDERED_DO_FUA)
                        rq->cmd_flags |= REQ_FUA;
                init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -218,7 +210,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
        } else
                skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-       if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
+       if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
                rq = NULL;
        else
                skip |= QUEUE_ORDSEQ_DRAIN;
@@ -235,7 +227,8 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
        struct request *rq = *rqp;
-       const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+       const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
+                               (rq->cmd_flags & REQ_HARDBARRIER);
 
        if (!q->ordseq) {
                if (!is_barrier)
@@ -260,7 +253,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
         */
 
        /* Special requests are not subject to ordering rules. */
-       if (!blk_fs_request(rq) &&
+       if (rq->cmd_type != REQ_TYPE_FS &&
            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
                return true;
 
@@ -285,26 +278,31 @@ static void bio_end_empty_barrier(struct bio *bio, int err)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }
-
-       complete(bio->bi_private);
+       if (bio->bi_private)
+               complete(bio->bi_private);
+       bio_put(bio);
 }
 
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:      blockdev to issue flush for
+ * @gfp_mask:  memory allocation flags (for bio_alloc)
  * @error_sector:      error sector
+ * @flags:     BLKDEV_IFL_* flags to control behaviour
  *
  * Description:
  *    Issue a flush for the block device in question. Caller can supply
  *    room for storing the error offset in case of a flush error, if they
- *    wish to.
+ *    wish to. If the WAIT flag is not passed, the caller may only check
+ *    that the request was queued internally for later handling.
  */
-int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+               sector_t *error_sector, unsigned long flags)
 {
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
-       int ret;
+       int ret = 0;
 
        if (bdev->bd_disk == NULL)
                return -ENXIO;
@@ -313,23 +311,34 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
        if (!q)
                return -ENXIO;
 
-       bio = bio_alloc(GFP_KERNEL, 0);
+       /*
+        * some block devices may not have their queue correctly set up here
+        * (e.g. loop device without a backing file) and so issuing a flush
+        * here will panic. Ensure there is a request function before issuing
+        * the barrier.
+        */
+       if (!q->make_request_fn)
+               return -ENXIO;
+
+       bio = bio_alloc(gfp_mask, 0);
        bio->bi_end_io = bio_end_empty_barrier;
-       bio->bi_private = &wait;
        bio->bi_bdev = bdev;
-       submit_bio(WRITE_BARRIER, bio);
-
-       wait_for_completion(&wait);
+       if (test_bit(BLKDEV_WAIT, &flags))
+               bio->bi_private = &wait;
 
-       /*
-        * The driver must store the error location in ->bi_sector, if
-        * it supports it. For non-stacked drivers, this should be copied
-        * from blk_rq_pos(rq).
-        */
-       if (error_sector)
-               *error_sector = bio->bi_sector;
+       bio_get(bio);
+       submit_bio(WRITE_BARRIER, bio);
+       if (test_bit(BLKDEV_WAIT, &flags)) {
+               wait_for_completion(&wait);
+               /*
+                * The driver must store the error location in ->bi_sector, if
+                * it supports it. For non-stacked drivers, this should be
+                * copied from blk_rq_pos(rq).
+                */
+               if (error_sector)
+                       *error_sector = bio->bi_sector;
+       }
 
-       ret = 0;
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        else if (!bio_flagged(bio, BIO_UPTODATE))
@@ -339,73 +348,3 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
        return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
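
For callers, the flush helper now takes an allocation mask and a flags word in addition to the optional error sector. A usage sketch under the new signature, with BLKDEV_IFL_WAIT requesting synchronous completion (the filesystem-side wrapper name is hypothetical; kernel code including <linux/blkdev.h> is assumed):

	/* Hypothetical sync path: flush the device's write cache and wait.
	 * Without BLKDEV_IFL_WAIT the bio is only submitted; completion is
	 * not awaited and no error sector can be reported.
	 */
	static int myfs_flush_device(struct block_device *bdev)
	{
		return blkdev_issue_flush(bdev, GFP_KERNEL, NULL,
					  BLKDEV_IFL_WAIT);
	}
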
-
-static void blkdev_discard_end_io(struct bio *bio, int err)
-{
-       if (err) {
-               if (err == -EOPNOTSUPP)
-                       set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-               clear_bit(BIO_UPTODATE, &bio->bi_flags);
-       }
-
-       bio_put(bio);
-}
-
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev:      blockdev to issue discard for
- * @sector:    start sector
- * @nr_sects:  number of sectors to discard
- * @gfp_mask:  memory allocation flags (for bio_alloc)
- *
- * Description:
- *    Issue a discard request for the sectors in question. Does not wait.
- */
-int blkdev_issue_discard(struct block_device *bdev,
-                        sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
-{
-       struct request_queue *q;
-       struct bio *bio;
-       int ret = 0;
-
-       if (bdev->bd_disk == NULL)
-               return -ENXIO;
-
-       q = bdev_get_queue(bdev);
-       if (!q)
-               return -ENXIO;
-
-       if (!q->prepare_discard_fn)
-               return -EOPNOTSUPP;
-
-       while (nr_sects && !ret) {
-               bio = bio_alloc(gfp_mask, 0);
-               if (!bio)
-                       return -ENOMEM;
-
-               bio->bi_end_io = blkdev_discard_end_io;
-               bio->bi_bdev = bdev;
-
-               bio->bi_sector = sector;
-
-               if (nr_sects > q->max_hw_sectors) {
-                       bio->bi_size = q->max_hw_sectors << 9;
-                       nr_sects -= q->max_hw_sectors;
-                       sector += q->max_hw_sectors;
-               } else {
-                       bio->bi_size = nr_sects << 9;
-                       nr_sects = 0;
-               }
-               bio_get(bio);
-               submit_bio(DISCARD_BARRIER, bio);
-
-               /* Check if it failed immediately */
-               if (bio_flagged(bio, BIO_EOPNOTSUPP))
-                       ret = -EOPNOTSUPP;
-               else if (!bio_flagged(bio, BIO_UPTODATE))
-                       ret = -EIO;
-               bio_put(bio);
-       }
-       return ret;
-}
-EXPORT_SYMBOL(blkdev_issue_discard);
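
The discard helper is dropped from this file as part of the same series; in this kernel line it appears to have moved to block/blk-lib.c with a flags-based signature paralleling blkdev_issue_flush above. A hedged sketch of a caller against that relocated interface (the wrapper name is hypothetical):

	/* Hypothetical wrapper: discard a range and wait for completion,
	 * assuming the blk-lib.c signature of this kernel series.
	 */
	static int myfs_discard_range(struct block_device *bdev,
				      sector_t sector, sector_t nr_sects)
	{
		return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
					    BLKDEV_IFL_WAIT);
	}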