block/blk-barrier.c
/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/**
 * blk_queue_ordered - set the ordered write mode for a queue
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win.  Block drivers supporting this
 *   feature should call this function to indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered)
{
	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;

	return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
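
/*
 * Example (illustrative only, not part of this file): a driver for a
 * device with a write-back cache that honours FLUSH and FUA might pick
 * a drain-based ordered mode at init time.  "mydrv_init_queue" is a
 * hypothetical helper, not a real kernel function.
 */
static void mydrv_init_queue(struct request_queue *q)
{
	/* Drain in-flight requests, preflush, then FUA the barrier write. */
	if (blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FUA))
		printk(KERN_WARNING "mydrv: unsupported ordered mode\n");
}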

/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}
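
/*
 * Note (added for clarity): q->ordseq accumulates QUEUE_ORDSEQ_* bits as
 * stages complete; ffz() finds the lowest bit still clear, so the current
 * stage is the first one that has not finished yet (0 means no ordered
 * sequence is in progress).
 */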

unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (rq->cmd_type != REQ_TYPE_FS)
		return QUEUE_ORDSEQ_DRAIN;

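	/*
	 * The barrier flips the queue's ordered color when it is queued,
	 * so fs requests issued before the barrier share its color and
	 * must drain, while those issued after it compare unequal and
	 * sort past the end of the sequence.
	 */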
	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
	struct request *rq;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return false;

	/*
	 * Okay, sequence complete.
	 */
	q->ordseq = 0;
	rq = q->orig_bar_rq;
	__blk_end_request_all(rq, q->orderr);
	return true;
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_type = REQ_TYPE_FS;
	rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
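
/*
 * Note (added for clarity): flushes are issued as proxy requests owned by
 * the queue itself (pre_flush_rq/post_flush_rq) and inserted at the front
 * of the dispatch queue so nothing can overtake them; their completion
 * handlers advance the ordered-sequence state machine above.
 */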

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq)) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * An empty barrier on a write-through device with ordered
		 * tags has no command to issue, and with no command to
		 * issue, ordering by tag can't be used.  Drain instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue the ordered sequence.  As we stack requests at the head,
	 * we need to queue them in reverse order.  Note that we rely on
	 * the fact that no fs request uses ELEVATOR_INSERT_FRONT, so no
	 * fs request gets in between the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_WRITE;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete skipped sequences.  If the whole sequence is complete,
	 * return false to tell the elevator that this request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}
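
/*
 * Note (added for clarity): because each step above is stacked at the
 * front of the queue, the resulting dispatch order is PREFLUSH -> BAR ->
 * POSTFLUSH.  *rqp is left pointing at the first request to issue, or
 * NULL if the queue must drain before the sequence can proceed.
 */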

bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
				(rq->cmd_flags & REQ_HARDBARRIER);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			blk_dequeue_request(rq);
			__blk_end_request_all(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (rq->cmd_type != REQ_TYPE_FS &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return true;
}
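
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * dispatch path lets blk_do_ordered() veto, substitute, or pass through
 * the next request before handing it to the driver.  "example_fetch_next"
 * and "example_peek_request" are made-up stand-ins for the real
 * elevator/dispatch helpers.
 */
static struct request *example_fetch_next(struct request_queue *q);

static struct request *example_peek_request(struct request_queue *q)
{
	struct request *rq = example_fetch_next(q);

	if (rq && !blk_do_ordered(q, &rq))
		return NULL;	/* barriers unsupported; rq already ended */

	return rq;	/* may be NULL while the ordered sequence drains */
}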

static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	if non-NULL, where to store the sector of a flush error
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller may
 *    optionally supply room for storing the error offset in case of a
 *    flush error.  If the WAIT flag is not passed, the caller can only
 *    assume that the request was queued internally for later handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before issuing
	 * the barrier.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	if (unlikely(!bio))
		return -ENOMEM;
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in ->bi_sector, if
		 * it supports it.  For non-stacked drivers, this should be
		 * copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
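
/*
 * Example (illustrative only, not part of this file): forcing a volatile
 * write cache to stable media and waiting for the flush to finish.
 * "example_sync_cache" is a hypothetical caller.
 */
static int example_sync_cache(struct block_device *bdev)
{
	/* No error-sector storage; wait synchronously for completion. */
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
}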