block: use linux/uaccess.h in elevator.c instead of asm variant
1 /*
2  *  Block device elevator/IO-scheduler.
3  *
4  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5  *
6  * 30042000 Jens Axboe <axboe@kernel.dk> :
7  *
8  * Split the elevator a bit so that it is possible to choose a different
9  * one or even write a new "plug in". There are three pieces:
10  * - elevator_fn, inserts a new request in the queue list
11  * - elevator_merge_fn, decides whether a new buffer can be merged with
12  *   an existing request
13  * - elevator_dequeue_fn, called when a request is taken off the active list
14  *
15  * 20082000 Dave Jones <davej@suse.de> :
16  * Removed tests for max-bomb-segments, which was breaking elvtune
17  *  when run without -bN
18  *
19  * Jens:
20  * - Rework again to work with bio instead of buffer_heads
21  * - lose bi_dev comparisons, partition handling is right now
22  * - completely modularize elevator setup and teardown
23  *
24  */
25 #include <linux/kernel.h>
26 #include <linux/fs.h>
27 #include <linux/blkdev.h>
28 #include <linux/elevator.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/delay.h>
35 #include <linux/blktrace_api.h>
36 #include <linux/hash.h>
37 #include <linux/uaccess.h>
38
39 static DEFINE_SPINLOCK(elv_list_lock);
40 static LIST_HEAD(elv_list);
41
42 /*
43  * Merge hash stuff.
44  */
45 static const int elv_hash_shift = 6;
46 #define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
47 #define ELV_HASH_FN(sec)        \
48                 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
49 #define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
50 #define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
51 #define ELV_ON_HASH(rq)         (!hlist_unhashed(&(rq)->hash))
52
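/*
 * Editor's note: a stand-alone user-space sketch of the hashing above,
 * assuming a 64-bit unsigned long and a hash function modeled on the
 * kernel's multiplicative hash_long() (the constant below is
 * GOLDEN_RATIO_PRIME from the 64-bit linux/hash.h; toy_* names are
 * illustrative, not kernel API).  A request is hashed by its *end*
 * sector, so a bio that starts there can find a back-merge candidate
 * with a single bucket probe.
 */
#include <stdio.h>

#define TOY_GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
static const int toy_hash_shift = 6;            /* elv_hash_shift */

static unsigned long toy_hash_long(unsigned long val, unsigned int bits)
{
        return (val * TOY_GOLDEN_RATIO_PRIME) >> (64 - bits);
}

int main(void)
{
        unsigned long sector = 4096, nr_sectors = 8;
        unsigned long key = sector + nr_sectors;        /* rq_hash_key() */
        /* ELV_HASH_FN(ELV_HASH_BLOCK(key)): drop 3 bits, then hash */
        unsigned long bucket = toy_hash_long(key >> 3, toy_hash_shift);

        printf("end sector %lu -> bucket %lu of %d\n",
               key, bucket, 1 << toy_hash_shift);
        return 0;
}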
53 /*
54  * Query the io scheduler to see if the bio being issued by the
55  * current process may be merged with rq.
56  */
57 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
58 {
59         struct request_queue *q = rq->q;
60         elevator_t *e = q->elevator;
61
62         if (e->ops->elevator_allow_merge_fn)
63                 return e->ops->elevator_allow_merge_fn(q, rq, bio);
64
65         return 1;
66 }
67
68 /*
69  * can we safely merge with this request?
70  */
71 int elv_rq_merge_ok(struct request *rq, struct bio *bio)
72 {
73         if (!rq_mergeable(rq))
74                 return 0;
75
76         /*
77          * Don't merge file system requests with discard requests
78          */
79         if (bio_discard(bio) != bio_discard(rq->bio))
80                 return 0;
81
82         /*
83          * different data direction or already started, don't merge
84          */
85         if (bio_data_dir(bio) != rq_data_dir(rq))
86                 return 0;
87
88         /*
89          * must be same device and not a special request
90          */
91         if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
92                 return 0;
93
94         /*
95          * only merge an integrity-protected bio into a likewise-protected rq
96          */
97         if (bio_integrity(bio) != blk_integrity_rq(rq))
98                 return 0;
99
100         if (!elv_iosched_allow_merge(rq, bio))
101                 return 0;
102
103         return 1;
104 }
105 EXPORT_SYMBOL(elv_rq_merge_ok);
106
107 static inline int elv_try_merge(struct request *__rq, struct bio *bio)
108 {
109         int ret = ELEVATOR_NO_MERGE;
110
111         /*
112          * we can merge and the sequence is ok; check which merge is possible
113          */
114         if (elv_rq_merge_ok(__rq, bio)) {
115                 if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
116                         ret = ELEVATOR_BACK_MERGE;
117                 else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
118                         ret = ELEVATOR_FRONT_MERGE;
119         }
120
121         return ret;
122 }
123
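/*
 * Editor's note: the two adjacency tests above, in plain numbers (the
 * toy_* structs stand in for struct request and struct bio).  A back
 * merge appends a bio that begins exactly where the request ends; a
 * front merge prepends a bio that ends exactly where the request
 * begins.
 */
#include <assert.h>

struct toy_rq  { unsigned long sector, nr_sectors; };
struct toy_bio { unsigned long bi_sector, nr_sectors; };

static int toy_try_merge(const struct toy_rq *rq, const struct toy_bio *bio)
{
        if (rq->sector + rq->nr_sectors == bio->bi_sector)
                return 1;                       /* ELEVATOR_BACK_MERGE */
        if (rq->sector - bio->nr_sectors == bio->bi_sector)
                return 2;                       /* ELEVATOR_FRONT_MERGE */
        return 0;                               /* ELEVATOR_NO_MERGE */
}

int main(void)
{
        struct toy_rq rq     = { 100, 8 };      /* covers [100, 108) */
        struct toy_bio back  = { 108, 8 };      /* starts at rq's end */
        struct toy_bio front = { 92, 8 };       /* ends at rq's start */

        assert(toy_try_merge(&rq, &back) == 1);
        assert(toy_try_merge(&rq, &front) == 2);
        return 0;
}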
124 static struct elevator_type *elevator_find(const char *name)
125 {
126         struct elevator_type *e;
127
128         list_for_each_entry(e, &elv_list, list) {
129                 if (!strcmp(e->elevator_name, name))
130                         return e;
131         }
132
133         return NULL;
134 }
135
136 static void elevator_put(struct elevator_type *e)
137 {
138         module_put(e->elevator_owner);
139 }
140
141 static struct elevator_type *elevator_get(const char *name)
142 {
143         struct elevator_type *e;
144
145         spin_lock(&elv_list_lock);
146
147         e = elevator_find(name);
148         if (!e) {
149                 char elv[ELV_NAME_MAX + strlen("-iosched")];
150
151                 spin_unlock(&elv_list_lock);
152
153                 if (!strcmp(name, "anticipatory"))
154                         sprintf(elv, "as-iosched");
155                 else
156                         sprintf(elv, "%s-iosched", name);
157
158                 request_module("%s", elv);
159                 spin_lock(&elv_list_lock);
160                 e = elevator_find(name);
161         }
162
163         if (e && !try_module_get(e->elevator_owner))
164                 e = NULL;
165
166         spin_unlock(&elv_list_lock);
167
168         return e;
169 }
170
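/*
 * Editor's note: when the named scheduler isn't registered yet,
 * elevator_get() drops elv_list_lock, asks kmod to load
 * "<name>-iosched" (mapping the legacy "as" spelling to "as-iosched"),
 * and retries the lookup; try_module_get() then pins the module for
 * as long as a queue is using it.
 */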
171 static void *elevator_init_queue(struct request_queue *q,
172                                  struct elevator_queue *eq)
173 {
174         return eq->ops->elevator_init_fn(q);
175 }
176
177 static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
178                            void *data)
179 {
180         q->elevator = eq;
181         eq->elevator_data = data;
182 }
183
184 static char chosen_elevator[16];
185
186 static int __init elevator_setup(char *str)
187 {
188         /*
189          * Be backwards-compatible with previous kernels, so users
190          * won't get the wrong elevator.
191          */
192         if (!strcmp(str, "as"))
193                 strcpy(chosen_elevator, "anticipatory");
194         else
195                 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
196         return 1;
197 }
198
199 __setup("elevator=", elevator_setup);
200
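/*
 * Editor's note: the __setup() hook above makes the default scheduler
 * selectable at boot, e.g. "elevator=deadline" on the kernel command
 * line; "elevator=as" is rewritten to "anticipatory" for backwards
 * compatibility before being stored in chosen_elevator.
 */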
201 static struct kobj_type elv_ktype;
202
203 static elevator_t *elevator_alloc(struct request_queue *q,
204                                   struct elevator_type *e)
205 {
206         elevator_t *eq;
207         int i;
208
209         eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
210         if (unlikely(!eq))
211                 goto err;
212
213         eq->ops = &e->ops;
214         eq->elevator_type = e;
215         kobject_init(&eq->kobj, &elv_ktype);
216         mutex_init(&eq->sysfs_lock);
217
218         eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
219                                         GFP_KERNEL, q->node);
220         if (!eq->hash)
221                 goto err;
222
223         for (i = 0; i < ELV_HASH_ENTRIES; i++)
224                 INIT_HLIST_HEAD(&eq->hash[i]);
225
226         return eq;
227 err:
228         kfree(eq);
229         elevator_put(e);
230         return NULL;
231 }
232
233 static void elevator_release(struct kobject *kobj)
234 {
235         elevator_t *e = container_of(kobj, elevator_t, kobj);
236
237         elevator_put(e->elevator_type);
238         kfree(e->hash);
239         kfree(e);
240 }
241
242 int elevator_init(struct request_queue *q, char *name)
243 {
244         struct elevator_type *e = NULL;
245         struct elevator_queue *eq;
246         int ret = 0;
247         void *data;
248
249         INIT_LIST_HEAD(&q->queue_head);
250         q->last_merge = NULL;
251         q->end_sector = 0;
252         q->boundary_rq = NULL;
253
254         if (name) {
255                 e = elevator_get(name);
256                 if (!e)
257                         return -EINVAL;
258         }
259
260         if (!e && *chosen_elevator) {
261                 e = elevator_get(chosen_elevator);
262                 if (!e)
263                         printk(KERN_ERR "I/O scheduler %s not found\n",
264                                                         chosen_elevator);
265         }
266
267         if (!e) {
268                 e = elevator_get(CONFIG_DEFAULT_IOSCHED);
269                 if (!e) {
270                         printk(KERN_ERR
271                                 "Default I/O scheduler not found. " \
272                                 "Using noop.\n");
273                         e = elevator_get("noop");
274                 }
275         }
276
277         eq = elevator_alloc(q, e);
278         if (!eq)
279                 return -ENOMEM;
280
281         data = elevator_init_queue(q, eq);
282         if (!data) {
283                 kobject_put(&eq->kobj);
284                 return -ENOMEM;
285         }
286
287         elevator_attach(q, eq, data);
288         return ret;
289 }
290 EXPORT_SYMBOL(elevator_init);
291
292 void elevator_exit(elevator_t *e)
293 {
294         mutex_lock(&e->sysfs_lock);
295         if (e->ops->elevator_exit_fn)
296                 e->ops->elevator_exit_fn(e);
297         e->ops = NULL;
298         mutex_unlock(&e->sysfs_lock);
299
300         kobject_put(&e->kobj);
301 }
302 EXPORT_SYMBOL(elevator_exit);
303
304 static void elv_activate_rq(struct request_queue *q, struct request *rq)
305 {
306         elevator_t *e = q->elevator;
307
308         if (e->ops->elevator_activate_req_fn)
309                 e->ops->elevator_activate_req_fn(q, rq);
310 }
311
312 static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
313 {
314         elevator_t *e = q->elevator;
315
316         if (e->ops->elevator_deactivate_req_fn)
317                 e->ops->elevator_deactivate_req_fn(q, rq);
318 }
319
320 static inline void __elv_rqhash_del(struct request *rq)
321 {
322         hlist_del_init(&rq->hash);
323 }
324
325 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
326 {
327         if (ELV_ON_HASH(rq))
328                 __elv_rqhash_del(rq);
329 }
330
331 static void elv_rqhash_add(struct request_queue *q, struct request *rq)
332 {
333         elevator_t *e = q->elevator;
334
335         BUG_ON(ELV_ON_HASH(rq));
336         hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
337 }
338
339 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
340 {
341         __elv_rqhash_del(rq);
342         elv_rqhash_add(q, rq);
343 }
344
345 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
346 {
347         elevator_t *e = q->elevator;
348         struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
349         struct hlist_node *entry, *next;
350         struct request *rq;
351
352         hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
353                 BUG_ON(!ELV_ON_HASH(rq));
354
355                 if (unlikely(!rq_mergeable(rq))) {
356                         __elv_rqhash_del(rq);
357                         continue;
358                 }
359
360                 if (rq_hash_key(rq) == offset)
361                         return rq;
362         }
363
364         return NULL;
365 }
366
367 /*
368  * RB-tree support functions for inserting/lookup/removal of requests
369  * in a sorted RB tree.
370  */
371 struct request *elv_rb_add(struct rb_root *root, struct request *rq)
372 {
373         struct rb_node **p = &root->rb_node;
374         struct rb_node *parent = NULL;
375         struct request *__rq;
376
377         while (*p) {
378                 parent = *p;
379                 __rq = rb_entry(parent, struct request, rb_node);
380
381                 if (rq->sector < __rq->sector)
382                         p = &(*p)->rb_left;
383                 else if (rq->sector > __rq->sector)
384                         p = &(*p)->rb_right;
385                 else
386                         return __rq;
387         }
388
389         rb_link_node(&rq->rb_node, parent, p);
390         rb_insert_color(&rq->rb_node, root);
391         return NULL;
392 }
393 EXPORT_SYMBOL(elv_rb_add);
394
395 void elv_rb_del(struct rb_root *root, struct request *rq)
396 {
397         BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
398         rb_erase(&rq->rb_node, root);
399         RB_CLEAR_NODE(&rq->rb_node);
400 }
401 EXPORT_SYMBOL(elv_rb_del);
402
403 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
404 {
405         struct rb_node *n = root->rb_node;
406         struct request *rq;
407
408         while (n) {
409                 rq = rb_entry(n, struct request, rb_node);
410
411                 if (sector < rq->sector)
412                         n = n->rb_left;
413                 else if (sector > rq->sector)
414                         n = n->rb_right;
415                 else
416                         return rq;
417         }
418
419         return NULL;
420 }
421 EXPORT_SYMBOL(elv_rb_find);
422
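/*
 * Editor's note: a user-space sketch of the insert/find contract
 * above, substituting a plain (unbalanced) binary search tree for the
 * kernel rbtree (toy_* names are local stand-ins).  The key property
 * mirrored from elv_rb_add(): nodes are ordered by start sector, and
 * inserting a duplicate key returns the existing node instead of
 * linking the new one.
 */
#include <stdio.h>

struct toy_node { unsigned long sector; struct toy_node *l, *r; };

static struct toy_node *toy_add(struct toy_node **root, struct toy_node *n)
{
        struct toy_node **p = root;

        while (*p) {
                if (n->sector < (*p)->sector)
                        p = &(*p)->l;
                else if (n->sector > (*p)->sector)
                        p = &(*p)->r;
                else
                        return *p;      /* duplicate: caller keeps old */
        }
        *p = n;                         /* n->l and n->r must be NULL */
        return NULL;
}

static struct toy_node *toy_find(struct toy_node *root, unsigned long sector)
{
        while (root && root->sector != sector)
                root = sector < root->sector ? root->l : root->r;
        return root;
}

int main(void)
{
        struct toy_node a = { 64, NULL, NULL }, b = { 8, NULL, NULL };
        struct toy_node *root = NULL;

        toy_add(&root, &a);
        toy_add(&root, &b);
        printf("found sector %lu\n", toy_find(root, 8)->sector);
        return 0;
}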
423 /*
424  * Insert rq into dispatch queue of q.  Queue lock must be held on
425  * entry.  rq is sorted into the dispatch queue. To be used by
426  * specific elevators.
427  */
428 void elv_dispatch_sort(struct request_queue *q, struct request *rq)
429 {
430         sector_t boundary;
431         struct list_head *entry;
432         int stop_flags;
433
434         if (q->last_merge == rq)
435                 q->last_merge = NULL;
436
437         elv_rqhash_del(q, rq);
438
439         q->nr_sorted--;
440
441         boundary = q->end_sector;
442         stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
443         list_for_each_prev(entry, &q->queue_head) {
444                 struct request *pos = list_entry_rq(entry);
445
446                 if (blk_discard_rq(rq) != blk_discard_rq(pos))
447                         break;
448                 if (rq_data_dir(rq) != rq_data_dir(pos))
449                         break;
450                 if (pos->cmd_flags & stop_flags)
451                         break;
452                 if (rq->sector >= boundary) {
453                         if (pos->sector < boundary)
454                                 continue;
455                 } else {
456                         if (pos->sector >= boundary)
457                                 break;
458                 }
459                 if (rq->sector >= pos->sector)
460                         break;
461         }
462
463         list_add(&rq->queuelist, entry);
464 }
465 EXPORT_SYMBOL(elv_dispatch_sort);
466
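/*
 * Editor's note: the scan above implements a one-way elevator order
 * relative to q->end_sector.  Extracted as a stand-alone comparator
 * (a sketch with bare integers): sectors at or past the boundary are
 * dispatched first in ascending order, then the wrapped-around
 * sectors below it, so the head keeps sweeping in one direction.
 */
#include <stdio.h>

/* nonzero if sector a should be dispatched before sector b */
static int toy_dispatch_before(unsigned long a, unsigned long b,
                               unsigned long boundary)
{
        int a_wrapped = a < boundary;
        int b_wrapped = b < boundary;

        if (a_wrapped != b_wrapped)
                return b_wrapped;       /* unwrapped group goes first */
        return a < b;                   /* within a group: ascending */
}

int main(void)
{
        /* boundary 100: sectors 100.. precede the wrapped 0..99 */
        printf("%d %d %d\n",
               toy_dispatch_before(150, 10, 100),       /* 1 */
               toy_dispatch_before(10, 50, 100),        /* 1 */
               toy_dispatch_before(50, 150, 100));      /* 0 */
        return 0;
}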
467 /*
468  * Insert rq into dispatch queue of q.  Queue lock must be held on
469  * entry.  rq is added to the back of the dispatch queue. To be used by
470  * specific elevators.
471  */
472 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
473 {
474         if (q->last_merge == rq)
475                 q->last_merge = NULL;
476
477         elv_rqhash_del(q, rq);
478
479         q->nr_sorted--;
480
481         q->end_sector = rq_end_sector(rq);
482         q->boundary_rq = rq;
483         list_add_tail(&rq->queuelist, &q->queue_head);
484 }
485 EXPORT_SYMBOL(elv_dispatch_add_tail);
486
487 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
488 {
489         elevator_t *e = q->elevator;
490         struct request *__rq;
491         int ret;
492
493         /*
494          * First try one-hit cache.
495          */
496         if (q->last_merge) {
497                 ret = elv_try_merge(q->last_merge, bio);
498                 if (ret != ELEVATOR_NO_MERGE) {
499                         *req = q->last_merge;
500                         return ret;
501                 }
502         }
503
504         if (blk_queue_nomerges(q))
505                 return ELEVATOR_NO_MERGE;
506
507         /*
508          * See if our hash lookup can find a potential backmerge.
509          */
510         __rq = elv_rqhash_find(q, bio->bi_sector);
511         if (__rq && elv_rq_merge_ok(__rq, bio)) {
512                 *req = __rq;
513                 return ELEVATOR_BACK_MERGE;
514         }
515
516         if (e->ops->elevator_merge_fn)
517                 return e->ops->elevator_merge_fn(q, req, bio);
518
519         return ELEVATOR_NO_MERGE;
520 }
521
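/*
 * Editor's note: elv_merge() is a three-tier lookup, cheapest first:
 * (1) the one-hit q->last_merge cache, (2) the O(1) hash probe keyed
 * by the bio's start sector, which can only find back merges, and
 * (3) the scheduler's own elevator_merge_fn (typically an rbtree walk
 * that finds front merges), so common sequential IO rarely has to
 * reach the tree.
 */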
522 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
523 {
524         elevator_t *e = q->elevator;
525
526         if (e->ops->elevator_merged_fn)
527                 e->ops->elevator_merged_fn(q, rq, type);
528
529         if (type == ELEVATOR_BACK_MERGE)
530                 elv_rqhash_reposition(q, rq);
531
532         q->last_merge = rq;
533 }
534
535 void elv_merge_requests(struct request_queue *q, struct request *rq,
536                              struct request *next)
537 {
538         elevator_t *e = q->elevator;
539
540         if (e->ops->elevator_merge_req_fn)
541                 e->ops->elevator_merge_req_fn(q, rq, next);
542
543         elv_rqhash_reposition(q, rq);
544         elv_rqhash_del(q, next);
545
546         q->nr_sorted--;
547         q->last_merge = rq;
548 }
549
550 void elv_requeue_request(struct request_queue *q, struct request *rq)
551 {
552         /*
553          * it already went through dequeue, so we need to decrement the
554          * in_flight count again
555          */
556         if (blk_account_rq(rq)) {
557                 q->in_flight--;
558                 if (blk_sorted_rq(rq))
559                         elv_deactivate_rq(q, rq);
560         }
561
562         rq->cmd_flags &= ~REQ_STARTED;
563
564         elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
565 }
566
567 static void elv_drain_elevator(struct request_queue *q)
568 {
569         static int printed;
570         while (q->elevator->ops->elevator_dispatch_fn(q, 1))
571                 ;
572         if (q->nr_sorted == 0)
573                 return;
574         if (printed++ < 10) {
575                 printk(KERN_ERR "%s: forced dispatching is broken "
576                        "(nr_sorted=%u), please report this\n",
577                        q->elevator->elevator_type->elevator_name, q->nr_sorted);
578         }
579 }
580
581 void elv_insert(struct request_queue *q, struct request *rq, int where)
582 {
583         struct list_head *pos;
584         unsigned ordseq;
585         int unplug_it = 1;
586
587         blk_add_trace_rq(q, rq, BLK_TA_INSERT);
588
589         rq->q = q;
590
591         switch (where) {
592         case ELEVATOR_INSERT_FRONT:
593                 rq->cmd_flags |= REQ_SOFTBARRIER;
594
595                 list_add(&rq->queuelist, &q->queue_head);
596                 break;
597
598         case ELEVATOR_INSERT_BACK:
599                 rq->cmd_flags |= REQ_SOFTBARRIER;
600                 elv_drain_elevator(q);
601                 list_add_tail(&rq->queuelist, &q->queue_head);
602                 /*
603                  * We kick the queue here for the following reasons.
604                  * - The elevator might have returned NULL previously
605                  *   to delay requests and is returning them now.  As the
606                  *   queue wasn't empty before this request, ll_rw_blk
607                  *   won't run the queue on return, resulting in a hang.
608                  * - Usually, back-inserted requests won't be merged
609                  *   with anything.  There's no point in delaying queue
610                  *   processing.
611                  */
612                 blk_remove_plug(q);
613                 q->request_fn(q);
614                 break;
615
616         case ELEVATOR_INSERT_SORT:
617                 BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
618                 rq->cmd_flags |= REQ_SORTED;
619                 q->nr_sorted++;
620                 if (rq_mergeable(rq)) {
621                         elv_rqhash_add(q, rq);
622                         if (!q->last_merge)
623                                 q->last_merge = rq;
624                 }
625
626                 /*
627                  * Some ioscheds (cfq) run q->request_fn directly, so
628                  * rq cannot be accessed after calling
629                  * elevator_add_req_fn.
630                  */
631                 q->elevator->ops->elevator_add_req_fn(q, rq);
632                 break;
633
634         case ELEVATOR_INSERT_REQUEUE:
635                 /*
636                  * If ordered flush isn't in progress, we do front
637                  * insertion; otherwise, requests should be requeued
638                  * in ordseq order.
639                  */
640                 rq->cmd_flags |= REQ_SOFTBARRIER;
641
642                 /*
643                  * Most requeues happen because of a busy condition,
644                  * don't force unplug of the queue for that case.
645                  */
646                 unplug_it = 0;
647
648                 if (q->ordseq == 0) {
649                         list_add(&rq->queuelist, &q->queue_head);
650                         break;
651                 }
652
653                 ordseq = blk_ordered_req_seq(rq);
654
655                 list_for_each(pos, &q->queue_head) {
656                         struct request *pos_rq = list_entry_rq(pos);
657                         if (ordseq <= blk_ordered_req_seq(pos_rq))
658                                 break;
659                 }
660
661                 list_add_tail(&rq->queuelist, pos);
662                 break;
663
664         default:
665                 printk(KERN_ERR "%s: bad insertion point %d\n",
666                        __func__, where);
667                 BUG();
668         }
669
670         if (unplug_it && blk_queue_plugged(q)) {
671                 int nrq = q->rq.count[READ] + q->rq.count[WRITE]
672                         - q->in_flight;
673
674                 if (nrq >= q->unplug_thresh)
675                         __generic_unplug_device(q);
676         }
677 }
678
679 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
680                        int plug)
681 {
682         if (q->ordcolor)
683                 rq->cmd_flags |= REQ_ORDERED_COLOR;
684
685         if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
686                 /*
687                  * toggle ordered color
688                  */
689                 if (blk_barrier_rq(rq))
690                         q->ordcolor ^= 1;
691
692                 /*
693                  * barriers implicitly indicate back insertion
694                  */
695                 if (where == ELEVATOR_INSERT_SORT)
696                         where = ELEVATOR_INSERT_BACK;
697
698                 /*
699                  * this request is a scheduling boundary, update
700                  * end_sector
701                  */
702                 if (blk_fs_request(rq) || blk_discard_rq(rq)) {
703                         q->end_sector = rq_end_sector(rq);
704                         q->boundary_rq = rq;
705                 }
706         } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
707                     where == ELEVATOR_INSERT_SORT)
708                 where = ELEVATOR_INSERT_BACK;
709
710         if (plug)
711                 blk_plug_device(q);
712
713         elv_insert(q, rq, where);
714 }
715 EXPORT_SYMBOL(__elv_add_request);
716
717 void elv_add_request(struct request_queue *q, struct request *rq, int where,
718                      int plug)
719 {
720         unsigned long flags;
721
722         spin_lock_irqsave(q->queue_lock, flags);
723         __elv_add_request(q, rq, where, plug);
724         spin_unlock_irqrestore(q->queue_lock, flags);
725 }
726 EXPORT_SYMBOL(elv_add_request);
727
728 static inline struct request *__elv_next_request(struct request_queue *q)
729 {
730         struct request *rq;
731
732         while (1) {
733                 while (!list_empty(&q->queue_head)) {
734                         rq = list_entry_rq(q->queue_head.next);
735                         if (blk_do_ordered(q, &rq))
736                                 return rq;
737                 }
738
739                 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
740                         return NULL;
741         }
742 }
743
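/*
 * Editor's note: two nested loops above: the inner one hands out
 * requests already on the dispatch list (with blk_do_ordered()
 * gating them for barrier sequencing), and the outer one asks the io
 * scheduler to refill that list; NULL is returned only once the
 * scheduler has nothing left to dispatch.
 */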
744 struct request *elv_next_request(struct request_queue *q)
745 {
746         struct request *rq;
747         int ret;
748
749         while ((rq = __elv_next_request(q)) != NULL) {
750                 /*
751                  * Kill the empty barrier placeholder; the driver must
752                  * never see it.
753                  */
754                 if (blk_empty_barrier(rq)) {
755                         end_queued_request(rq, 1);
756                         continue;
757                 }
758                 if (!(rq->cmd_flags & REQ_STARTED)) {
759                         /*
760                          * This is the first time the device driver
761                          * sees this request (possibly after
762                          * requeueing).  Notify IO scheduler.
763                          */
764                         if (blk_sorted_rq(rq))
765                                 elv_activate_rq(q, rq);
766
767                         /*
768                          * just mark it as started even if we don't start
769                          * it; a request that has been delayed should
770                          * not be passed by new incoming requests
771                          */
772                         rq->cmd_flags |= REQ_STARTED;
773                         blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
774                 }
775
776                 if (!q->boundary_rq || q->boundary_rq == rq) {
777                         q->end_sector = rq_end_sector(rq);
778                         q->boundary_rq = NULL;
779                 }
780
781                 if (rq->cmd_flags & REQ_DONTPREP)
782                         break;
783
784                 if (q->dma_drain_size && rq->data_len) {
785                         /*
786                          * make sure space for the drain appears; we
787                          * know we can do this because max_hw_segments
788                          * has been adjusted to be one fewer than the
789                          * device can handle
790                          */
791                         rq->nr_phys_segments++;
792                 }
793
794                 if (!q->prep_rq_fn)
795                         break;
796
797                 ret = q->prep_rq_fn(q, rq);
798                 if (ret == BLKPREP_OK) {
799                         break;
800                 } else if (ret == BLKPREP_DEFER) {
801                         /*
802                          * the request may have been (partially) prepped.
803                          * we need to keep this request in the front to
804                          * avoid resource deadlock.  REQ_STARTED will
805                          * prevent other fs requests from passing this one.
806                          */
807                         if (q->dma_drain_size && rq->data_len &&
808                             !(rq->cmd_flags & REQ_DONTPREP)) {
809                                 /*
810                                  * remove the space for the drain we added
811                                  * so that we don't add it again
812                                  */
813                                 --rq->nr_phys_segments;
814                         }
815
816                         rq = NULL;
817                         break;
818                 } else if (ret == BLKPREP_KILL) {
819                         rq->cmd_flags |= REQ_QUIET;
820                         end_queued_request(rq, 0);
821                 } else {
822                         printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
823                         break;
824                 }
825         }
826
827         return rq;
828 }
829 EXPORT_SYMBOL(elv_next_request);
830
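/*
 * Editor's note: the prep loop above is a small state machine on the
 * prep_rq_fn return value: BLKPREP_OK hands the request to the
 * driver, BLKPREP_DEFER leaves it queued and returns NULL (with
 * REQ_STARTED keeping later fs requests from overtaking it), and
 * BLKPREP_KILL completes the request with an error and moves on to
 * the next one.
 */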
831 void elv_dequeue_request(struct request_queue *q, struct request *rq)
832 {
833         BUG_ON(list_empty(&rq->queuelist));
834         BUG_ON(ELV_ON_HASH(rq));
835
836         list_del_init(&rq->queuelist);
837
838         /*
839          * the time frame between a request being removed from the lists
840          * and when it is freed is accounted as io that is in progress at
841          * the driver side.
842          */
843         if (blk_account_rq(rq))
844                 q->in_flight++;
845 }
846 EXPORT_SYMBOL(elv_dequeue_request);
847
848 int elv_queue_empty(struct request_queue *q)
849 {
850         elevator_t *e = q->elevator;
851
852         if (!list_empty(&q->queue_head))
853                 return 0;
854
855         if (e->ops->elevator_queue_empty_fn)
856                 return e->ops->elevator_queue_empty_fn(q);
857
858         return 1;
859 }
860 EXPORT_SYMBOL(elv_queue_empty);
861
862 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
863 {
864         elevator_t *e = q->elevator;
865
866         if (e->ops->elevator_latter_req_fn)
867                 return e->ops->elevator_latter_req_fn(q, rq);
868         return NULL;
869 }
870
871 struct request *elv_former_request(struct request_queue *q, struct request *rq)
872 {
873         elevator_t *e = q->elevator;
874
875         if (e->ops->elevator_former_req_fn)
876                 return e->ops->elevator_former_req_fn(q, rq);
877         return NULL;
878 }
879
880 int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
881 {
882         elevator_t *e = q->elevator;
883
884         if (e->ops->elevator_set_req_fn)
885                 return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
886
887         rq->elevator_private = NULL;
888         return 0;
889 }
890
891 void elv_put_request(struct request_queue *q, struct request *rq)
892 {
893         elevator_t *e = q->elevator;
894
895         if (e->ops->elevator_put_req_fn)
896                 e->ops->elevator_put_req_fn(rq);
897 }
898
899 int elv_may_queue(struct request_queue *q, int rw)
900 {
901         elevator_t *e = q->elevator;
902
903         if (e->ops->elevator_may_queue_fn)
904                 return e->ops->elevator_may_queue_fn(q, rw);
905
906         return ELV_MQUEUE_MAY;
907 }
908
909 void elv_completed_request(struct request_queue *q, struct request *rq)
910 {
911         elevator_t *e = q->elevator;
912
913         /*
914          * the request was released by the driver, so the io must be done
915          */
916         if (blk_account_rq(rq)) {
917                 q->in_flight--;
918                 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
919                         e->ops->elevator_completed_req_fn(q, rq);
920         }
921
922         /*
923          * Check if the queue is waiting for fs requests to be
924          * drained for the flush sequence.
925          */
926         if (unlikely(q->ordseq)) {
927                 struct request *first_rq = list_entry_rq(q->queue_head.next);
928                 if (q->in_flight == 0 &&
929                     blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
930                     blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
931                         blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
932                         q->request_fn(q);
933                 }
934         }
935 }
936
937 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
938
939 static ssize_t
940 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
941 {
942         elevator_t *e = container_of(kobj, elevator_t, kobj);
943         struct elv_fs_entry *entry = to_elv(attr);
944         ssize_t error;
945
946         if (!entry->show)
947                 return -EIO;
948
949         mutex_lock(&e->sysfs_lock);
950         error = e->ops ? entry->show(e, page) : -ENOENT;
951         mutex_unlock(&e->sysfs_lock);
952         return error;
953 }
954
955 static ssize_t
956 elv_attr_store(struct kobject *kobj, struct attribute *attr,
957                const char *page, size_t length)
958 {
959         elevator_t *e = container_of(kobj, elevator_t, kobj);
960         struct elv_fs_entry *entry = to_elv(attr);
961         ssize_t error;
962
963         if (!entry->store)
964                 return -EIO;
965
966         mutex_lock(&e->sysfs_lock);
967         error = e->ops ? entry->store(e, page, length) : -ENOENT;
968         mutex_unlock(&e->sysfs_lock);
969         return error;
970 }
971
972 static struct sysfs_ops elv_sysfs_ops = {
973         .show   = elv_attr_show,
974         .store  = elv_attr_store,
975 };
976
977 static struct kobj_type elv_ktype = {
978         .sysfs_ops      = &elv_sysfs_ops,
979         .release        = elevator_release,
980 };
981
982 int elv_register_queue(struct request_queue *q)
983 {
984         elevator_t *e = q->elevator;
985         int error;
986
987         error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
988         if (!error) {
989                 struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
990                 if (attr) {
991                         while (attr->attr.name) {
992                                 if (sysfs_create_file(&e->kobj, &attr->attr))
993                                         break;
994                                 attr++;
995                         }
996                 }
997                 kobject_uevent(&e->kobj, KOBJ_ADD);
998         }
999         return error;
1000 }
1001
1002 static void __elv_unregister_queue(elevator_t *e)
1003 {
1004         kobject_uevent(&e->kobj, KOBJ_REMOVE);
1005         kobject_del(&e->kobj);
1006 }
1007
1008 void elv_unregister_queue(struct request_queue *q)
1009 {
1010         if (q)
1011                 __elv_unregister_queue(q->elevator);
1012 }
1013
1014 void elv_register(struct elevator_type *e)
1015 {
1016         char *def = "";
1017
1018         spin_lock(&elv_list_lock);
1019         BUG_ON(elevator_find(e->elevator_name));
1020         list_add_tail(&e->list, &elv_list);
1021         spin_unlock(&elv_list_lock);
1022
1023         if (!strcmp(e->elevator_name, chosen_elevator) ||
1024                         (!*chosen_elevator &&
1025                          !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
1026                                 def = " (default)";
1027
1028         printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
1029                                                                 def);
1030 }
1031 EXPORT_SYMBOL_GPL(elv_register);
1032
1033 void elv_unregister(struct elevator_type *e)
1034 {
1035         struct task_struct *g, *p;
1036
1037         /*
1038          * Iterate over every thread in the system to trim its io context.
1039          */
1040         if (e->ops.trim) {
1041                 read_lock(&tasklist_lock);
1042                 do_each_thread(g, p) {
1043                         task_lock(p);
1044                         if (p->io_context)
1045                                 e->ops.trim(p->io_context);
1046                         task_unlock(p);
1047                 } while_each_thread(g, p);
1048                 read_unlock(&tasklist_lock);
1049         }
1050
1051         spin_lock(&elv_list_lock);
1052         list_del_init(&e->list);
1053         spin_unlock(&elv_list_lock);
1054 }
1055 EXPORT_SYMBOL_GPL(elv_unregister);
1056
1057 /*
1058  * switch to new_e io scheduler. be careful not to introduce deadlocks -
1059  * we don't free the old io scheduler before we have allocated what we
1060  * need for the new one. this way we have a chance of going back to the old
1061  * one if the new one fails init for some reason.
1062  */
1063 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1064 {
1065         elevator_t *old_elevator, *e;
1066         void *data;
1067
1068         /*
1069          * Allocate new elevator
1070          */
1071         e = elevator_alloc(q, new_e);
1072         if (!e)
1073                 return 0;
1074
1075         data = elevator_init_queue(q, e);
1076         if (!data) {
1077                 kobject_put(&e->kobj);
1078                 return 0;
1079         }
1080
1081         /*
1082          * Turn on BYPASS and drain all requests w/ elevator private data
1083          */
1084         spin_lock_irq(q->queue_lock);
1085
1086         queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
1087
1088         elv_drain_elevator(q);
1089
1090         while (q->rq.elvpriv) {
1091                 blk_remove_plug(q);
1092                 q->request_fn(q);
1093                 spin_unlock_irq(q->queue_lock);
1094                 msleep(10);
1095                 spin_lock_irq(q->queue_lock);
1096                 elv_drain_elevator(q);
1097         }
1098
1099         /*
1100          * Remember old elevator.
1101          */
1102         old_elevator = q->elevator;
1103
1104         /*
1105          * attach and start new elevator
1106          */
1107         elevator_attach(q, e, data);
1108
1109         spin_unlock_irq(q->queue_lock);
1110
1111         __elv_unregister_queue(old_elevator);
1112
1113         if (elv_register_queue(q))
1114                 goto fail_register;
1115
1116         /*
1117          * finally exit old elevator and turn off BYPASS.
1118          */
1119         elevator_exit(old_elevator);
1120         spin_lock_irq(q->queue_lock);
1121         queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1122         spin_unlock_irq(q->queue_lock);
1123
1124         blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
1125
1126         return 1;
1127
1128 fail_register:
1129         /*
1130          * switch failed, exit the new io scheduler and reattach the old
1131          * one again (along with re-adding the sysfs dir)
1132          */
1133         elevator_exit(e);
1134         q->elevator = old_elevator;
1135         elv_register_queue(q);
1136
1137         spin_lock_irq(q->queue_lock);
1138         queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1139         spin_unlock_irq(q->queue_lock);
1140
1141         return 0;
1142 }
1143
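/*
 * Editor's note: the switch sequence above is: allocate and init the
 * new elevator, set QUEUE_FLAG_ELVSWITCH so new requests bypass the
 * io scheduler, drain every request still carrying elevator-private
 * data, swap the queue's elevator pointer under the lock, and only
 * then tear down the old one; on sysfs registration failure the old
 * elevator is re-attached, which is exactly why it was not freed
 * earlier.
 */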
1144 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1145                           size_t count)
1146 {
1147         char elevator_name[ELV_NAME_MAX];
1148         size_t len;
1149         struct elevator_type *e;
1150
1151         elevator_name[sizeof(elevator_name) - 1] = '\0';
1152         strncpy(elevator_name, name, sizeof(elevator_name) - 1);
1153         len = strlen(elevator_name);
1154
1155         if (len && elevator_name[len - 1] == '\n')
1156                 elevator_name[len - 1] = '\0';
1157
1158         e = elevator_get(elevator_name);
1159         if (!e) {
1160                 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
1161                 return -EINVAL;
1162         }
1163
1164         if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1165                 elevator_put(e);
1166                 return count;
1167         }
1168
1169         if (!elevator_switch(q, e))
1170                 printk(KERN_ERR "elevator: switch to %s failed\n",
1171                                                         elevator_name);
1172         return count;
1173 }
1174
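/*
 * Editor's note: this store hook backs the per-queue sysfs scheduler
 * attribute, so the switch can be driven from user space (device name
 * hypothetical):
 *
 *     # cat /sys/block/sda/queue/scheduler
 *     noop deadline [cfq]
 *     # echo deadline > /sys/block/sda/queue/scheduler
 *
 * The show side below prints the active scheduler in brackets.
 */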
1175 ssize_t elv_iosched_show(struct request_queue *q, char *name)
1176 {
1177         elevator_t *e = q->elevator;
1178         struct elevator_type *elv = e->elevator_type;
1179         struct elevator_type *__e;
1180         int len = 0;
1181
1182         spin_lock(&elv_list_lock);
1183         list_for_each_entry(__e, &elv_list, list) {
1184                 if (!strcmp(elv->elevator_name, __e->elevator_name))
1185                         len += sprintf(name+len, "[%s] ", elv->elevator_name);
1186                 else
1187                         len += sprintf(name+len, "%s ", __e->elevator_name);
1188         }
1189         spin_unlock(&elv_list_lock);
1190
1191         len += sprintf(len+name, "\n");
1192         return len;
1193 }
1194
1195 struct request *elv_rb_former_request(struct request_queue *q,
1196                                       struct request *rq)
1197 {
1198         struct rb_node *rbprev = rb_prev(&rq->rb_node);
1199
1200         if (rbprev)
1201                 return rb_entry_rq(rbprev);
1202
1203         return NULL;
1204 }
1205 EXPORT_SYMBOL(elv_rb_former_request);
1206
1207 struct request *elv_rb_latter_request(struct request_queue *q,
1208                                       struct request *rq)
1209 {
1210         struct rb_node *rbnext = rb_next(&rq->rb_node);
1211
1212         if (rbnext)
1213                 return rb_entry_rq(rbnext);
1214
1215         return NULL;
1216 }
1217 EXPORT_SYMBOL(elv_rb_latter_request);