[linux-2.6.git] block/elevator.c
1 /*
2  *  Block device elevator/IO-scheduler.
3  *
4  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5  *
6  * 30042000 Jens Axboe <axboe@kernel.dk> :
7  *
8  * Split the elevator a bit so that it is possible to choose a different
9  * one or even write a new "plug in". There are three pieces:
10  * - elevator_fn, inserts a new request in the queue list
11  * - elevator_merge_fn, decides whether a new buffer can be merged with
12  *   an existing request
13  * - elevator_dequeue_fn, called when a request is taken off the active list
14  *
15  * 20082000 Dave Jones <davej@suse.de> :
16  * Removed tests for max-bomb-segments, which was breaking elvtune
17  *  when run without -bN
18  *
19  * Jens:
20  * - Rework again to work with bio instead of buffer_heads
21  * - lose the bi_dev comparisons, partition handling is now correct
22  * - completely modularize elevator setup and teardown
23  *
24  */
25 #include <linux/kernel.h>
26 #include <linux/fs.h>
27 #include <linux/blkdev.h>
28 #include <linux/elevator.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/delay.h>
35 #include <linux/blktrace_api.h>
36 #include <linux/hash.h>
37
38 #include <asm/uaccess.h>
39
40 static DEFINE_SPINLOCK(elv_list_lock);
41 static LIST_HEAD(elv_list);
42
43 /*
44  * Merge hash: requests are hashed on the sector they end at.
45  */
46 static const int elv_hash_shift = 6;
47 #define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
48 #define ELV_HASH_FN(sec)        \
49                 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
50 #define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
51 #define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
52 #define ELV_ON_HASH(rq)         (!hlist_unhashed(&(rq)->hash))
53
54 /*
55  * Query the io scheduler to see if a bio issued by the current process
56  * may be merged with rq.
57  */
58 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
59 {
60         struct request_queue *q = rq->q;
61         elevator_t *e = q->elevator;
62
63         if (e->ops->elevator_allow_merge_fn)
64                 return e->ops->elevator_allow_merge_fn(q, rq, bio);
65
66         return 1;
67 }
68
69 /*
70  * can we safely merge with this request?
71  */
72 int elv_rq_merge_ok(struct request *rq, struct bio *bio)
73 {
74         if (!rq_mergeable(rq))
75                 return 0;
76
77         /*
78          * Don't merge file system requests and discard requests
79          */
80         if (bio_discard(bio) != bio_discard(rq->bio))
81                 return 0;
82
83         /*
84          * different data direction or already started, don't merge
85          */
86         if (bio_data_dir(bio) != rq_data_dir(rq))
87                 return 0;
88
89         /*
90          * must be same device and not a special request
91          */
92         if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
93                 return 0;
94
95         /*
96          * only merge integrity protected bio into ditto rq
97          */
98         if (bio_integrity(bio) != blk_integrity_rq(rq))
99                 return 0;
100
101         if (!elv_iosched_allow_merge(rq, bio))
102                 return 0;
103
104         return 1;
105 }
106 EXPORT_SYMBOL(elv_rq_merge_ok);
107
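/*
 * Check whether bio lines up with __rq: a back merge appends bio right
 * after __rq's last sector, a front merge puts bio immediately in front
 * of __rq's first sector.  Returns the merge type, or ELEVATOR_NO_MERGE.
 */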
108 static inline int elv_try_merge(struct request *__rq, struct bio *bio)
109 {
110         int ret = ELEVATOR_NO_MERGE;
111
112         /*
113          * merging is allowed; check whether the sectors actually line up
114          */
115         if (elv_rq_merge_ok(__rq, bio)) {
116                 if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
117                         ret = ELEVATOR_BACK_MERGE;
118                 else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
119                         ret = ELEVATOR_FRONT_MERGE;
120         }
121
122         return ret;
123 }
124
125 static struct elevator_type *elevator_find(const char *name)
126 {
127         struct elevator_type *e;
128
129         list_for_each_entry(e, &elv_list, list) {
130                 if (!strcmp(e->elevator_name, name))
131                         return e;
132         }
133
134         return NULL;
135 }
136
137 static void elevator_put(struct elevator_type *e)
138 {
139         module_put(e->elevator_owner);
140 }
141
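/*
 * Look up an io scheduler by name and take a reference on its module.
 * If it is not registered yet, try to load "<name>-iosched" via
 * request_module() and look again.  The caller must drop the reference
 * with elevator_put().
 */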
142 static struct elevator_type *elevator_get(const char *name)
143 {
144         struct elevator_type *e;
145
146         spin_lock(&elv_list_lock);
147
148         e = elevator_find(name);
149         if (!e) {
150                 char elv[ELV_NAME_MAX + strlen("-iosched")];
151
152                 spin_unlock(&elv_list_lock);
153
154                 if (!strcmp(name, "anticipatory"))
155                         sprintf(elv, "as-iosched");
156                 else
157                         sprintf(elv, "%s-iosched", name);
158
159                 request_module("%s", elv);
160                 spin_lock(&elv_list_lock);
161                 e = elevator_find(name);
162         }
163
164         if (e && !try_module_get(e->elevator_owner))
165                 e = NULL;
166
167         spin_unlock(&elv_list_lock);
168
169         return e;
170 }
171
172 static void *elevator_init_queue(struct request_queue *q,
173                                  struct elevator_queue *eq)
174 {
175         return eq->ops->elevator_init_fn(q);
176 }
177
178 static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
179                            void *data)
180 {
181         q->elevator = eq;
182         eq->elevator_data = data;
183 }
184
185 static char chosen_elevator[16];
186
187 static int __init elevator_setup(char *str)
188 {
189         /*
190          * Be backwards-compatible with previous kernels, so users
191          * won't get the wrong elevator.
192          */
193         if (!strcmp(str, "as"))
194                 strcpy(chosen_elevator, "anticipatory");
195         else
196                 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
197         return 1;
198 }
199
200 __setup("elevator=", elevator_setup);
201
202 static struct kobj_type elv_ktype;
203
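/*
 * Allocate an elevator_t on the queue's node and set up its ops, kobject
 * and back merge hash table.  On failure the type reference taken by the
 * caller is dropped here.
 */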
204 static elevator_t *elevator_alloc(struct request_queue *q,
205                                   struct elevator_type *e)
206 {
207         elevator_t *eq;
208         int i;
209
210         eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
211         if (unlikely(!eq))
212                 goto err;
213
214         eq->ops = &e->ops;
215         eq->elevator_type = e;
216         kobject_init(&eq->kobj, &elv_ktype);
217         mutex_init(&eq->sysfs_lock);
218
219         eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
220                                         GFP_KERNEL, q->node);
221         if (!eq->hash)
222                 goto err;
223
224         for (i = 0; i < ELV_HASH_ENTRIES; i++)
225                 INIT_HLIST_HEAD(&eq->hash[i]);
226
227         return eq;
228 err:
229         kfree(eq);
230         elevator_put(e);
231         return NULL;
232 }
233
234 static void elevator_release(struct kobject *kobj)
235 {
236         elevator_t *e = container_of(kobj, elevator_t, kobj);
237
238         elevator_put(e->elevator_type);
239         kfree(e->hash);
240         kfree(e);
241 }
242
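/*
 * Pick and initialize an io scheduler for q.  An explicit "name" argument
 * wins, then the "elevator=" boot parameter (chosen_elevator), then
 * CONFIG_DEFAULT_IOSCHED, with noop as the last resort.
 */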
243 int elevator_init(struct request_queue *q, char *name)
244 {
245         struct elevator_type *e = NULL;
246         struct elevator_queue *eq;
247         int ret = 0;
248         void *data;
249
250         INIT_LIST_HEAD(&q->queue_head);
251         q->last_merge = NULL;
252         q->end_sector = 0;
253         q->boundary_rq = NULL;
254
255         if (name) {
256                 e = elevator_get(name);
257                 if (!e)
258                         return -EINVAL;
259         }
260
261         if (!e && *chosen_elevator) {
262                 e = elevator_get(chosen_elevator);
263                 if (!e)
264                         printk(KERN_ERR "I/O scheduler %s not found\n",
265                                                         chosen_elevator);
266         }
267
268         if (!e) {
269                 e = elevator_get(CONFIG_DEFAULT_IOSCHED);
270                 if (!e) {
271                         printk(KERN_ERR
272                                 "Default I/O scheduler not found. " \
273                                 "Using noop.\n");
274                         e = elevator_get("noop");
275                 }
276         }
277
278         eq = elevator_alloc(q, e);
279         if (!eq)
280                 return -ENOMEM;
281
282         data = elevator_init_queue(q, eq);
283         if (!data) {
284                 kobject_put(&eq->kobj);
285                 return -ENOMEM;
286         }
287
288         elevator_attach(q, eq, data);
289         return ret;
290 }
291 EXPORT_SYMBOL(elevator_init);
292
293 void elevator_exit(elevator_t *e)
294 {
295         mutex_lock(&e->sysfs_lock);
296         if (e->ops->elevator_exit_fn)
297                 e->ops->elevator_exit_fn(e);
298         e->ops = NULL;
299         mutex_unlock(&e->sysfs_lock);
300
301         kobject_put(&e->kobj);
302 }
303 EXPORT_SYMBOL(elevator_exit);
304
305 static void elv_activate_rq(struct request_queue *q, struct request *rq)
306 {
307         elevator_t *e = q->elevator;
308
309         if (e->ops->elevator_activate_req_fn)
310                 e->ops->elevator_activate_req_fn(q, rq);
311 }
312
313 static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
314 {
315         elevator_t *e = q->elevator;
316
317         if (e->ops->elevator_deactivate_req_fn)
318                 e->ops->elevator_deactivate_req_fn(q, rq);
319 }
320
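/*
 * Hash helpers: a request sits on the merge hash only while it is a merge
 * candidate, and must be repositioned whenever its end sector changes
 * (e.g. after a back merge).
 */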
321 static inline void __elv_rqhash_del(struct request *rq)
322 {
323         hlist_del_init(&rq->hash);
324 }
325
326 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
327 {
328         if (ELV_ON_HASH(rq))
329                 __elv_rqhash_del(rq);
330 }
331
332 static void elv_rqhash_add(struct request_queue *q, struct request *rq)
333 {
334         elevator_t *e = q->elevator;
335
336         BUG_ON(ELV_ON_HASH(rq));
337         hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
338 }
339
340 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
341 {
342         __elv_rqhash_del(rq);
343         elv_rqhash_add(q, rq);
344 }
345
346 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
347 {
348         elevator_t *e = q->elevator;
349         struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
350         struct hlist_node *entry, *next;
351         struct request *rq;
352
353         hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
354                 BUG_ON(!ELV_ON_HASH(rq));
355
356                 if (unlikely(!rq_mergeable(rq))) {
357                         __elv_rqhash_del(rq);
358                         continue;
359                 }
360
361                 if (rq_hash_key(rq) == offset)
362                         return rq;
363         }
364
365         return NULL;
366 }
367
368 /*
369  * RB-tree support functions for inserting/lookup/removal of requests
370  * in a sorted RB tree.
371  */
372 struct request *elv_rb_add(struct rb_root *root, struct request *rq)
373 {
374         struct rb_node **p = &root->rb_node;
375         struct rb_node *parent = NULL;
376         struct request *__rq;
377
378         while (*p) {
379                 parent = *p;
380                 __rq = rb_entry(parent, struct request, rb_node);
381
382                 if (rq->sector < __rq->sector)
383                         p = &(*p)->rb_left;
384                 else if (rq->sector > __rq->sector)
385                         p = &(*p)->rb_right;
386                 else
387                         return __rq;
388         }
389
390         rb_link_node(&rq->rb_node, parent, p);
391         rb_insert_color(&rq->rb_node, root);
392         return NULL;
393 }
394 EXPORT_SYMBOL(elv_rb_add);
395
396 void elv_rb_del(struct rb_root *root, struct request *rq)
397 {
398         BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
399         rb_erase(&rq->rb_node, root);
400         RB_CLEAR_NODE(&rq->rb_node);
401 }
402 EXPORT_SYMBOL(elv_rb_del);
403
404 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
405 {
406         struct rb_node *n = root->rb_node;
407         struct request *rq;
408
409         while (n) {
410                 rq = rb_entry(n, struct request, rb_node);
411
412                 if (sector < rq->sector)
413                         n = n->rb_left;
414                 else if (sector > rq->sector)
415                         n = n->rb_right;
416                 else
417                         return rq;
418         }
419
420         return NULL;
421 }
422 EXPORT_SYMBOL(elv_rb_find);
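
/*
 * Illustrative sketch (not taken from any particular io scheduler): a
 * scheduler usually keeps one rb_root per data direction and drives the
 * helpers above roughly like this:
 *
 *	__alias = elv_rb_add(&sort_list[rq_data_dir(rq)], rq);
 *	if (__alias)
 *		handle_sector_collision(__alias, rq);
 *	...
 *	elv_rb_del(&sort_list[rq_data_dir(rq)], rq);
 *
 * sort_list and handle_sector_collision() are hypothetical names used
 * only for this example; elv_rb_find() is typically used when looking
 * for front merge candidates.
 */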
423
424 /*
425  * Insert rq into dispatch queue of q.  Queue lock must be held on
426  * entry.  rq is sorted into the dispatch queue.  To be used by
427  * specific elevators.
428  */
429 void elv_dispatch_sort(struct request_queue *q, struct request *rq)
430 {
431         sector_t boundary;
432         struct list_head *entry;
433         int stop_flags;
434
435         if (q->last_merge == rq)
436                 q->last_merge = NULL;
437
438         elv_rqhash_del(q, rq);
439
440         q->nr_sorted--;
441
442         boundary = q->end_sector;
443         stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
444         list_for_each_prev(entry, &q->queue_head) {
445                 struct request *pos = list_entry_rq(entry);
446
447                 if (blk_discard_rq(rq) != blk_discard_rq(pos))
448                         break;
449                 if (rq_data_dir(rq) != rq_data_dir(pos))
450                         break;
451                 if (pos->cmd_flags & stop_flags)
452                         break;
453                 if (rq->sector >= boundary) {
454                         if (pos->sector < boundary)
455                                 continue;
456                 } else {
457                         if (pos->sector >= boundary)
458                                 break;
459                 }
460                 if (rq->sector >= pos->sector)
461                         break;
462         }
463
464         list_add(&rq->queuelist, entry);
465 }
466 EXPORT_SYMBOL(elv_dispatch_sort);
467
468 /*
469  * Insert rq into dispatch queue of q.  Queue lock must be held on
470  * entry.  rq is added to the back of the dispatch queue. To be used by
471  * specific elevators.
472  */
473 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
474 {
475         if (q->last_merge == rq)
476                 q->last_merge = NULL;
477
478         elv_rqhash_del(q, rq);
479
480         q->nr_sorted--;
481
482         q->end_sector = rq_end_sector(rq);
483         q->boundary_rq = rq;
484         list_add_tail(&rq->queuelist, &q->queue_head);
485 }
486 EXPORT_SYMBOL(elv_dispatch_add_tail);
487
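/*
 * Try to find a request that bio can be merged into.  The one-hit
 * last_merge cache is checked first, then the back merge hash, and
 * finally the io scheduler is asked (which is where front merges are
 * normally found).  On success *req is set and the merge type returned.
 */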
488 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
489 {
490         elevator_t *e = q->elevator;
491         struct request *__rq;
492         int ret;
493
494         /*
495          * First try one-hit cache.
496          */
497         if (q->last_merge) {
498                 ret = elv_try_merge(q->last_merge, bio);
499                 if (ret != ELEVATOR_NO_MERGE) {
500                         *req = q->last_merge;
501                         return ret;
502                 }
503         }
504
505         if (blk_queue_nomerges(q))
506                 return ELEVATOR_NO_MERGE;
507
508         /*
509          * See if our hash lookup can find a potential backmerge.
510          */
511         __rq = elv_rqhash_find(q, bio->bi_sector);
512         if (__rq && elv_rq_merge_ok(__rq, bio)) {
513                 *req = __rq;
514                 return ELEVATOR_BACK_MERGE;
515         }
516
517         if (e->ops->elevator_merge_fn)
518                 return e->ops->elevator_merge_fn(q, req, bio);
519
520         return ELEVATOR_NO_MERGE;
521 }
522
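/*
 * Called after bio has been merged into rq.  A back merge moves rq's end
 * sector, so the request is repositioned in the merge hash; rq also
 * becomes the new last_merge hint.
 */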
523 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
524 {
525         elevator_t *e = q->elevator;
526
527         if (e->ops->elevator_merged_fn)
528                 e->ops->elevator_merged_fn(q, rq, type);
529
530         if (type == ELEVATOR_BACK_MERGE)
531                 elv_rqhash_reposition(q, rq);
532
533         q->last_merge = rq;
534 }
535
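/*
 * Called when two requests have been merged: next has been folded into
 * rq and is about to be freed, so drop next from the hash and the sort
 * accounting, and reposition rq, whose end sector has changed.
 */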
536 void elv_merge_requests(struct request_queue *q, struct request *rq,
537                              struct request *next)
538 {
539         elevator_t *e = q->elevator;
540
541         if (e->ops->elevator_merge_req_fn)
542                 e->ops->elevator_merge_req_fn(q, rq, next);
543
544         elv_rqhash_reposition(q, rq);
545         elv_rqhash_del(q, next);
546
547         q->nr_sorted--;
548         q->last_merge = rq;
549 }
550
551 void elv_requeue_request(struct request_queue *q, struct request *rq)
552 {
553         /*
554          * it already went through dequeue and was accounted as in
555          * flight, so the in_flight count must be decremented here
556          */
557         if (blk_account_rq(rq)) {
558                 q->in_flight--;
559                 if (blk_sorted_rq(rq))
560                         elv_deactivate_rq(q, rq);
561         }
562
563         rq->cmd_flags &= ~REQ_STARTED;
564
565         elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
566 }
567
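/*
 * Force the io scheduler to move everything it is holding to the dispatch
 * queue, and complain (a bounded number of times) if it still claims to
 * have sorted requests afterwards.
 */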
568 static void elv_drain_elevator(struct request_queue *q)
569 {
570         static int printed;
571         while (q->elevator->ops->elevator_dispatch_fn(q, 1))
572                 ;
573         if (q->nr_sorted == 0)
574                 return;
575         if (printed++ < 10) {
576                 printk(KERN_ERR "%s: forced dispatching is broken "
577                        "(nr_sorted=%u), please report this\n",
578                        q->elevator->elevator_type->elevator_name, q->nr_sorted);
579         }
580 }
581
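/*
 * Core insertion.  FRONT and BACK place the request directly on the
 * dispatch queue (BACK also drains the scheduler and kicks the queue),
 * SORT hands the request to the io scheduler, and REQUEUE front-inserts
 * it unless an ordered flush is in progress, in which case it is slotted
 * by its ordered sequence number.
 */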
582 void elv_insert(struct request_queue *q, struct request *rq, int where)
583 {
584         struct list_head *pos;
585         unsigned ordseq;
586         int unplug_it = 1;
587
588         blk_add_trace_rq(q, rq, BLK_TA_INSERT);
589
590         rq->q = q;
591
592         switch (where) {
593         case ELEVATOR_INSERT_FRONT:
594                 rq->cmd_flags |= REQ_SOFTBARRIER;
595
596                 list_add(&rq->queuelist, &q->queue_head);
597                 break;
598
599         case ELEVATOR_INSERT_BACK:
600                 rq->cmd_flags |= REQ_SOFTBARRIER;
601                 elv_drain_elevator(q);
602                 list_add_tail(&rq->queuelist, &q->queue_head);
603                 /*
604                  * We kick the queue here for the following reasons.
605                  * - The elevator might have returned NULL previously
606                  *   to delay requests and returned them now.  As the
607                  *   queue wasn't empty before this request, ll_rw_blk
608                  *   won't run the queue on return, resulting in a hang.
609                  * - Usually, back inserted requests won't be merged
610                  *   with anything.  There's no point in delaying queue
611                  *   processing.
612                  */
613                 blk_remove_plug(q);
614                 q->request_fn(q);
615                 break;
616
617         case ELEVATOR_INSERT_SORT:
618                 BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
619                 rq->cmd_flags |= REQ_SORTED;
620                 q->nr_sorted++;
621                 if (rq_mergeable(rq)) {
622                         elv_rqhash_add(q, rq);
623                         if (!q->last_merge)
624                                 q->last_merge = rq;
625                 }
626
627                 /*
628                  * Some ioscheds (cfq) run q->request_fn directly, so
629                  * rq cannot be accessed after calling
630                  * elevator_add_req_fn.
631                  */
632                 q->elevator->ops->elevator_add_req_fn(q, rq);
633                 break;
634
635         case ELEVATOR_INSERT_REQUEUE:
636                 /*
637                  * If ordered flush isn't in progress, we do front
638                  * insertion; otherwise, requests should be requeued
639                  * in ordseq order.
640                  */
641                 rq->cmd_flags |= REQ_SOFTBARRIER;
642
643                 /*
644                  * Most requeues happen because of a busy condition,
645                  * don't force unplug of the queue for that case.
646                  */
647                 unplug_it = 0;
648
649                 if (q->ordseq == 0) {
650                         list_add(&rq->queuelist, &q->queue_head);
651                         break;
652                 }
653
654                 ordseq = blk_ordered_req_seq(rq);
655
656                 list_for_each(pos, &q->queue_head) {
657                         struct request *pos_rq = list_entry_rq(pos);
658                         if (ordseq <= blk_ordered_req_seq(pos_rq))
659                                 break;
660                 }
661
662                 list_add_tail(&rq->queuelist, pos);
663                 break;
664
665         default:
666                 printk(KERN_ERR "%s: bad insertion point %d\n",
667                        __func__, where);
668                 BUG();
669         }
670
671         if (unplug_it && blk_queue_plugged(q)) {
672                 int nrq = q->rq.count[READ] + q->rq.count[WRITE]
673                         - q->in_flight;
674
675                 if (nrq >= q->unplug_thresh)
676                         __generic_unplug_device(q);
677         }
678 }
679
680 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
681                        int plug)
682 {
683         if (q->ordcolor)
684                 rq->cmd_flags |= REQ_ORDERED_COLOR;
685
686         if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
687                 /*
688                  * toggle ordered color
689                  */
690                 if (blk_barrier_rq(rq))
691                         q->ordcolor ^= 1;
692
693                 /*
694                  * barriers implicitly indicate back insertion
695                  */
696                 if (where == ELEVATOR_INSERT_SORT)
697                         where = ELEVATOR_INSERT_BACK;
698
699                 /*
700                  * this request is a scheduling boundary, update
701                  * end_sector
702                  */
703                 if (blk_fs_request(rq) || blk_discard_rq(rq)) {
704                         q->end_sector = rq_end_sector(rq);
705                         q->boundary_rq = rq;
706                 }
707         } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
708                     where == ELEVATOR_INSERT_SORT)
709                 where = ELEVATOR_INSERT_BACK;
710
711         if (plug)
712                 blk_plug_device(q);
713
714         elv_insert(q, rq, where);
715 }
716 EXPORT_SYMBOL(__elv_add_request);
717
718 void elv_add_request(struct request_queue *q, struct request *rq, int where,
719                      int plug)
720 {
721         unsigned long flags;
722
723         spin_lock_irqsave(q->queue_lock, flags);
724         __elv_add_request(q, rq, where, plug);
725         spin_unlock_irqrestore(q->queue_lock, flags);
726 }
727 EXPORT_SYMBOL(elv_add_request);
728
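/*
 * Hand out the next request on the dispatch queue, letting
 * blk_do_ordered() filter or substitute it for barrier sequencing.  When
 * the dispatch queue is empty, ask the io scheduler to dispatch more;
 * return NULL once it has nothing left either.
 */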
729 static inline struct request *__elv_next_request(struct request_queue *q)
730 {
731         struct request *rq;
732
733         while (1) {
734                 while (!list_empty(&q->queue_head)) {
735                         rq = list_entry_rq(q->queue_head.next);
736                         if (blk_do_ordered(q, &rq))
737                                 return rq;
738                 }
739
740                 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
741                         return NULL;
742         }
743 }
744
745 struct request *elv_next_request(struct request_queue *q)
746 {
747         struct request *rq;
748         int ret;
749
750         while ((rq = __elv_next_request(q)) != NULL) {
751                 /*
752                  * Kill the empty barrier placeholder, the driver must
753                  * never see it.
754                  */
755                 if (blk_empty_barrier(rq)) {
756                         end_queued_request(rq, 1);
757                         continue;
758                 }
759                 if (!(rq->cmd_flags & REQ_STARTED)) {
760                         /*
761                          * This is the first time the device driver
762                          * sees this request (possibly after
763                          * requeueing).  Notify IO scheduler.
764                          */
765                         if (blk_sorted_rq(rq))
766                                 elv_activate_rq(q, rq);
767
768                         /*
769                          * just mark it as started even if we don't start
770                          * it: a request that has been delayed should
771                          * not be passed by new incoming requests
772                          */
773                         rq->cmd_flags |= REQ_STARTED;
774                         blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
775                 }
776
777                 if (!q->boundary_rq || q->boundary_rq == rq) {
778                         q->end_sector = rq_end_sector(rq);
779                         q->boundary_rq = NULL;
780                 }
781
782                 if (rq->cmd_flags & REQ_DONTPREP)
783                         break;
784
785                 if (q->dma_drain_size && rq->data_len) {
786                         /*
787                          * make sure space for the drain appears.  we
788                          * know we can do this because max_hw_segments
789                          * has been adjusted to be one fewer than the
790                          * device can handle
791                          */
792                         rq->nr_phys_segments++;
793                 }
794
795                 if (!q->prep_rq_fn)
796                         break;
797
798                 ret = q->prep_rq_fn(q, rq);
799                 if (ret == BLKPREP_OK) {
800                         break;
801                 } else if (ret == BLKPREP_DEFER) {
802                         /*
803                          * the request may have been (partially) prepped.
804                          * we need to keep this request in the front to
805                          * avoid resource deadlock.  REQ_STARTED will
806                          * prevent other fs requests from passing this one.
807                          */
808                         if (q->dma_drain_size && rq->data_len &&
809                             !(rq->cmd_flags & REQ_DONTPREP)) {
810                                 /*
811                                  * remove the space for the drain we added
812                                  * so that we don't add it again
813                                  */
814                                 --rq->nr_phys_segments;
815                         }
816
817                         rq = NULL;
818                         break;
819                 } else if (ret == BLKPREP_KILL) {
820                         rq->cmd_flags |= REQ_QUIET;
821                         end_queued_request(rq, 0);
822                 } else {
823                         printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
824                         break;
825                 }
826         }
827
828         return rq;
829 }
830 EXPORT_SYMBOL(elv_next_request);
831
832 void elv_dequeue_request(struct request_queue *q, struct request *rq)
833 {
834         BUG_ON(list_empty(&rq->queuelist));
835         BUG_ON(ELV_ON_HASH(rq));
836
837         list_del_init(&rq->queuelist);
838
839         /*
840          * the time frame between a request being removed from the lists
841          * and when it is freed is accounted as io in progress on
842          * the driver side.
843          */
844         if (blk_account_rq(rq))
845                 q->in_flight++;
846 }
847 EXPORT_SYMBOL(elv_dequeue_request);
848
849 int elv_queue_empty(struct request_queue *q)
850 {
851         elevator_t *e = q->elevator;
852
853         if (!list_empty(&q->queue_head))
854                 return 0;
855
856         if (e->ops->elevator_queue_empty_fn)
857                 return e->ops->elevator_queue_empty_fn(q);
858
859         return 1;
860 }
861 EXPORT_SYMBOL(elv_queue_empty);
862
863 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
864 {
865         elevator_t *e = q->elevator;
866
867         if (e->ops->elevator_latter_req_fn)
868                 return e->ops->elevator_latter_req_fn(q, rq);
869         return NULL;
870 }
871
872 struct request *elv_former_request(struct request_queue *q, struct request *rq)
873 {
874         elevator_t *e = q->elevator;
875
876         if (e->ops->elevator_former_req_fn)
877                 return e->ops->elevator_former_req_fn(q, rq);
878         return NULL;
879 }
880
881 int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
882 {
883         elevator_t *e = q->elevator;
884
885         if (e->ops->elevator_set_req_fn)
886                 return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
887
888         rq->elevator_private = NULL;
889         return 0;
890 }
891
892 void elv_put_request(struct request_queue *q, struct request *rq)
893 {
894         elevator_t *e = q->elevator;
895
896         if (e->ops->elevator_put_req_fn)
897                 e->ops->elevator_put_req_fn(rq);
898 }
899
900 int elv_may_queue(struct request_queue *q, int rw)
901 {
902         elevator_t *e = q->elevator;
903
904         if (e->ops->elevator_may_queue_fn)
905                 return e->ops->elevator_may_queue_fn(q, rw);
906
907         return ELV_MQUEUE_MAY;
908 }
909
910 void elv_completed_request(struct request_queue *q, struct request *rq)
911 {
912         elevator_t *e = q->elevator;
913
914         /*
915          * the driver has released the request, so its io must be done
916          */
917         if (blk_account_rq(rq)) {
918                 q->in_flight--;
919                 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
920                         e->ops->elevator_completed_req_fn(q, rq);
921         }
922
923         /*
924          * Check if the queue is waiting for fs requests to be
925          * drained for the flush sequence.
926          */
927         if (unlikely(q->ordseq)) {
928                 struct request *first_rq = list_entry_rq(q->queue_head.next);
929                 if (q->in_flight == 0 &&
930                     blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
931                     blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
932                         blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
933                         q->request_fn(q);
934                 }
935         }
936 }
937
938 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
939
940 static ssize_t
941 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
942 {
943         elevator_t *e = container_of(kobj, elevator_t, kobj);
944         struct elv_fs_entry *entry = to_elv(attr);
945         ssize_t error;
946
947         if (!entry->show)
948                 return -EIO;
949
950         mutex_lock(&e->sysfs_lock);
951         error = e->ops ? entry->show(e, page) : -ENOENT;
952         mutex_unlock(&e->sysfs_lock);
953         return error;
954 }
955
956 static ssize_t
957 elv_attr_store(struct kobject *kobj, struct attribute *attr,
958                const char *page, size_t length)
959 {
960         elevator_t *e = container_of(kobj, elevator_t, kobj);
961         struct elv_fs_entry *entry = to_elv(attr);
962         ssize_t error;
963
964         if (!entry->store)
965                 return -EIO;
966
967         mutex_lock(&e->sysfs_lock);
968         error = e->ops ? entry->store(e, page, length) : -ENOENT;
969         mutex_unlock(&e->sysfs_lock);
970         return error;
971 }
972
973 static struct sysfs_ops elv_sysfs_ops = {
974         .show   = elv_attr_show,
975         .store  = elv_attr_store,
976 };
977
978 static struct kobj_type elv_ktype = {
979         .sysfs_ops      = &elv_sysfs_ops,
980         .release        = elevator_release,
981 };
982
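/*
 * Create the "iosched" sysfs directory below the queue kobject and
 * populate it with the scheduler's attributes, i.e.
 * /sys/block/<dev>/queue/iosched/.
 */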
983 int elv_register_queue(struct request_queue *q)
984 {
985         elevator_t *e = q->elevator;
986         int error;
987
988         error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
989         if (!error) {
990                 struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
991                 if (attr) {
992                         while (attr->attr.name) {
993                                 if (sysfs_create_file(&e->kobj, &attr->attr))
994                                         break;
995                                 attr++;
996                         }
997                 }
998                 kobject_uevent(&e->kobj, KOBJ_ADD);
999         }
1000         return error;
1001 }
1002
1003 static void __elv_unregister_queue(elevator_t *e)
1004 {
1005         kobject_uevent(&e->kobj, KOBJ_REMOVE);
1006         kobject_del(&e->kobj);
1007 }
1008
1009 void elv_unregister_queue(struct request_queue *q)
1010 {
1011         if (q)
1012                 __elv_unregister_queue(q->elevator);
1013 }
1014
1015 void elv_register(struct elevator_type *e)
1016 {
1017         char *def = "";
1018
1019         spin_lock(&elv_list_lock);
1020         BUG_ON(elevator_find(e->elevator_name));
1021         list_add_tail(&e->list, &elv_list);
1022         spin_unlock(&elv_list_lock);
1023
1024         if (!strcmp(e->elevator_name, chosen_elevator) ||
1025                         (!*chosen_elevator &&
1026                          !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
1027                                 def = " (default)";
1028
1029         printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
1030                                                                 def);
1031 }
1032 EXPORT_SYMBOL_GPL(elv_register);
1033
1034 void elv_unregister(struct elevator_type *e)
1035 {
1036         struct task_struct *g, *p;
1037
1038         /*
1039          * Iterate over every thread in the system and trim its io context.
1040          */
1041         if (e->ops.trim) {
1042                 read_lock(&tasklist_lock);
1043                 do_each_thread(g, p) {
1044                         task_lock(p);
1045                         if (p->io_context)
1046                                 e->ops.trim(p->io_context);
1047                         task_unlock(p);
1048                 } while_each_thread(g, p);
1049                 read_unlock(&tasklist_lock);
1050         }
1051
1052         spin_lock(&elv_list_lock);
1053         list_del_init(&e->list);
1054         spin_unlock(&elv_list_lock);
1055 }
1056 EXPORT_SYMBOL_GPL(elv_unregister);
1057
1058 /*
1059  * Switch to the new_e io scheduler.  Be careful not to introduce
1060  * deadlocks: we don't free the old io scheduler before we have allocated
1061  * what we need for the new one.  This way we have a chance of going back
1062  * to the old one if the new one fails init for some reason.
1063  */
1064 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1065 {
1066         elevator_t *old_elevator, *e;
1067         void *data;
1068
1069         /*
1070          * Allocate new elevator
1071          */
1072         e = elevator_alloc(q, new_e);
1073         if (!e)
1074                 return 0;
1075
1076         data = elevator_init_queue(q, e);
1077         if (!data) {
1078                 kobject_put(&e->kobj);
1079                 return 0;
1080         }
1081
1082         /*
1083          * Turn on BYPASS and drain all requests w/ elevator private data
1084          */
1085         spin_lock_irq(q->queue_lock);
1086
1087         queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
1088
1089         elv_drain_elevator(q);
1090
1091         while (q->rq.elvpriv) {
1092                 blk_remove_plug(q);
1093                 q->request_fn(q);
1094                 spin_unlock_irq(q->queue_lock);
1095                 msleep(10);
1096                 spin_lock_irq(q->queue_lock);
1097                 elv_drain_elevator(q);
1098         }
1099
1100         /*
1101          * Remember old elevator.
1102          */
1103         old_elevator = q->elevator;
1104
1105         /*
1106          * attach and start new elevator
1107          */
1108         elevator_attach(q, e, data);
1109
1110         spin_unlock_irq(q->queue_lock);
1111
1112         __elv_unregister_queue(old_elevator);
1113
1114         if (elv_register_queue(q))
1115                 goto fail_register;
1116
1117         /*
1118          * finally exit old elevator and turn off BYPASS.
1119          */
1120         elevator_exit(old_elevator);
1121         spin_lock_irq(q->queue_lock);
1122         queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1123         spin_unlock_irq(q->queue_lock);
1124
1125         blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
1126
1127         return 1;
1128
1129 fail_register:
1130         /*
1131          * switch failed, exit the new io scheduler and reattach the old
1132          * one again (along with re-adding the sysfs dir)
1133          */
1134         elevator_exit(e);
1135         q->elevator = old_elevator;
1136         elv_register_queue(q);
1137
1138         spin_lock_irq(q->queue_lock);
1139         queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1140         spin_unlock_irq(q->queue_lock);
1141
1142         return 0;
1143 }
1144
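/*
 * sysfs store handler for the "scheduler" queue attribute; switches the
 * queue to the named io scheduler, e.g.
 *
 *	echo deadline > /sys/block/sda/queue/scheduler
 *
 * A trailing newline (as written by echo) is stripped before the lookup.
 */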
1145 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1146                           size_t count)
1147 {
1148         char elevator_name[ELV_NAME_MAX];
1149         size_t len;
1150         struct elevator_type *e;
1151
1152         elevator_name[sizeof(elevator_name) - 1] = '\0';
1153         strncpy(elevator_name, name, sizeof(elevator_name) - 1);
1154         len = strlen(elevator_name);
1155
1156         if (len && elevator_name[len - 1] == '\n')
1157                 elevator_name[len - 1] = '\0';
1158
1159         e = elevator_get(elevator_name);
1160         if (!e) {
1161                 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
1162                 return -EINVAL;
1163         }
1164
1165         if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1166                 elevator_put(e);
1167                 return count;
1168         }
1169
1170         if (!elevator_switch(q, e))
1171                 printk(KERN_ERR "elevator: switch to %s failed\n",
1172                                                         elevator_name);
1173         return count;
1174 }
1175
1176 ssize_t elv_iosched_show(struct request_queue *q, char *name)
1177 {
1178         elevator_t *e = q->elevator;
1179         struct elevator_type *elv = e->elevator_type;
1180         struct elevator_type *__e;
1181         int len = 0;
1182
1183         spin_lock(&elv_list_lock);
1184         list_for_each_entry(__e, &elv_list, list) {
1185                 if (!strcmp(elv->elevator_name, __e->elevator_name))
1186                         len += sprintf(name+len, "[%s] ", elv->elevator_name);
1187                 else
1188                         len += sprintf(name+len, "%s ", __e->elevator_name);
1189         }
1190         spin_unlock(&elv_list_lock);
1191
1192         len += sprintf(len+name, "\n");
1193         return len;
1194 }
1195
1196 struct request *elv_rb_former_request(struct request_queue *q,
1197                                       struct request *rq)
1198 {
1199         struct rb_node *rbprev = rb_prev(&rq->rb_node);
1200
1201         if (rbprev)
1202                 return rb_entry_rq(rbprev);
1203
1204         return NULL;
1205 }
1206 EXPORT_SYMBOL(elv_rb_former_request);
1207
1208 struct request *elv_rb_latter_request(struct request_queue *q,
1209                                       struct request *rq)
1210 {
1211         struct rb_node *rbnext = rb_next(&rq->rb_node);
1212
1213         if (rbnext)
1214                 return rb_entry_rq(rbnext);
1215
1216         return NULL;
1217 }
1218 EXPORT_SYMBOL(elv_rb_latter_request);