1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished I/O scheduler
5  *  (round-robin per-process disk scheduling) and from Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "blk.h"
18 #include "cfq.h"
19
20 static struct blkio_policy_type blkio_policy_cfq;
21
22 /*
23  * tunables
24  */
25 /* max requests dispatched in one round of service */
26 static const int cfq_quantum = 8;
27 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
28 /* maximum backwards seek, in KiB */
29 static const int cfq_back_max = 16 * 1024;
30 /* penalty of a backwards seek */
31 static const int cfq_back_penalty = 2;
32 static const int cfq_slice_sync = HZ / 10;
33 static int cfq_slice_async = HZ / 25;
34 static const int cfq_slice_async_rq = 2;
35 static int cfq_slice_idle = HZ / 125;
36 static int cfq_group_idle = HZ / 125;
37 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
38 static const int cfq_hist_divisor = 4;
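
/*
 * For a rough feel of the defaults, assuming HZ=1000 (actual values scale
 * with the kernel's HZ): cfq_fifo_expire = {250ms, 125ms}, cfq_slice_sync =
 * 100ms, cfq_slice_async = 40ms, cfq_slice_idle = cfq_group_idle = 8ms and
 * cfq_target_latency = 300ms. cfq_back_max allows backward seeks of up to
 * 16 MiB before a request is treated as wrapped behind the head.
 */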
39
40 /*
41  * offset from end of service tree
42  */
43 #define CFQ_IDLE_DELAY          (HZ / 5)
44
45 /*
46  * below this threshold, we consider thinktime immediate
47  */
48 #define CFQ_MIN_TT              (2)
49
50 #define CFQ_SLICE_SCALE         (5)
51 #define CFQ_HW_QUEUE_MIN        (5)
52 #define CFQ_SERVICE_SHIFT       12
53
54 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
55 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
56 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
57 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
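
/*
 * seek_history is a 32-bit shift register of per-request "seekiness" samples
 * (maintained by the seek-time update code elsewhere in this file); the
 * CFQQ_SEEKY() test flags a queue as seeky once more than 32/8 = 4 of the
 * last 32 samples were seeky.
 */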
58
59 #define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
60 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
61 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
62
63 static struct kmem_cache *cfq_pool;
64
65 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
66 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
67 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
68
69 #define sample_valid(samples)   ((samples) > 80)
70 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
71
72 struct cfq_ttime {
73         unsigned long last_end_request;
74
75         unsigned long ttime_total;
76         unsigned long ttime_samples;
77         unsigned long ttime_mean;
78 };
79
80 /*
81  * Most of our rbtree usage is for sorting with min extraction, so
82  * if we cache the leftmost node we don't have to walk down the tree
83  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
84  * move this into the elevator for the rq sorting as well.
85  */
86 struct cfq_rb_root {
87         struct rb_root rb;
88         struct rb_node *left;
89         unsigned count;
90         unsigned total_weight;
91         u64 min_vdisktime;
92         struct cfq_ttime ttime;
93 };
94 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
95                         .ttime = {.last_end_request = jiffies,},}
96
97 /*
98  * Per process-grouping structure
99  */
100 struct cfq_queue {
101         /* reference count */
102         int ref;
103         /* various state flags, see below */
104         unsigned int flags;
105         /* parent cfq_data */
106         struct cfq_data *cfqd;
107         /* service_tree member */
108         struct rb_node rb_node;
109         /* service_tree key */
110         unsigned long rb_key;
111         /* prio tree member */
112         struct rb_node p_node;
113         /* prio tree root we belong to, if any */
114         struct rb_root *p_root;
115         /* sorted list of pending requests */
116         struct rb_root sort_list;
117         /* if fifo isn't expired, next request to serve */
118         struct request *next_rq;
119         /* requests queued in sort_list */
120         int queued[2];
121         /* currently allocated requests */
122         int allocated[2];
123         /* fifo list of requests in sort_list */
124         struct list_head fifo;
125
126         /* time when queue got scheduled in to dispatch first request. */
127         unsigned long dispatch_start;
128         unsigned int allocated_slice;
129         unsigned int slice_dispatch;
130         /* time when first request from queue completed and slice started. */
131         unsigned long slice_start;
132         unsigned long slice_end;
133         long slice_resid;
134
135         /* pending priority requests */
136         int prio_pending;
137         /* number of requests that are on the dispatch list or inside driver */
138         int dispatched;
139
140         /* io prio of this queue */
141         unsigned short ioprio, org_ioprio;
142         unsigned short ioprio_class;
143
144         pid_t pid;
145
146         u32 seek_history;
147         sector_t last_request_pos;
148
149         struct cfq_rb_root *service_tree;
150         struct cfq_queue *new_cfqq;
151         struct cfq_group *cfqg;
152         /* Number of sectors dispatched from queue in single dispatch round */
153         unsigned long nr_sectors;
154 };
155
156 /*
157  * First index in the service_trees.
158  * IDLE is handled separately and does not index the per-group service_trees array
159  */
160 enum wl_prio_t {
161         BE_WORKLOAD = 0,
162         RT_WORKLOAD = 1,
163         IDLE_WORKLOAD = 2,
164         CFQ_PRIO_NR,
165 };
166
167 /*
168  * Second index in the service_trees.
169  */
170 enum wl_type_t {
171         ASYNC_WORKLOAD = 0,
172         SYNC_NOIDLE_WORKLOAD = 1,
173         SYNC_WORKLOAD = 2
174 };
175
176 /* This is per cgroup per device grouping structure */
177 struct cfq_group {
178         /* group service_tree member */
179         struct rb_node rb_node;
180
181         /* group service_tree key */
182         u64 vdisktime;
183         unsigned int weight;
184         unsigned int new_weight;
185         bool needs_update;
186
187         /* number of cfqq currently on this group */
188         int nr_cfqq;
189
190         /*
191          * Per-group busy queues average. Useful for workload slice calc. We
192          * create the array for each prio class, but at run time it is used
193          * only for the RT and BE classes; the slot for the IDLE class remains
194          * unused. This is primarily done to avoid confusion and a gcc warning.
195          */
196         unsigned int busy_queues_avg[CFQ_PRIO_NR];
197         /*
198          * rr lists of queues with requests. We maintain service trees for
199          * RT and BE classes. These trees are subdivided into subclasses
200          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
201          * class there is no subclassification and all the cfq queues go on
202          * a single tree service_tree_idle.
203          * Counts are embedded in the cfq_rb_root
204          */
205         struct cfq_rb_root service_trees[2][3];
206         struct cfq_rb_root service_tree_idle;
207
208         unsigned long saved_workload_slice;
209         enum wl_type_t saved_workload;
210         enum wl_prio_t saved_serving_prio;
211
212         /* number of requests that are on the dispatch list or inside driver */
213         int dispatched;
214         struct cfq_ttime ttime;
215 };
216
217 struct cfq_io_cq {
218         struct io_cq            icq;            /* must be the first member */
219         struct cfq_queue        *cfqq[2];
220         struct cfq_ttime        ttime;
221 };
222
223 /*
224  * Per block device queue structure
225  */
226 struct cfq_data {
227         struct request_queue *queue;
228         /* Root service tree for cfq_groups */
229         struct cfq_rb_root grp_service_tree;
230         struct cfq_group *root_group;
231
232         /*
233          * The priority currently being served
234          */
235         enum wl_prio_t serving_prio;
236         enum wl_type_t serving_type;
237         unsigned long workload_expires;
238         struct cfq_group *serving_group;
239
240         /*
241          * Each priority tree is sorted by next_request position.  These
242          * trees are used when determining if two or more queues are
243          * interleaving requests (see cfq_close_cooperator).
244          */
245         struct rb_root prio_trees[CFQ_PRIO_LISTS];
246
247         unsigned int busy_queues;
248         unsigned int busy_sync_queues;
249
250         int rq_in_driver;
251         int rq_in_flight[2];
252
253         /*
254          * queue-depth detection
255          */
256         int rq_queued;
257         int hw_tag;
258         /*
259          * hw_tag can be
260          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
261          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
262          *  0 => no NCQ
263          */
264         int hw_tag_est_depth;
265         unsigned int hw_tag_samples;
266
267         /*
268          * idle window management
269          */
270         struct timer_list idle_slice_timer;
271         struct work_struct unplug_work;
272
273         struct cfq_queue *active_queue;
274         struct cfq_io_cq *active_cic;
275
276         /*
277          * async queue for each priority case
278          */
279         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
280         struct cfq_queue *async_idle_cfqq;
281
282         sector_t last_position;
283
284         /*
285          * tunables, see top of file
286          */
287         unsigned int cfq_quantum;
288         unsigned int cfq_fifo_expire[2];
289         unsigned int cfq_back_penalty;
290         unsigned int cfq_back_max;
291         unsigned int cfq_slice[2];
292         unsigned int cfq_slice_async_rq;
293         unsigned int cfq_slice_idle;
294         unsigned int cfq_group_idle;
295         unsigned int cfq_latency;
296
297         /*
298          * Fallback dummy cfqq for extreme OOM conditions
299          */
300         struct cfq_queue oom_cfqq;
301
302         unsigned long last_delayed_sync;
303 };
304
305 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
306 {
307         return blkg_to_pdata(blkg, &blkio_policy_cfq);
308 }
309
310 static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg)
311 {
312         return pdata_to_blkg(cfqg, &blkio_policy_cfq);
313 }
314
315 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
316
317 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
318                                             enum wl_prio_t prio,
319                                             enum wl_type_t type)
320 {
321         if (!cfqg)
322                 return NULL;
323
324         if (prio == IDLE_WORKLOAD)
325                 return &cfqg->service_tree_idle;
326
327         return &cfqg->service_trees[prio][type];
328 }
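
/*
 * For example, a sync best-effort queue with idling enabled is placed on
 * cfqg->service_trees[BE_WORKLOAD][SYNC_WORKLOAD], while every idle-class
 * queue goes on the single service_tree_idle regardless of workload type
 * (see cfqq_prio() and cfqq_type() below).
 */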
329
330 enum cfqq_state_flags {
331         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
332         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
333         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
334         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
335         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
336         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
337         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
338         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
339         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
340         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
341         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
342         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
343         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
344 };
345
346 #define CFQ_CFQQ_FNS(name)                                              \
347 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
348 {                                                                       \
349         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
350 }                                                                       \
351 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
352 {                                                                       \
353         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
354 }                                                                       \
355 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
356 {                                                                       \
357         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
358 }
359
360 CFQ_CFQQ_FNS(on_rr);
361 CFQ_CFQQ_FNS(wait_request);
362 CFQ_CFQQ_FNS(must_dispatch);
363 CFQ_CFQQ_FNS(must_alloc_slice);
364 CFQ_CFQQ_FNS(fifo_expire);
365 CFQ_CFQQ_FNS(idle_window);
366 CFQ_CFQQ_FNS(prio_changed);
367 CFQ_CFQQ_FNS(slice_new);
368 CFQ_CFQQ_FNS(sync);
369 CFQ_CFQQ_FNS(coop);
370 CFQ_CFQQ_FNS(split_coop);
371 CFQ_CFQQ_FNS(deep);
372 CFQ_CFQQ_FNS(wait_busy);
373 #undef CFQ_CFQQ_FNS
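
/*
 * Each CFQ_CFQQ_FNS(name) invocation above generates three helpers; e.g.
 * CFQ_CFQQ_FNS(on_rr) expands to cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr()
 * and cfq_cfqq_on_rr(), which set, clear and test the CFQ_CFQQ_FLAG_on_rr bit
 * in cfqq->flags.
 */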
374
375 #ifdef CONFIG_CFQ_GROUP_IOSCHED
376 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
377         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
378                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
379                         blkg_path(cfqg_to_blkg((cfqq)->cfqg)), ##args)
380
381 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
382         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
383                         blkg_path(cfqg_to_blkg((cfqg))), ##args)        \
384
385 #else
386 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
387         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
388 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
389 #endif
390 #define cfq_log(cfqd, fmt, args...)     \
391         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
392
393 /* Traverses through cfq group service trees */
394 #define for_each_cfqg_st(cfqg, i, j, st) \
395         for (i = 0; i <= IDLE_WORKLOAD; i++) \
396                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
397                         : &cfqg->service_tree_idle; \
398                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
399                         (i == IDLE_WORKLOAD && j == 0); \
400                         j++, st = i < IDLE_WORKLOAD ? \
401                         &cfqg->service_trees[i][j]: NULL) \
402
403 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
404         struct cfq_ttime *ttime, bool group_idle)
405 {
406         unsigned long slice;
407         if (!sample_valid(ttime->ttime_samples))
408                 return false;
409         if (group_idle)
410                 slice = cfqd->cfq_group_idle;
411         else
412                 slice = cfqd->cfq_slice_idle;
413         return ttime->ttime_mean > slice;
414 }
415
416 static inline bool iops_mode(struct cfq_data *cfqd)
417 {
418         /*
419          * If we are not idling on queues and it is an NCQ drive, requests
420          * execute in parallel and measuring time is not possible in most
421          * cases unless we drive shallower queue depths, which itself becomes
422          * a performance bottleneck. In such cases, switch to providing
423          * fairness in terms of number of IOs.
424          */
425         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
426                 return true;
427         else
428                 return false;
429 }
430
431 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
432 {
433         if (cfq_class_idle(cfqq))
434                 return IDLE_WORKLOAD;
435         if (cfq_class_rt(cfqq))
436                 return RT_WORKLOAD;
437         return BE_WORKLOAD;
438 }
439
440
441 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
442 {
443         if (!cfq_cfqq_sync(cfqq))
444                 return ASYNC_WORKLOAD;
445         if (!cfq_cfqq_idle_window(cfqq))
446                 return SYNC_NOIDLE_WORKLOAD;
447         return SYNC_WORKLOAD;
448 }
449
450 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
451                                         struct cfq_data *cfqd,
452                                         struct cfq_group *cfqg)
453 {
454         if (wl == IDLE_WORKLOAD)
455                 return cfqg->service_tree_idle.count;
456
457         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
458                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
459                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
460 }
461
462 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
463                                         struct cfq_group *cfqg)
464 {
465         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
466                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
467 }
468
469 static void cfq_dispatch_insert(struct request_queue *, struct request *);
470 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
471                                        struct io_context *, gfp_t);
472
473 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
474 {
475         /* cic->icq is the first member, %NULL will convert to %NULL */
476         return container_of(icq, struct cfq_io_cq, icq);
477 }
478
479 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
480                                                struct io_context *ioc)
481 {
482         if (ioc)
483                 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
484         return NULL;
485 }
486
487 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
488 {
489         return cic->cfqq[is_sync];
490 }
491
492 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
493                                 bool is_sync)
494 {
495         cic->cfqq[is_sync] = cfqq;
496 }
497
498 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
499 {
500         return cic->icq.q->elevator->elevator_data;
501 }
502
503 /*
504  * We regard a request as SYNC if it's either a read or has the SYNC bit
505  * set (in which case it could also be a direct WRITE).
506  */
507 static inline bool cfq_bio_sync(struct bio *bio)
508 {
509         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
510 }
511
512 /*
513  * Schedule a run of the queue if there are requests pending and nothing in
514  * the driver that will restart queueing.
515  */
516 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
517 {
518         if (cfqd->busy_queues) {
519                 cfq_log(cfqd, "schedule dispatch");
520                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
521         }
522 }
523
524 /*
525  * Scale the schedule slice based on io priority. Use the sync time slice
526  * only if a queue is marked sync and has sync io queued. A sync queue with
527  * only async io should not get the full sync slice length.
528  */
529 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
530                                  unsigned short prio)
531 {
532         const int base_slice = cfqd->cfq_slice[sync];
533
534         WARN_ON(prio >= IOPRIO_BE_NR);
535
536         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
537 }
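
/*
 * Illustrative slice values, assuming HZ=1000 and a sync queue (base_slice =
 * cfq_slice_sync = 100 jiffies, CFQ_SLICE_SCALE = 5): ioprio 0 gets
 * 100 + 20 * 4 = 180 jiffies, the default ioprio 4 gets 100 jiffies and
 * ioprio 7 gets 100 - 60 = 40 jiffies, so lower (better) priorities receive
 * proportionally longer slices.
 */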
538
539 static inline int
540 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
541 {
542         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
543 }
544
545 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
546 {
547         u64 d = delta << CFQ_SERVICE_SHIFT;
548
549         d = d * BLKIO_WEIGHT_DEFAULT;
550         do_div(d, cfqg->weight);
551         return d;
552 }
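
/*
 * The vdisktime charged is inversely proportional to the group's weight:
 * for the same amount of service, a group with twice the default weight
 * accumulates vdisktime at half the rate and therefore receives roughly
 * twice the share of disk time from the group service tree.
 */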
553
554 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
555 {
556         s64 delta = (s64)(vdisktime - min_vdisktime);
557         if (delta > 0)
558                 min_vdisktime = vdisktime;
559
560         return min_vdisktime;
561 }
562
563 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
564 {
565         s64 delta = (s64)(vdisktime - min_vdisktime);
566         if (delta < 0)
567                 min_vdisktime = vdisktime;
568
569         return min_vdisktime;
570 }
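
/*
 * Comparing through a signed (s64) delta keeps these helpers correct even
 * if the u64 vdisktime values wrap around, the same technique CFS uses for
 * its vruntime/min_vruntime bookkeeping.
 */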
571
572 static void update_min_vdisktime(struct cfq_rb_root *st)
573 {
574         struct cfq_group *cfqg;
575
576         if (st->left) {
577                 cfqg = rb_entry_cfqg(st->left);
578                 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
579                                                   cfqg->vdisktime);
580         }
581 }
582
583 /*
584  * Get the averaged number of queues of RT/BE priority.
585  * The average is updated with a formula that gives more weight to higher
586  * numbers, so that it follows sudden increases quickly and decays slowly.
587  */
588
589 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
590                                         struct cfq_group *cfqg, bool rt)
591 {
592         unsigned min_q, max_q;
593         unsigned mult  = cfq_hist_divisor - 1;
594         unsigned round = cfq_hist_divisor / 2;
595         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
596
597         min_q = min(cfqg->busy_queues_avg[rt], busy);
598         max_q = max(cfqg->busy_queues_avg[rt], busy);
599         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
600                 cfq_hist_divisor;
601         return cfqg->busy_queues_avg[rt];
602 }
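
/*
 * Worked example with the defaults (cfq_hist_divisor = 4, so mult = 3 and
 * round = 2): if the stored average is 1 and 5 queues suddenly become busy,
 * the new average is (3 * 5 + 1 + 2) / 4 = 4; if busy then drops back to 1,
 * the next update only falls to (3 * 4 + 1 + 2) / 4 = 3, i.e. the average
 * rises quickly but decays slowly.
 */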
603
604 static inline unsigned
605 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
606 {
607         struct cfq_rb_root *st = &cfqd->grp_service_tree;
608
609         return cfq_target_latency * cfqg->weight / st->total_weight;
610 }
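
/*
 * Example: with cfq_target_latency at 300ms (assuming HZ=1000), a group
 * holding half of the total weight on the service tree is given a group
 * slice of roughly 150ms per scheduling round.
 */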
611
612 static inline unsigned
613 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
614 {
615         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
616         if (cfqd->cfq_latency) {
617                 /*
618                  * interested queues (we consider only the ones with the same
619                  * priority class in the cfq group)
620                  */
621                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
622                                                 cfq_class_rt(cfqq));
623                 unsigned sync_slice = cfqd->cfq_slice[1];
624                 unsigned expect_latency = sync_slice * iq;
625                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
626
627                 if (expect_latency > group_slice) {
628                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
629                         /* scale low_slice according to IO priority
630                          * and sync vs async */
631                         unsigned low_slice =
632                                 min(slice, base_low_slice * slice / sync_slice);
633                         /* the adapted slice value is scaled to fit all iqs
634                          * into the target latency */
635                         slice = max(slice * group_slice / expect_latency,
636                                     low_slice);
637                 }
638         }
639         return slice;
640 }
641
642 static inline void
643 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
644 {
645         unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
646
647         cfqq->slice_start = jiffies;
648         cfqq->slice_end = jiffies + slice;
649         cfqq->allocated_slice = slice;
650         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
651 }
652
653 /*
654  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
655  * isn't valid until the first dispatched request is activated and the
656  * slice time is set.
657  */
658 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
659 {
660         if (cfq_cfqq_slice_new(cfqq))
661                 return false;
662         if (time_before(jiffies, cfqq->slice_end))
663                 return false;
664
665         return true;
666 }
667
668 /*
669  * Lifted from AS - choose which of rq1 and rq2 is best served now.
670  * We choose the request that is closest to the head right now. Distance
671  * behind the head is penalized and only allowed to a certain extent.
672  */
673 static struct request *
674 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
675 {
676         sector_t s1, s2, d1 = 0, d2 = 0;
677         unsigned long back_max;
678 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
679 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
680         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
681
682         if (rq1 == NULL || rq1 == rq2)
683                 return rq2;
684         if (rq2 == NULL)
685                 return rq1;
686
687         if (rq_is_sync(rq1) != rq_is_sync(rq2))
688                 return rq_is_sync(rq1) ? rq1 : rq2;
689
690         if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
691                 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
692
693         s1 = blk_rq_pos(rq1);
694         s2 = blk_rq_pos(rq2);
695
696         /*
697          * by definition, 1 KiB is 2 (512-byte) sectors
698          */
699         back_max = cfqd->cfq_back_max * 2;
700
701         /*
702          * Strict one way elevator _except_ in the case where we allow
703          * short backward seeks which are biased as twice the cost of a
704          * similar forward seek.
705          */
706         if (s1 >= last)
707                 d1 = s1 - last;
708         else if (s1 + back_max >= last)
709                 d1 = (last - s1) * cfqd->cfq_back_penalty;
710         else
711                 wrap |= CFQ_RQ1_WRAP;
712
713         if (s2 >= last)
714                 d2 = s2 - last;
715         else if (s2 + back_max >= last)
716                 d2 = (last - s2) * cfqd->cfq_back_penalty;
717         else
718                 wrap |= CFQ_RQ2_WRAP;
719
720         /* Found required data */
721
722         /*
723          * By doing switch() on the bit mask "wrap" we avoid having to
724          * check two variables for all permutations: --> faster!
725          */
726         switch (wrap) {
727         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
728                 if (d1 < d2)
729                         return rq1;
730                 else if (d2 < d1)
731                         return rq2;
732                 else {
733                         if (s1 >= s2)
734                                 return rq1;
735                         else
736                                 return rq2;
737                 }
738
739         case CFQ_RQ2_WRAP:
740                 return rq1;
741         case CFQ_RQ1_WRAP:
742                 return rq2;
743         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
744         default:
745                 /*
746                  * Since both rqs are wrapped,
747                  * start with the one that's further behind head
748                  * (--> only *one* back seek required),
749                  * since back seek takes more time than forward.
750                  */
751                 if (s1 <= s2)
752                         return rq1;
753                 else
754                         return rq2;
755         }
756 }
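
/*
 * Example of the backward-seek bias above: with the default cfq_back_max of
 * 16384 KiB (32768 sectors) and cfq_back_penalty of 2, a request 1000
 * sectors behind the head gets an effective distance of 2000, so a request
 * 1500 sectors ahead would still be preferred; anything further than 32768
 * sectors behind the head counts as wrapped and loses to any non-wrapped
 * candidate.
 */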
757
758 /*
759  * Below is the leftmost-node cache addon for the rbtree.
760  */
761 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
762 {
763         /* Service tree is empty */
764         if (!root->count)
765                 return NULL;
766
767         if (!root->left)
768                 root->left = rb_first(&root->rb);
769
770         if (root->left)
771                 return rb_entry(root->left, struct cfq_queue, rb_node);
772
773         return NULL;
774 }
775
776 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
777 {
778         if (!root->left)
779                 root->left = rb_first(&root->rb);
780
781         if (root->left)
782                 return rb_entry_cfqg(root->left);
783
784         return NULL;
785 }
786
787 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
788 {
789         rb_erase(n, root);
790         RB_CLEAR_NODE(n);
791 }
792
793 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
794 {
795         if (root->left == n)
796                 root->left = NULL;
797         rb_erase_init(n, &root->rb);
798         --root->count;
799 }
800
801 /*
802  * would be nice to take fifo expire time into account as well
803  */
804 static struct request *
805 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
806                   struct request *last)
807 {
808         struct rb_node *rbnext = rb_next(&last->rb_node);
809         struct rb_node *rbprev = rb_prev(&last->rb_node);
810         struct request *next = NULL, *prev = NULL;
811
812         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
813
814         if (rbprev)
815                 prev = rb_entry_rq(rbprev);
816
817         if (rbnext)
818                 next = rb_entry_rq(rbnext);
819         else {
820                 rbnext = rb_first(&cfqq->sort_list);
821                 if (rbnext && rbnext != &last->rb_node)
822                         next = rb_entry_rq(rbnext);
823         }
824
825         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
826 }
827
828 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
829                                       struct cfq_queue *cfqq)
830 {
831         /*
832          * just an approximation, should be ok.
833          */
834         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
835                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
836 }
837
838 static inline s64
839 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
840 {
841         return cfqg->vdisktime - st->min_vdisktime;
842 }
843
844 static void
845 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
846 {
847         struct rb_node **node = &st->rb.rb_node;
848         struct rb_node *parent = NULL;
849         struct cfq_group *__cfqg;
850         s64 key = cfqg_key(st, cfqg);
851         int left = 1;
852
853         while (*node != NULL) {
854                 parent = *node;
855                 __cfqg = rb_entry_cfqg(parent);
856
857                 if (key < cfqg_key(st, __cfqg))
858                         node = &parent->rb_left;
859                 else {
860                         node = &parent->rb_right;
861                         left = 0;
862                 }
863         }
864
865         if (left)
866                 st->left = &cfqg->rb_node;
867
868         rb_link_node(&cfqg->rb_node, parent, node);
869         rb_insert_color(&cfqg->rb_node, &st->rb);
870 }
871
872 static void
873 cfq_update_group_weight(struct cfq_group *cfqg)
874 {
875         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
876         if (cfqg->needs_update) {
877                 cfqg->weight = cfqg->new_weight;
878                 cfqg->needs_update = false;
879         }
880 }
881
882 static void
883 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
884 {
885         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
886
887         cfq_update_group_weight(cfqg);
888         __cfq_group_service_tree_add(st, cfqg);
889         st->total_weight += cfqg->weight;
890 }
891
892 static void
893 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
894 {
895         struct cfq_rb_root *st = &cfqd->grp_service_tree;
896         struct cfq_group *__cfqg;
897         struct rb_node *n;
898
899         cfqg->nr_cfqq++;
900         if (!RB_EMPTY_NODE(&cfqg->rb_node))
901                 return;
902
903         /*
904          * Currently put the group at the end. Later, implement something
905          * so that groups get a smaller vtime based on their weights, so that
906          * a group does not lose everything if it was not continuously backlogged.
907          */
908         n = rb_last(&st->rb);
909         if (n) {
910                 __cfqg = rb_entry_cfqg(n);
911                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
912         } else
913                 cfqg->vdisktime = st->min_vdisktime;
914         cfq_group_service_tree_add(st, cfqg);
915 }
916
917 static void
918 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
919 {
920         st->total_weight -= cfqg->weight;
921         if (!RB_EMPTY_NODE(&cfqg->rb_node))
922                 cfq_rb_erase(&cfqg->rb_node, st);
923 }
924
925 static void
926 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
927 {
928         struct cfq_rb_root *st = &cfqd->grp_service_tree;
929
930         BUG_ON(cfqg->nr_cfqq < 1);
931         cfqg->nr_cfqq--;
932
933         /* If there are other cfq queues under this group, don't delete it */
934         if (cfqg->nr_cfqq)
935                 return;
936
937         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
938         cfq_group_service_tree_del(st, cfqg);
939         cfqg->saved_workload_slice = 0;
940         cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg),
941                                          &blkio_policy_cfq, 1);
942 }
943
944 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
945                                                 unsigned int *unaccounted_time)
946 {
947         unsigned int slice_used;
948
949         /*
950          * Queue got expired before even a single request completed or
951          * got expired immediately after first request completion.
952          */
953         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
954                 /*
955                  * Also charge the seek time incurred to the group, otherwise
956                  * if there are multiple queues in the group, each can dispatch
957                  * a single request on seeky media and cause lots of seek time
958                  * that the group would otherwise never see.
959                  */
960                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
961                                         1);
962         } else {
963                 slice_used = jiffies - cfqq->slice_start;
964                 if (slice_used > cfqq->allocated_slice) {
965                         *unaccounted_time = slice_used - cfqq->allocated_slice;
966                         slice_used = cfqq->allocated_slice;
967                 }
968                 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
969                         *unaccounted_time += cfqq->slice_start -
970                                         cfqq->dispatch_start;
971         }
972
973         return slice_used;
974 }
975
976 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
977                                 struct cfq_queue *cfqq)
978 {
979         struct cfq_rb_root *st = &cfqd->grp_service_tree;
980         unsigned int used_sl, charge, unaccounted_sl = 0;
981         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
982                         - cfqg->service_tree_idle.count;
983
984         BUG_ON(nr_sync < 0);
985         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
986
987         if (iops_mode(cfqd))
988                 charge = cfqq->slice_dispatch;
989         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
990                 charge = cfqq->allocated_slice;
991
992         /* Can't update vdisktime while group is on service tree */
993         cfq_group_service_tree_del(st, cfqg);
994         cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
995         /* If a new weight was requested, update now, off tree */
996         cfq_group_service_tree_add(st, cfqg);
997
998         /* This group is being expired. Save the context */
999         if (time_after(cfqd->workload_expires, jiffies)) {
1000                 cfqg->saved_workload_slice = cfqd->workload_expires
1001                                                 - jiffies;
1002                 cfqg->saved_workload = cfqd->serving_type;
1003                 cfqg->saved_serving_prio = cfqd->serving_prio;
1004         } else
1005                 cfqg->saved_workload_slice = 0;
1006
1007         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1008                                         st->min_vdisktime);
1009         cfq_log_cfqq(cfqq->cfqd, cfqq,
1010                      "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1011                      used_sl, cfqq->slice_dispatch, charge,
1012                      iops_mode(cfqd), cfqq->nr_sectors);
1013         cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), &blkio_policy_cfq,
1014                                           used_sl, unaccounted_sl);
1015         cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg), &blkio_policy_cfq);
1016 }
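
/*
 * Charging summary for cfq_group_served(): in iops_mode() the group is
 * charged by the number of requests dispatched in the slice; an async queue
 * in a group with no busy sync queues is charged its whole allocated slice;
 * otherwise the group is charged the slice time actually used (capped at the
 * allocated slice by cfq_cfqq_slice_usage()).
 */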
1017
1018 /**
1019  * cfq_init_cfqg_base - initialize base part of a cfq_group
1020  * @cfqg: cfq_group to initialize
1021  *
1022  * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1023  * is enabled or not.
1024  */
1025 static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1026 {
1027         struct cfq_rb_root *st;
1028         int i, j;
1029
1030         for_each_cfqg_st(cfqg, i, j, st)
1031                 *st = CFQ_RB_ROOT;
1032         RB_CLEAR_NODE(&cfqg->rb_node);
1033
1034         cfqg->ttime.last_end_request = jiffies;
1035 }
1036
1037 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1038 static void cfq_update_blkio_group_weight(struct request_queue *q,
1039                                           struct blkio_group *blkg,
1040                                           unsigned int weight)
1041 {
1042         struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1043
1044         cfqg->new_weight = weight;
1045         cfqg->needs_update = true;
1046 }
1047
1048 static void cfq_init_blkio_group(struct blkio_group *blkg)
1049 {
1050         struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1051
1052         cfq_init_cfqg_base(cfqg);
1053         cfqg->weight = blkg->blkcg->weight;
1054 }
1055
1056 /*
1057  * Search for the cfq group the current task belongs to. The request_queue
1058  * lock must be held.
1059  */
1060 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1061                                                 struct blkio_cgroup *blkcg)
1062 {
1063         struct request_queue *q = cfqd->queue;
1064         struct cfq_group *cfqg = NULL;
1065
1066         /* avoid lookup for the common case where there's no blkio cgroup */
1067         if (blkcg == &blkio_root_cgroup) {
1068                 cfqg = cfqd->root_group;
1069         } else {
1070                 struct blkio_group *blkg;
1071
1072                 blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
1073                 if (!IS_ERR(blkg))
1074                         cfqg = blkg_to_cfqg(blkg);
1075         }
1076
1077         return cfqg;
1078 }
1079
1080 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1081 {
1082         /* Currently, all async queues are mapped to root group */
1083         if (!cfq_cfqq_sync(cfqq))
1084                 cfqg = cfqq->cfqd->root_group;
1085
1086         cfqq->cfqg = cfqg;
1087         /* cfqq reference on cfqg */
1088         blkg_get(cfqg_to_blkg(cfqg));
1089 }
1090
1091 #else /* GROUP_IOSCHED */
1092 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1093                                                 struct blkio_cgroup *blkcg)
1094 {
1095         return cfqd->root_group;
1096 }
1097
1098 static inline void
1099 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1100         cfqq->cfqg = cfqg;
1101 }
1102
1103 #endif /* GROUP_IOSCHED */
1104
1105 /*
1106  * The per-group service trees hold all pending cfq_queues that have
1107  * requests waiting to be processed. They are sorted in the order in
1108  * which we will service the queues.
1109  */
1110 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1111                                  bool add_front)
1112 {
1113         struct rb_node **p, *parent;
1114         struct cfq_queue *__cfqq;
1115         unsigned long rb_key;
1116         struct cfq_rb_root *service_tree;
1117         int left;
1118         int new_cfqq = 1;
1119
1120         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1121                                                 cfqq_type(cfqq));
1122         if (cfq_class_idle(cfqq)) {
1123                 rb_key = CFQ_IDLE_DELAY;
1124                 parent = rb_last(&service_tree->rb);
1125                 if (parent && parent != &cfqq->rb_node) {
1126                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1127                         rb_key += __cfqq->rb_key;
1128                 } else
1129                         rb_key += jiffies;
1130         } else if (!add_front) {
1131                 /*
1132                  * Get our rb key offset. Subtract any residual slice
1133                  * value carried from last service. A negative resid
1134                  * count indicates slice overrun, and this should position
1135                  * the next service time further away in the tree.
1136                  */
1137                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1138                 rb_key -= cfqq->slice_resid;
1139                 cfqq->slice_resid = 0;
1140         } else {
1141                 rb_key = -HZ;
1142                 __cfqq = cfq_rb_first(service_tree);
1143                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1144         }
1145
1146         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1147                 new_cfqq = 0;
1148                 /*
1149                  * same position, nothing more to do
1150                  */
1151                 if (rb_key == cfqq->rb_key &&
1152                     cfqq->service_tree == service_tree)
1153                         return;
1154
1155                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1156                 cfqq->service_tree = NULL;
1157         }
1158
1159         left = 1;
1160         parent = NULL;
1161         cfqq->service_tree = service_tree;
1162         p = &service_tree->rb.rb_node;
1163         while (*p) {
1164                 struct rb_node **n;
1165
1166                 parent = *p;
1167                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1168
1169                 /*
1170                  * sort by key, that represents service time.
1171                  */
1172                 if (time_before(rb_key, __cfqq->rb_key))
1173                         n = &(*p)->rb_left;
1174                 else {
1175                         n = &(*p)->rb_right;
1176                         left = 0;
1177                 }
1178
1179                 p = n;
1180         }
1181
1182         if (left)
1183                 service_tree->left = &cfqq->rb_node;
1184
1185         cfqq->rb_key = rb_key;
1186         rb_link_node(&cfqq->rb_node, parent, p);
1187         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1188         service_tree->count++;
1189         if (add_front || !new_cfqq)
1190                 return;
1191         cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1192 }
1193
1194 static struct cfq_queue *
1195 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1196                      sector_t sector, struct rb_node **ret_parent,
1197                      struct rb_node ***rb_link)
1198 {
1199         struct rb_node **p, *parent;
1200         struct cfq_queue *cfqq = NULL;
1201
1202         parent = NULL;
1203         p = &root->rb_node;
1204         while (*p) {
1205                 struct rb_node **n;
1206
1207                 parent = *p;
1208                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1209
1210                 /*
1211                  * Sort strictly based on sector.  Smallest to the left,
1212                  * largest to the right.
1213                  */
1214                 if (sector > blk_rq_pos(cfqq->next_rq))
1215                         n = &(*p)->rb_right;
1216                 else if (sector < blk_rq_pos(cfqq->next_rq))
1217                         n = &(*p)->rb_left;
1218                 else
1219                         break;
1220                 p = n;
1221                 cfqq = NULL;
1222         }
1223
1224         *ret_parent = parent;
1225         if (rb_link)
1226                 *rb_link = p;
1227         return cfqq;
1228 }
1229
1230 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1231 {
1232         struct rb_node **p, *parent;
1233         struct cfq_queue *__cfqq;
1234
1235         if (cfqq->p_root) {
1236                 rb_erase(&cfqq->p_node, cfqq->p_root);
1237                 cfqq->p_root = NULL;
1238         }
1239
1240         if (cfq_class_idle(cfqq))
1241                 return;
1242         if (!cfqq->next_rq)
1243                 return;
1244
1245         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1246         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1247                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1248         if (!__cfqq) {
1249                 rb_link_node(&cfqq->p_node, parent, p);
1250                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1251         } else
1252                 cfqq->p_root = NULL;
1253 }
1254
1255 /*
1256  * Update cfqq's position in the service tree.
1257  */
1258 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1259 {
1260         /*
1261          * Resorting requires the cfqq to be on the RR list already.
1262          */
1263         if (cfq_cfqq_on_rr(cfqq)) {
1264                 cfq_service_tree_add(cfqd, cfqq, 0);
1265                 cfq_prio_tree_add(cfqd, cfqq);
1266         }
1267 }
1268
1269 /*
1270  * Add to the busy list of queues for service, trying to be fair in
1271  * ordering the pending list according to the last request serviced.
1272  */
1273 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1274 {
1275         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1276         BUG_ON(cfq_cfqq_on_rr(cfqq));
1277         cfq_mark_cfqq_on_rr(cfqq);
1278         cfqd->busy_queues++;
1279         if (cfq_cfqq_sync(cfqq))
1280                 cfqd->busy_sync_queues++;
1281
1282         cfq_resort_rr_list(cfqd, cfqq);
1283 }
1284
1285 /*
1286  * Called when the cfqq no longer has requests pending, remove it from
1287  * the service tree.
1288  */
1289 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1290 {
1291         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1292         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1293         cfq_clear_cfqq_on_rr(cfqq);
1294
1295         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1296                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1297                 cfqq->service_tree = NULL;
1298         }
1299         if (cfqq->p_root) {
1300                 rb_erase(&cfqq->p_node, cfqq->p_root);
1301                 cfqq->p_root = NULL;
1302         }
1303
1304         cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1305         BUG_ON(!cfqd->busy_queues);
1306         cfqd->busy_queues--;
1307         if (cfq_cfqq_sync(cfqq))
1308                 cfqd->busy_sync_queues--;
1309 }
1310
1311 /*
1312  * rb tree support functions
1313  */
1314 static void cfq_del_rq_rb(struct request *rq)
1315 {
1316         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1317         const int sync = rq_is_sync(rq);
1318
1319         BUG_ON(!cfqq->queued[sync]);
1320         cfqq->queued[sync]--;
1321
1322         elv_rb_del(&cfqq->sort_list, rq);
1323
1324         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1325                 /*
1326                  * Queue will be deleted from service tree when we actually
1327                  * expire it later. Right now just remove it from prio tree
1328                  * as it is empty.
1329                  */
1330                 if (cfqq->p_root) {
1331                         rb_erase(&cfqq->p_node, cfqq->p_root);
1332                         cfqq->p_root = NULL;
1333                 }
1334         }
1335 }
1336
1337 static void cfq_add_rq_rb(struct request *rq)
1338 {
1339         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1340         struct cfq_data *cfqd = cfqq->cfqd;
1341         struct request *prev;
1342
1343         cfqq->queued[rq_is_sync(rq)]++;
1344
1345         elv_rb_add(&cfqq->sort_list, rq);
1346
1347         if (!cfq_cfqq_on_rr(cfqq))
1348                 cfq_add_cfqq_rr(cfqd, cfqq);
1349
1350         /*
1351          * check if this request is a better next-serve candidate
1352          */
1353         prev = cfqq->next_rq;
1354         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1355
1356         /*
1357          * adjust priority tree position, if ->next_rq changes
1358          */
1359         if (prev != cfqq->next_rq)
1360                 cfq_prio_tree_add(cfqd, cfqq);
1361
1362         BUG_ON(!cfqq->next_rq);
1363 }
1364
1365 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1366 {
1367         elv_rb_del(&cfqq->sort_list, rq);
1368         cfqq->queued[rq_is_sync(rq)]--;
1369         cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1370                                            &blkio_policy_cfq, rq_data_dir(rq),
1371                                            rq_is_sync(rq));
1372         cfq_add_rq_rb(rq);
1373         cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1374                                         &blkio_policy_cfq,
1375                                         cfqg_to_blkg(cfqq->cfqd->serving_group),
1376                                         rq_data_dir(rq), rq_is_sync(rq));
1377 }
1378
1379 static struct request *
1380 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1381 {
1382         struct task_struct *tsk = current;
1383         struct cfq_io_cq *cic;
1384         struct cfq_queue *cfqq;
1385
1386         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1387         if (!cic)
1388                 return NULL;
1389
1390         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1391         if (cfqq) {
1392                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1393
1394                 return elv_rb_find(&cfqq->sort_list, sector);
1395         }
1396
1397         return NULL;
1398 }
1399
1400 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1401 {
1402         struct cfq_data *cfqd = q->elevator->elevator_data;
1403
1404         cfqd->rq_in_driver++;
1405         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1406                                                 cfqd->rq_in_driver);
1407
1408         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1409 }
1410
1411 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1412 {
1413         struct cfq_data *cfqd = q->elevator->elevator_data;
1414
1415         WARN_ON(!cfqd->rq_in_driver);
1416         cfqd->rq_in_driver--;
1417         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1418                                                 cfqd->rq_in_driver);
1419 }
1420
1421 static void cfq_remove_request(struct request *rq)
1422 {
1423         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1424
1425         if (cfqq->next_rq == rq)
1426                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1427
1428         list_del_init(&rq->queuelist);
1429         cfq_del_rq_rb(rq);
1430
1431         cfqq->cfqd->rq_queued--;
1432         cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1433                                            &blkio_policy_cfq, rq_data_dir(rq),
1434                                            rq_is_sync(rq));
1435         if (rq->cmd_flags & REQ_PRIO) {
1436                 WARN_ON(!cfqq->prio_pending);
1437                 cfqq->prio_pending--;
1438         }
1439 }
1440
1441 static int cfq_merge(struct request_queue *q, struct request **req,
1442                      struct bio *bio)
1443 {
1444         struct cfq_data *cfqd = q->elevator->elevator_data;
1445         struct request *__rq;
1446
1447         __rq = cfq_find_rq_fmerge(cfqd, bio);
1448         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1449                 *req = __rq;
1450                 return ELEVATOR_FRONT_MERGE;
1451         }
1452
1453         return ELEVATOR_NO_MERGE;
1454 }
1455
1456 static void cfq_merged_request(struct request_queue *q, struct request *req,
1457                                int type)
1458 {
1459         if (type == ELEVATOR_FRONT_MERGE) {
1460                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1461
1462                 cfq_reposition_rq_rb(cfqq, req);
1463         }
1464 }
1465
1466 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1467                                 struct bio *bio)
1468 {
1469         cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(req)),
1470                                            &blkio_policy_cfq, bio_data_dir(bio),
1471                                            cfq_bio_sync(bio));
1472 }
1473
1474 static void
1475 cfq_merged_requests(struct request_queue *q, struct request *rq,
1476                     struct request *next)
1477 {
1478         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1479         struct cfq_data *cfqd = q->elevator->elevator_data;
1480
1481         /*
1482          * reposition in fifo if next is older than rq
1483          */
1484         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1485             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1486                 list_move(&rq->queuelist, &next->queuelist);
1487                 rq_set_fifo_time(rq, rq_fifo_time(next));
1488         }
1489
1490         if (cfqq->next_rq == next)
1491                 cfqq->next_rq = rq;
1492         cfq_remove_request(next);
1493         cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1494                                            &blkio_policy_cfq, rq_data_dir(next),
1495                                            rq_is_sync(next));
1496
1497         cfqq = RQ_CFQQ(next);
1498         /*
1499          * All requests of this queue were merged into other queues, so delete
1500          * it from the service tree. If it's the active_queue,
1501          * cfq_dispatch_requests() will choose to expire it or to idle.
1502          */
1503         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
1504             cfqq != cfqd->active_queue)
1505                 cfq_del_cfqq_rr(cfqd, cfqq);
1506 }
1507
1508 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1509                            struct bio *bio)
1510 {
1511         struct cfq_data *cfqd = q->elevator->elevator_data;
1512         struct cfq_io_cq *cic;
1513         struct cfq_queue *cfqq;
1514
1515         /*
1516          * Disallow merge of a sync bio into an async request.
1517          */
1518         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1519                 return false;
1520
1521         /*
1522          * Look up the cfqq that this bio will be queued with and allow
1523          * merge only if rq is queued there.
1524          */
1525         cic = cfq_cic_lookup(cfqd, current->io_context);
1526         if (!cic)
1527                 return false;
1528
1529         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1530         return cfqq == RQ_CFQQ(rq);
1531 }
1532
1533 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1534 {
1535         del_timer(&cfqd->idle_slice_timer);
1536         cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
1537                                            &blkio_policy_cfq);
1538 }
1539
1540 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1541                                    struct cfq_queue *cfqq)
1542 {
1543         if (cfqq) {
1544                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1545                                 cfqd->serving_prio, cfqd->serving_type);
1546                 cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg),
1547                                                         &blkio_policy_cfq);
1548                 cfqq->slice_start = 0;
1549                 cfqq->dispatch_start = jiffies;
1550                 cfqq->allocated_slice = 0;
1551                 cfqq->slice_end = 0;
1552                 cfqq->slice_dispatch = 0;
1553                 cfqq->nr_sectors = 0;
1554
1555                 cfq_clear_cfqq_wait_request(cfqq);
1556                 cfq_clear_cfqq_must_dispatch(cfqq);
1557                 cfq_clear_cfqq_must_alloc_slice(cfqq);
1558                 cfq_clear_cfqq_fifo_expire(cfqq);
1559                 cfq_mark_cfqq_slice_new(cfqq);
1560
1561                 cfq_del_timer(cfqd, cfqq);
1562         }
1563
1564         cfqd->active_queue = cfqq;
1565 }
1566
1567 /*
1568  * current cfqq expired its slice (or was too idle), select new one
1569  */
1570 static void
1571 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1572                     bool timed_out)
1573 {
1574         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1575
1576         if (cfq_cfqq_wait_request(cfqq))
1577                 cfq_del_timer(cfqd, cfqq);
1578
1579         cfq_clear_cfqq_wait_request(cfqq);
1580         cfq_clear_cfqq_wait_busy(cfqq);
1581
1582         /*
1583          * If this cfqq is shared between multiple processes, check to
1584          * make sure that those processes are still issuing I/Os within
1585          * the mean seek distance.  If not, it may be time to break the
1586          * queues apart again.
1587          */
1588         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1589                 cfq_mark_cfqq_split_coop(cfqq);
1590
1591         /*
1592          * store what was left of this slice, if the queue idled/timed out
1593          */
1594         if (timed_out) {
1595                 if (cfq_cfqq_slice_new(cfqq))
1596                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
1597                 else
1598                         cfqq->slice_resid = cfqq->slice_end - jiffies;
1599                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1600         }
1601
1602         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1603
1604         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1605                 cfq_del_cfqq_rr(cfqd, cfqq);
1606
1607         cfq_resort_rr_list(cfqd, cfqq);
1608
1609         if (cfqq == cfqd->active_queue)
1610                 cfqd->active_queue = NULL;
1611
1612         if (cfqd->active_cic) {
1613                 put_io_context(cfqd->active_cic->icq.ioc);
1614                 cfqd->active_cic = NULL;
1615         }
1616 }
1617
1618 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1619 {
1620         struct cfq_queue *cfqq = cfqd->active_queue;
1621
1622         if (cfqq)
1623                 __cfq_slice_expired(cfqd, cfqq, timed_out);
1624 }
1625
1626 /*
1627  * Get next queue for service. Unless we have a queue preemption,
1628  * we'll simply select the first cfqq in the service tree.
1629  */
1630 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1631 {
1632         struct cfq_rb_root *service_tree =
1633                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1634                                         cfqd->serving_type);
1635
1636         if (!cfqd->rq_queued)
1637                 return NULL;
1638
1639         /* There is nothing to dispatch */
1640         if (!service_tree)
1641                 return NULL;
1642         if (RB_EMPTY_ROOT(&service_tree->rb))
1643                 return NULL;
1644         return cfq_rb_first(service_tree);
1645 }
1646
1647 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1648 {
1649         struct cfq_group *cfqg;
1650         struct cfq_queue *cfqq;
1651         int i, j;
1652         struct cfq_rb_root *st;
1653
1654         if (!cfqd->rq_queued)
1655                 return NULL;
1656
1657         cfqg = cfq_get_next_cfqg(cfqd);
1658         if (!cfqg)
1659                 return NULL;
1660
1661         for_each_cfqg_st(cfqg, i, j, st)
1662                 if ((cfqq = cfq_rb_first(st)) != NULL)
1663                         return cfqq;
1664         return NULL;
1665 }
1666
1667 /*
1668  * Get and set a new active queue for service.
1669  */
1670 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1671                                               struct cfq_queue *cfqq)
1672 {
1673         if (!cfqq)
1674                 cfqq = cfq_get_next_queue(cfqd);
1675
1676         __cfq_set_active_queue(cfqd, cfqq);
1677         return cfqq;
1678 }
1679
1680 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1681                                           struct request *rq)
1682 {
1683         if (blk_rq_pos(rq) >= cfqd->last_position)
1684                 return blk_rq_pos(rq) - cfqd->last_position;
1685         else
1686                 return cfqd->last_position - blk_rq_pos(rq);
1687 }
1688
1689 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1690                                struct request *rq)
1691 {
1692         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1693 }
1694
1695 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1696                                     struct cfq_queue *cur_cfqq)
1697 {
1698         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1699         struct rb_node *parent, *node;
1700         struct cfq_queue *__cfqq;
1701         sector_t sector = cfqd->last_position;
1702
1703         if (RB_EMPTY_ROOT(root))
1704                 return NULL;
1705
1706         /*
1707          * First, if we find a request starting at the end of the last
1708          * request, choose it.
1709          */
1710         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1711         if (__cfqq)
1712                 return __cfqq;
1713
1714         /*
1715          * If the exact sector wasn't found, the parent of the NULL leaf
1716          * will contain the closest sector.
1717          */
1718         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1719         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1720                 return __cfqq;
1721
1722         if (blk_rq_pos(__cfqq->next_rq) < sector)
1723                 node = rb_next(&__cfqq->p_node);
1724         else
1725                 node = rb_prev(&__cfqq->p_node);
1726         if (!node)
1727                 return NULL;
1728
1729         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1730         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1731                 return __cfqq;
1732
1733         return NULL;
1734 }
1735
1736 /*
1737  * cfqd - obvious
1738  * cur_cfqq - passed in so that we don't decide that the current queue is
1739  *            closely cooperating with itself.
1740  *
1741  * So, basically we're assuming that cur_cfqq has dispatched at least
1742  * one request, and that cfqd->last_position reflects a position on the disk
1743  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1744  * assumption.
1745  */
1746 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1747                                               struct cfq_queue *cur_cfqq)
1748 {
1749         struct cfq_queue *cfqq;
1750
1751         if (cfq_class_idle(cur_cfqq))
1752                 return NULL;
1753         if (!cfq_cfqq_sync(cur_cfqq))
1754                 return NULL;
1755         if (CFQQ_SEEKY(cur_cfqq))
1756                 return NULL;
1757
1758         /*
1759          * Don't search priority tree if it's the only queue in the group.
1760          */
1761         if (cur_cfqq->cfqg->nr_cfqq == 1)
1762                 return NULL;
1763
1764         /*
1765          * We should notice if some of the queues are cooperating, e.g.
1766          * working closely on the same area of the disk. In that case,
1767          * we can group them together and don't waste time idling.
1768          */
1769         cfqq = cfqq_close(cfqd, cur_cfqq);
1770         if (!cfqq)
1771                 return NULL;
1772
1773         /* If new queue belongs to different cfq_group, don't choose it */
1774         if (cur_cfqq->cfqg != cfqq->cfqg)
1775                 return NULL;
1776
1777         /*
1778          * It only makes sense to merge sync queues.
1779          */
1780         if (!cfq_cfqq_sync(cfqq))
1781                 return NULL;
1782         if (CFQQ_SEEKY(cfqq))
1783                 return NULL;
1784
1785         /*
1786          * Do not merge queues of different priority classes
1787          */
1788         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1789                 return NULL;
1790
1791         return cfqq;
1792 }
1793
1794 /*
1795  * Determine whether we should enforce idle window for this queue.
1796  */
1797
1798 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1799 {
1800         enum wl_prio_t prio = cfqq_prio(cfqq);
1801         struct cfq_rb_root *service_tree = cfqq->service_tree;
1802
1803         BUG_ON(!service_tree);
1804         BUG_ON(!service_tree->count);
1805
1806         if (!cfqd->cfq_slice_idle)
1807                 return false;
1808
1809         /* We never do for idle class queues. */
1810         if (prio == IDLE_WORKLOAD)
1811                 return false;
1812
1813         /* We do for queues that were marked with idle window flag. */
1814         if (cfq_cfqq_idle_window(cfqq) &&
1815            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1816                 return true;
1817
1818         /*
1819          * Otherwise, we do only if they are the last ones
1820          * in their service tree.
1821          */
1822         if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
1823            !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
1824                 return true;
1825         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1826                         service_tree->count);
1827         return false;
1828 }
1829
1830 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1831 {
1832         struct cfq_queue *cfqq = cfqd->active_queue;
1833         struct cfq_io_cq *cic;
1834         unsigned long sl, group_idle = 0;
1835
1836         /*
1837          * SSD device without seek penalty, disable idling. But only do so
1838          * for devices that support queuing, otherwise we still have a problem
1839          * with sync vs async workloads.
1840          */
1841         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1842                 return;
1843
1844         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1845         WARN_ON(cfq_cfqq_slice_new(cfqq));
1846
1847         /*
1848          * idle is disabled, either manually or by past process history
1849          */
1850         if (!cfq_should_idle(cfqd, cfqq)) {
1851                 /* no queue idling. Check for group idling */
1852                 if (cfqd->cfq_group_idle)
1853                         group_idle = cfqd->cfq_group_idle;
1854                 else
1855                         return;
1856         }
1857
1858         /*
1859          * still active requests from this queue, don't idle
1860          */
1861         if (cfqq->dispatched)
1862                 return;
1863
1864         /*
1865          * task has exited, don't wait
1866          */
1867         cic = cfqd->active_cic;
1868         if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
1869                 return;
1870
1871         /*
1872          * If our average think time is larger than the remaining time
1873          * slice, then don't idle. This avoids overrunning the allotted
1874          * time slice.
1875          */
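        /*
         * E.g. (illustrative numbers, not from the original source): with a
         * mean think time of 12 jiffies but only 5 jiffies of slice left,
         * arming the idle timer would mostly overrun the slice waiting for a
         * request that can no longer be serviced in time.
         */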
1876         if (sample_valid(cic->ttime.ttime_samples) &&
1877             (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
1878                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
1879                              cic->ttime.ttime_mean);
1880                 return;
1881         }
1882
1883         /* There are other queues in the group, don't do group idle */
1884         if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1885                 return;
1886
1887         cfq_mark_cfqq_wait_request(cfqq);
1888
1889         if (group_idle)
1890                 sl = cfqd->cfq_group_idle;
1891         else
1892                 sl = cfqd->cfq_slice_idle;
1893
1894         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1895         cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
1896                                                &blkio_policy_cfq);
1897         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1898                         group_idle ? 1 : 0);
1899 }
1900
1901 /*
1902  * Move request from internal lists to the request queue dispatch list.
1903  */
1904 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1905 {
1906         struct cfq_data *cfqd = q->elevator->elevator_data;
1907         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1908
1909         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1910
1911         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1912         cfq_remove_request(rq);
1913         cfqq->dispatched++;
1914         (RQ_CFQG(rq))->dispatched++;
1915         elv_dispatch_sort(q, rq);
1916
1917         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
1918         cfqq->nr_sectors += blk_rq_sectors(rq);
1919         cfq_blkiocg_update_dispatch_stats(cfqg_to_blkg(cfqq->cfqg),
1920                                           &blkio_policy_cfq, blk_rq_bytes(rq),
1921                                           rq_data_dir(rq), rq_is_sync(rq));
1922 }
1923
1924 /*
1925  * return expired entry, or NULL to just start from scratch in rbtree
1926  */
1927 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1928 {
1929         struct request *rq = NULL;
1930
1931         if (cfq_cfqq_fifo_expire(cfqq))
1932                 return NULL;
1933
1934         cfq_mark_cfqq_fifo_expire(cfqq);
1935
1936         if (list_empty(&cfqq->fifo))
1937                 return NULL;
1938
1939         rq = rq_entry_fifo(cfqq->fifo.next);
1940         if (time_before(jiffies, rq_fifo_time(rq)))
1941                 rq = NULL;
1942
1943         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1944         return rq;
1945 }
1946
1947 static inline int
1948 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1949 {
1950         const int base_rq = cfqd->cfq_slice_async_rq;
1951
1952         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1953
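        /*
         * Illustrative (assuming the default cfq_slice_async_rq of 2 and
         * IOPRIO_BE_NR of 8): a best-effort queue at ioprio 4 may dispatch at
         * most 2 * 2 * (8 - 4) = 16 requests per slice, so lower ioprio
         * values (higher priority) get a deeper budget.
         */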
1954         return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
1955 }
1956
1957 /*
1958  * Must be called with the queue_lock held.
1959  */
1960 static int cfqq_process_refs(struct cfq_queue *cfqq)
1961 {
1962         int process_refs, io_refs;
1963
1964         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
1965         process_refs = cfqq->ref - io_refs;
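        /*
         * e.g. (illustrative): ref == 4 with 3 allocated requests leaves a
         * single non-IO (process / merge chain) reference.
         */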
1966         BUG_ON(process_refs < 0);
1967         return process_refs;
1968 }
1969
1970 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1971 {
1972         int process_refs, new_process_refs;
1973         struct cfq_queue *__cfqq;
1974
1975         /*
1976          * If there are no process references on the new_cfqq, then it is
1977          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
1978          * chain may have dropped their last reference (not just their
1979          * last process reference).
1980          */
1981         if (!cfqq_process_refs(new_cfqq))
1982                 return;
1983
1984         /* Avoid a circular list and skip interim queue merges */
1985         while ((__cfqq = new_cfqq->new_cfqq)) {
1986                 if (__cfqq == cfqq)
1987                         return;
1988                 new_cfqq = __cfqq;
1989         }
1990
1991         process_refs = cfqq_process_refs(cfqq);
1992         new_process_refs = cfqq_process_refs(new_cfqq);
1993         /*
1994          * If the process for the cfqq has gone away, there is no
1995          * sense in merging the queues.
1996          */
1997         if (process_refs == 0 || new_process_refs == 0)
1998                 return;
1999
2000         /*
2001          * Merge in the direction of the lesser amount of work.
2002          */
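        /*
         * E.g. (illustrative): if cfqq holds 1 process reference and new_cfqq
         * holds 3, cfqq->new_cfqq is pointed at new_cfqq and new_cfqq absorbs
         * that single reference, so later merges collapse onto the busier
         * queue.
         */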
2003         if (new_process_refs >= process_refs) {
2004                 cfqq->new_cfqq = new_cfqq;
2005                 new_cfqq->ref += process_refs;
2006         } else {
2007                 new_cfqq->new_cfqq = cfqq;
2008                 cfqq->ref += new_process_refs;
2009         }
2010 }
2011
2012 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2013                                 struct cfq_group *cfqg, enum wl_prio_t prio)
2014 {
2015         struct cfq_queue *queue;
2016         int i;
2017         bool key_valid = false;
2018         unsigned long lowest_key = 0;
2019         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2020
2021         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2022                 /* select the one with lowest rb_key */
2023                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2024                 if (queue &&
2025                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
2026                         lowest_key = queue->rb_key;
2027                         cur_best = i;
2028                         key_valid = true;
2029                 }
2030         }
2031
2032         return cur_best;
2033 }
2034
2035 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2036 {
2037         unsigned slice;
2038         unsigned count;
2039         struct cfq_rb_root *st;
2040         unsigned group_slice;
2041         enum wl_prio_t original_prio = cfqd->serving_prio;
2042
2043         /* Choose next priority. RT > BE > IDLE */
2044         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2045                 cfqd->serving_prio = RT_WORKLOAD;
2046         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2047                 cfqd->serving_prio = BE_WORKLOAD;
2048         else {
2049                 cfqd->serving_prio = IDLE_WORKLOAD;
2050                 cfqd->workload_expires = jiffies + 1;
2051                 return;
2052         }
2053
2054         if (original_prio != cfqd->serving_prio)
2055                 goto new_workload;
2056
2057         /*
2058          * For RT and BE, we have to choose also the type
2059          * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2060          * expiration time
2061          */
2062         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2063         count = st->count;
2064
2065         /*
2066          * check workload expiration, and that we still have other queues ready
2067          */
2068         if (count && !time_after(jiffies, cfqd->workload_expires))
2069                 return;
2070
2071 new_workload:
2072         /* otherwise select new workload type */
2073         cfqd->serving_type =
2074                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2075         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2076         count = st->count;
2077
2078         /*
2079          * the workload slice is computed as a fraction of target latency
2080          * proportional to the number of queues in that workload, over
2081          * all the queues in the same priority class
2082          */
2083         group_slice = cfq_group_slice(cfqd, cfqg);
2084
2085         slice = group_slice * count /
2086                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2087                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
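        /*
         * Worked example (illustrative, assuming one group at default weight
         * so group_slice is the full 300ms target latency): with 2 queues on
         * the chosen service tree out of 4 busy queues in this priority
         * class, the workload is given 300 * 2 / 4 = 150ms before the type is
         * re-evaluated.
         */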
2088
2089         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2090                 unsigned int tmp;
2091
2092                 /*
2093                  * Async queues are currently system wide. Just taking
2094                  * proportion of queues within the same group will lead to higher
2095                  * async ratio system wide as generally root group is going
2096                  * to have higher weight. A more accurate thing would be to
2097                  * calculate system wide async/sync ratio.
2098                  */
2099                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2100                 tmp = tmp/cfqd->busy_queues;
2101                 slice = min_t(unsigned, slice, tmp);
2102
2103                 /* async workload slice is scaled down according to
2104                  * the sync/async slice ratio. */
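                /*
                 * E.g. (illustrative defaults): with a 40ms async and 100ms
                 * sync base slice, the async workload slice is scaled down to
                 * 40% of what an equally sized sync workload would receive.
                 */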
2105                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2106         } else
2107                 /* sync workload slice is at least 2 * cfq_slice_idle */
2108                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2109
2110         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2111         cfq_log(cfqd, "workload slice:%d", slice);
2112         cfqd->workload_expires = jiffies + slice;
2113 }
2114
2115 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2116 {
2117         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2118         struct cfq_group *cfqg;
2119
2120         if (RB_EMPTY_ROOT(&st->rb))
2121                 return NULL;
2122         cfqg = cfq_rb_first_group(st);
2123         update_min_vdisktime(st);
2124         return cfqg;
2125 }
2126
2127 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2128 {
2129         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2130
2131         cfqd->serving_group = cfqg;
2132
2133         /* Restore the workload type data */
2134         if (cfqg->saved_workload_slice) {
2135                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2136                 cfqd->serving_type = cfqg->saved_workload;
2137                 cfqd->serving_prio = cfqg->saved_serving_prio;
2138         } else
2139                 cfqd->workload_expires = jiffies - 1;
2140
2141         choose_service_tree(cfqd, cfqg);
2142 }
2143
2144 /*
2145  * Select a queue for service. If we have a current active queue,
2146  * check whether to continue servicing it, or retrieve and set a new one.
2147  */
2148 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2149 {
2150         struct cfq_queue *cfqq, *new_cfqq = NULL;
2151
2152         cfqq = cfqd->active_queue;
2153         if (!cfqq)
2154                 goto new_queue;
2155
2156         if (!cfqd->rq_queued)
2157                 return NULL;
2158
2159         /*
2160          * We were waiting for group to get backlogged. Expire the queue
2161          */
2162         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2163                 goto expire;
2164
2165         /*
2166          * The active queue has run out of time, expire it and select new.
2167          */
2168         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2169                 /*
2170                  * If slice had not expired at the completion of last request
2171                  * we might not have turned on wait_busy flag. Don't expire
2172                  * the queue yet. Allow the group to get backlogged.
2173                  *
2174                  * The very fact that we have used the slice means we
2175                  * have been idling all along on this queue and it should be
2176                  * ok to wait for this request to complete.
2177                  */
2178                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2179                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2180                         cfqq = NULL;
2181                         goto keep_queue;
2182                 } else
2183                         goto check_group_idle;
2184         }
2185
2186         /*
2187          * The active queue has requests and isn't expired, allow it to
2188          * dispatch.
2189          */
2190         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2191                 goto keep_queue;
2192
2193         /*
2194          * If another queue has a request waiting within our mean seek
2195          * distance, let it run.  The expire code will check for close
2196          * cooperators and put the close queue at the front of the service
2197          * tree.  If possible, merge the expiring queue with the new cfqq.
2198          */
2199         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2200         if (new_cfqq) {
2201                 if (!cfqq->new_cfqq)
2202                         cfq_setup_merge(cfqq, new_cfqq);
2203                 goto expire;
2204         }
2205
2206         /*
2207          * No requests pending. If the active queue still has requests in
2208          * flight or is idling for a new request, allow either of these
2209          * conditions to happen (or time out) before selecting a new queue.
2210          */
2211         if (timer_pending(&cfqd->idle_slice_timer)) {
2212                 cfqq = NULL;
2213                 goto keep_queue;
2214         }
2215
2216         /*
2217          * This is a deep seek queue, but the device is much faster than
2218          * the queue can deliver, don't idle
2219          **/
2220         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2221             (cfq_cfqq_slice_new(cfqq) ||
2222             (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2223                 cfq_clear_cfqq_deep(cfqq);
2224                 cfq_clear_cfqq_idle_window(cfqq);
2225         }
2226
2227         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2228                 cfqq = NULL;
2229                 goto keep_queue;
2230         }
2231
2232         /*
2233          * If group idle is enabled and there are requests dispatched from
2234          * this group, wait for requests to complete.
2235          */
2236 check_group_idle:
2237         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2238             cfqq->cfqg->dispatched &&
2239             !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
2240                 cfqq = NULL;
2241                 goto keep_queue;
2242         }
2243
2244 expire:
2245         cfq_slice_expired(cfqd, 0);
2246 new_queue:
2247         /*
2248          * Current queue expired. Check if we have to switch to a new
2249          * service tree
2250          */
2251         if (!new_cfqq)
2252                 cfq_choose_cfqg(cfqd);
2253
2254         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2255 keep_queue:
2256         return cfqq;
2257 }
2258
2259 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2260 {
2261         int dispatched = 0;
2262
2263         while (cfqq->next_rq) {
2264                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2265                 dispatched++;
2266         }
2267
2268         BUG_ON(!list_empty(&cfqq->fifo));
2269
2270         /* By default cfqq is not expired if it is empty. Do it explicitly */
2271         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2272         return dispatched;
2273 }
2274
2275 /*
2276  * Drain our current requests. Used for barriers and when switching
2277  * io schedulers on-the-fly.
2278  */
2279 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2280 {
2281         struct cfq_queue *cfqq;
2282         int dispatched = 0;
2283
2284         /* Expire the timeslice of the current active queue first */
2285         cfq_slice_expired(cfqd, 0);
2286         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2287                 __cfq_set_active_queue(cfqd, cfqq);
2288                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2289         }
2290
2291         BUG_ON(cfqd->busy_queues);
2292
2293         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2294         return dispatched;
2295 }
2296
2297 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2298         struct cfq_queue *cfqq)
2299 {
2300         /* the queue hasn't finished any request, can't estimate */
2301         if (cfq_cfqq_slice_new(cfqq))
2302                 return true;
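        /*
         * Rough estimate (illustrative): if each in-flight request is assumed
         * to take about one cfq_slice_idle to complete, a queue with 4
         * requests dispatched and less than 4 * cfq_slice_idle of slice left
         * is treated as "used soon".
         */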
2303         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2304                 cfqq->slice_end))
2305                 return true;
2306
2307         return false;
2308 }
2309
2310 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2311 {
2312         unsigned int max_dispatch;
2313
2314         /*
2315          * Drain async requests before we start sync IO
2316          */
2317         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2318                 return false;
2319
2320         /*
2321          * If this is an async queue and we have sync IO in flight, let it wait
2322          */
2323         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2324                 return false;
2325
2326         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2327         if (cfq_class_idle(cfqq))
2328                 max_dispatch = 1;
2329
2330         /*
2331          * Does this cfqq already have too much IO in flight?
2332          */
2333         if (cfqq->dispatched >= max_dispatch) {
2334                 bool promote_sync = false;
2335                 /*
2336                  * idle queue must always only have a single IO in flight
2337                  */
2338                 if (cfq_class_idle(cfqq))
2339                         return false;
2340
2341                 /*
2342                  * If there is only one sync queue
2343                  * we can ignore async queue here and give the sync
2344                  * queue no dispatch limit. The reason is a sync queue can
2345                  * preempt async queue, limiting the sync queue doesn't make
2346                  * sense. This is useful for aiostress test.
2347                  */
2348                 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2349                         promote_sync = true;
2350
2351                 /*
2352                  * We have other queues, don't allow more IO from this one
2353                  */
2354                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2355                                 !promote_sync)
2356                         return false;
2357
2358                 /*
2359                  * Sole queue user, no limit
2360                  */
2361                 if (cfqd->busy_queues == 1 || promote_sync)
2362                         max_dispatch = -1;
2363                 else
2364                         /*
2365                          * Normally we start throttling cfqq when cfq_quantum/2
2366                          * requests have been dispatched. But we can drive
2367                          * deeper queue depths at the beginning of slice
2368                          * subject to the upper limit of cfq_quantum.
2369                          */
2370                         max_dispatch = cfqd->cfq_quantum;
2371         }
2372
2373         /*
2374          * Async queues must wait a bit before being allowed dispatch.
2375          * We also ramp up the dispatch depth gradually for async IO,
2376          * based on the last sync IO we serviced
2377          */
2378         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2379                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2380                 unsigned int depth;
2381
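                /*
                 * Illustrative (assuming HZ=1000 and the default 100ms sync
                 * slice): 300ms after the last delayed sync completion the
                 * async depth is 3, while right after a sync completion it
                 * collapses to 0 (or 1 if nothing is dispatched yet).
                 */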
2382                 depth = last_sync / cfqd->cfq_slice[1];
2383                 if (!depth && !cfqq->dispatched)
2384                         depth = 1;
2385                 if (depth < max_dispatch)
2386                         max_dispatch = depth;
2387         }
2388
2389         /*
2390          * If we're below the current max, allow a dispatch
2391          */
2392         return cfqq->dispatched < max_dispatch;
2393 }
2394
2395 /*
2396  * Dispatch a request from cfqq, moving them to the request queue
2397  * dispatch list.
2398  */
2399 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2400 {
2401         struct request *rq;
2402
2403         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2404
2405         if (!cfq_may_dispatch(cfqd, cfqq))
2406                 return false;
2407
2408         /*
2409          * follow expired path, else get first next available
2410          */
2411         rq = cfq_check_fifo(cfqq);
2412         if (!rq)
2413                 rq = cfqq->next_rq;
2414
2415         /*
2416          * insert request into driver dispatch list
2417          */
2418         cfq_dispatch_insert(cfqd->queue, rq);
2419
2420         if (!cfqd->active_cic) {
2421                 struct cfq_io_cq *cic = RQ_CIC(rq);
2422
2423                 atomic_long_inc(&cic->icq.ioc->refcount);
2424                 cfqd->active_cic = cic;
2425         }
2426
2427         return true;
2428 }
2429
2430 /*
2431  * Find the cfqq that we need to service and move a request from that to the
2432  * dispatch list
2433  */
2434 static int cfq_dispatch_requests(struct request_queue *q, int force)
2435 {
2436         struct cfq_data *cfqd = q->elevator->elevator_data;
2437         struct cfq_queue *cfqq;
2438
2439         if (!cfqd->busy_queues)
2440                 return 0;
2441
2442         if (unlikely(force))
2443                 return cfq_forced_dispatch(cfqd);
2444
2445         cfqq = cfq_select_queue(cfqd);
2446         if (!cfqq)
2447                 return 0;
2448
2449         /*
2450          * Dispatch a request from this cfqq, if it is allowed
2451          */
2452         if (!cfq_dispatch_request(cfqd, cfqq))
2453                 return 0;
2454
2455         cfqq->slice_dispatch++;
2456         cfq_clear_cfqq_must_dispatch(cfqq);
2457
2458         /*
2459          * expire an async queue immediately if it has used up its slice. idle
2460          * queues always expire after 1 dispatch round.
2461          */
2462         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2463             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2464             cfq_class_idle(cfqq))) {
2465                 cfqq->slice_end = jiffies + 1;
2466                 cfq_slice_expired(cfqd, 0);
2467         }
2468
2469         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2470         return 1;
2471 }
2472
2473 /*
2474  * task holds one reference to the queue, dropped when task exits. each rq
2475  * in-flight on this queue also holds a reference, dropped when rq is freed.
2476  *
2477  * Each cfq queue took a reference on the parent group. Drop it now.
2478  * queue lock must be held here.
2479  */
2480 static void cfq_put_queue(struct cfq_queue *cfqq)
2481 {
2482         struct cfq_data *cfqd = cfqq->cfqd;
2483         struct cfq_group *cfqg;
2484
2485         BUG_ON(cfqq->ref <= 0);
2486
2487         cfqq->ref--;
2488         if (cfqq->ref)
2489                 return;
2490
2491         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2492         BUG_ON(rb_first(&cfqq->sort_list));
2493         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2494         cfqg = cfqq->cfqg;
2495
2496         if (unlikely(cfqd->active_queue == cfqq)) {
2497                 __cfq_slice_expired(cfqd, cfqq, 0);
2498                 cfq_schedule_dispatch(cfqd);
2499         }
2500
2501         BUG_ON(cfq_cfqq_on_rr(cfqq));
2502         kmem_cache_free(cfq_pool, cfqq);
2503         blkg_put(cfqg_to_blkg(cfqg));
2504 }
2505
2506 static void cfq_put_cooperator(struct cfq_queue *cfqq)
2507 {
2508         struct cfq_queue *__cfqq, *next;
2509
2510         /*
2511          * If this queue was scheduled to merge with another queue, be
2512          * sure to drop the reference taken on that queue (and others in
2513          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2514          */
2515         __cfqq = cfqq->new_cfqq;
2516         while (__cfqq) {
2517                 if (__cfqq == cfqq) {
2518                         WARN(1, "cfqq->new_cfqq loop detected\n");
2519                         break;
2520                 }
2521                 next = __cfqq->new_cfqq;
2522                 cfq_put_queue(__cfqq);
2523                 __cfqq = next;
2524         }
2525 }
2526
2527 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2528 {
2529         if (unlikely(cfqq == cfqd->active_queue)) {
2530                 __cfq_slice_expired(cfqd, cfqq, 0);
2531                 cfq_schedule_dispatch(cfqd);
2532         }
2533
2534         cfq_put_cooperator(cfqq);
2535
2536         cfq_put_queue(cfqq);
2537 }
2538
2539 static void cfq_init_icq(struct io_cq *icq)
2540 {
2541         struct cfq_io_cq *cic = icq_to_cic(icq);
2542
2543         cic->ttime.last_end_request = jiffies;
2544 }
2545
2546 static void cfq_exit_icq(struct io_cq *icq)
2547 {
2548         struct cfq_io_cq *cic = icq_to_cic(icq);
2549         struct cfq_data *cfqd = cic_to_cfqd(cic);
2550
2551         if (cic->cfqq[BLK_RW_ASYNC]) {
2552                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2553                 cic->cfqq[BLK_RW_ASYNC] = NULL;
2554         }
2555
2556         if (cic->cfqq[BLK_RW_SYNC]) {
2557                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2558                 cic->cfqq[BLK_RW_SYNC] = NULL;
2559         }
2560 }
2561
2562 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2563 {
2564         struct task_struct *tsk = current;
2565         int ioprio_class;
2566
2567         if (!cfq_cfqq_prio_changed(cfqq))
2568                 return;
2569
2570         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2571         switch (ioprio_class) {
2572         default:
2573                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2574         case IOPRIO_CLASS_NONE:
2575                 /*
2576                  * no prio set, inherit CPU scheduling settings
2577                  */
2578                 cfqq->ioprio = task_nice_ioprio(tsk);
2579                 cfqq->ioprio_class = task_nice_ioclass(tsk);
2580                 break;
2581         case IOPRIO_CLASS_RT:
2582                 cfqq->ioprio = task_ioprio(ioc);
2583                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2584                 break;
2585         case IOPRIO_CLASS_BE:
2586                 cfqq->ioprio = task_ioprio(ioc);
2587                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2588                 break;
2589         case IOPRIO_CLASS_IDLE:
2590                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2591                 cfqq->ioprio = 7;
2592                 cfq_clear_cfqq_idle_window(cfqq);
2593                 break;
2594         }
2595
2596         /*
2597          * keep track of original prio settings in case we have to temporarily
2598          * elevate the priority of this queue
2599          */
2600         cfqq->org_ioprio = cfqq->ioprio;
2601         cfq_clear_cfqq_prio_changed(cfqq);
2602 }
2603
2604 static void changed_ioprio(struct cfq_io_cq *cic)
2605 {
2606         struct cfq_data *cfqd = cic_to_cfqd(cic);
2607         struct cfq_queue *cfqq;
2608
2609         if (unlikely(!cfqd))
2610                 return;
2611
2612         cfqq = cic->cfqq[BLK_RW_ASYNC];
2613         if (cfqq) {
2614                 struct cfq_queue *new_cfqq;
2615                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
2616                                                 GFP_ATOMIC);
2617                 if (new_cfqq) {
2618                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2619                         cfq_put_queue(cfqq);
2620                 }
2621         }
2622
2623         cfqq = cic->cfqq[BLK_RW_SYNC];
2624         if (cfqq)
2625                 cfq_mark_cfqq_prio_changed(cfqq);
2626 }
2627
2628 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2629                           pid_t pid, bool is_sync)
2630 {
2631         RB_CLEAR_NODE(&cfqq->rb_node);
2632         RB_CLEAR_NODE(&cfqq->p_node);
2633         INIT_LIST_HEAD(&cfqq->fifo);
2634
2635         cfqq->ref = 0;
2636         cfqq->cfqd = cfqd;
2637
2638         cfq_mark_cfqq_prio_changed(cfqq);
2639
2640         if (is_sync) {
2641                 if (!cfq_class_idle(cfqq))
2642                         cfq_mark_cfqq_idle_window(cfqq);
2643                 cfq_mark_cfqq_sync(cfqq);
2644         }
2645         cfqq->pid = pid;
2646 }
2647
2648 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2649 static void changed_cgroup(struct cfq_io_cq *cic)
2650 {
2651         struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2652         struct cfq_data *cfqd = cic_to_cfqd(cic);
2653         struct request_queue *q;
2654
2655         if (unlikely(!cfqd))
2656                 return;
2657
2658         q = cfqd->queue;
2659
2660         if (sync_cfqq) {
2661                 /*
2662                  * Drop reference to sync queue. A new sync queue will be
2663                  * assigned in new group upon arrival of a fresh request.
2664                  */
2665                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2666                 cic_set_cfqq(cic, NULL, 1);
2667                 cfq_put_queue(sync_cfqq);
2668         }
2669 }
2670 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2671
2672 static struct cfq_queue *
2673 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2674                      struct io_context *ioc, gfp_t gfp_mask)
2675 {
2676         struct blkio_cgroup *blkcg;
2677         struct cfq_queue *cfqq, *new_cfqq = NULL;
2678         struct cfq_io_cq *cic;
2679         struct cfq_group *cfqg;
2680
2681 retry:
2682         rcu_read_lock();
2683
2684         blkcg = task_blkio_cgroup(current);
2685
2686         cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
2687
2688         cic = cfq_cic_lookup(cfqd, ioc);
2689         /* cic always exists here */
2690         cfqq = cic_to_cfqq(cic, is_sync);
2691
2692         /*
2693          * Always try a new alloc if we fell back to the OOM cfqq
2694          * originally, since it should just be a temporary situation.
2695          */
2696         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2697                 cfqq = NULL;
2698                 if (new_cfqq) {
2699                         cfqq = new_cfqq;
2700                         new_cfqq = NULL;
2701                 } else if (gfp_mask & __GFP_WAIT) {
2702                         rcu_read_unlock();
2703                         spin_unlock_irq(cfqd->queue->queue_lock);
2704                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
2705                                         gfp_mask | __GFP_ZERO,
2706                                         cfqd->queue->node);
2707                         spin_lock_irq(cfqd->queue->queue_lock);
2708                         if (new_cfqq)
2709                                 goto retry;
2710                 } else {
2711                         cfqq = kmem_cache_alloc_node(cfq_pool,
2712                                         gfp_mask | __GFP_ZERO,
2713                                         cfqd->queue->node);
2714                 }
2715
2716                 if (cfqq) {
2717                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2718                         cfq_init_prio_data(cfqq, ioc);
2719                         cfq_link_cfqq_cfqg(cfqq, cfqg);
2720                         cfq_log_cfqq(cfqd, cfqq, "alloced");
2721                 } else
2722                         cfqq = &cfqd->oom_cfqq;
2723         }
2724
2725         if (new_cfqq)
2726                 kmem_cache_free(cfq_pool, new_cfqq);
2727
2728         rcu_read_unlock();
2729         return cfqq;
2730 }
2731
2732 static struct cfq_queue **
2733 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2734 {
2735         switch (ioprio_class) {
2736         case IOPRIO_CLASS_RT:
2737                 return &cfqd->async_cfqq[0][ioprio];
2738         case IOPRIO_CLASS_BE:
2739                 return &cfqd->async_cfqq[1][ioprio];
2740         case IOPRIO_CLASS_IDLE:
2741                 return &cfqd->async_idle_cfqq;
2742         default:
2743                 BUG();
2744         }
2745 }
2746
2747 static struct cfq_queue *
2748 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2749               gfp_t gfp_mask)
2750 {
2751         const int ioprio = task_ioprio(ioc);
2752         const int ioprio_class = task_ioprio_class(ioc);
2753         struct cfq_queue **async_cfqq = NULL;
2754         struct cfq_queue *cfqq = NULL;
2755
2756         if (!is_sync) {
2757                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2758                 cfqq = *async_cfqq;
2759         }
2760
2761         if (!cfqq)
2762                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2763
2764         /*
2765          * pin the queue now that it's allocated, scheduler exit will prune it
2766          */
2767         if (!is_sync && !(*async_cfqq)) {
2768                 cfqq->ref++;
2769                 *async_cfqq = cfqq;
2770         }
2771
2772         cfqq->ref++;
2773         return cfqq;
2774 }
2775
2776 static void
2777 __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
2778 {
2779         unsigned long elapsed = jiffies - ttime->last_end_request;
2780         elapsed = min(elapsed, 2UL * slice_idle);
2781
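        /*
         * Illustrative: with a constant think time of 8 jiffies the decaying
         * sums below converge to ttime_samples = 256 and ttime_total = 2048,
         * giving ttime_mean = (2048 + 128) / 256 = 8, i.e. the mean tracks a
         * weighted average of recent think times capped at 2 * slice_idle.
         */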
2782         ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
2783         ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
2784         ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
2785 }
2786
2787 static void
2788 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2789                         struct cfq_io_cq *cic)
2790 {
2791         if (cfq_cfqq_sync(cfqq)) {
2792                 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
2793                 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
2794                         cfqd->cfq_slice_idle);
2795         }
2796 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2797         __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
2798 #endif
2799 }
2800
2801 static void
2802 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2803                        struct request *rq)
2804 {
2805         sector_t sdist = 0;
2806         sector_t n_sec = blk_rq_sectors(rq);
2807         if (cfqq->last_request_pos) {
2808                 if (cfqq->last_request_pos < blk_rq_pos(rq))
2809                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
2810                 else
2811                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
2812         }
2813
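        /*
         * Shift one new sample into the 32-bit seek_history register: a set
         * bit means this request was far from the previous one (or, on
         * non-rotational media, was a small request). CFQQ_SEEKY() later
         * checks how many of the last 32 samples were set.
         */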
2814         cfqq->seek_history <<= 1;
2815         if (blk_queue_nonrot(cfqd->queue))
2816                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
2817         else
2818                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
2819 }
2820
2821 /*
2822  * Disable idle window if the process thinks too long or seeks so much that
2823  * it doesn't matter
2824  */
2825 static void
2826 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2827                        struct cfq_io_cq *cic)
2828 {
2829         int old_idle, enable_idle;
2830
2831         /*
2832          * Don't idle for async or idle io prio class
2833          */
2834         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
2835                 return;
2836
2837         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
2838
2839         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
2840                 cfq_mark_cfqq_deep(cfqq);
2841
2842         if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
2843                 enable_idle = 0;
2844         else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
2845                  !cfqd->cfq_slice_idle ||
2846                  (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
2847                 enable_idle = 0;
2848         else if (sample_valid(cic->ttime.ttime_samples)) {
2849                 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
2850                         enable_idle = 0;
2851                 else
2852                         enable_idle = 1;
2853         }
2854
2855         if (old_idle != enable_idle) {
2856                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
2857                 if (enable_idle)
2858                         cfq_mark_cfqq_idle_window(cfqq);
2859                 else
2860                         cfq_clear_cfqq_idle_window(cfqq);
2861         }
2862 }
2863
2864 /*
2865  * Check if new_cfqq should preempt the currently active queue. Return 0 for
2866  * no or if we aren't sure, a 1 will cause a preempt.
2867  */
2868 static bool
2869 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
2870                    struct request *rq)
2871 {
2872         struct cfq_queue *cfqq;
2873
2874         cfqq = cfqd->active_queue;
2875         if (!cfqq)
2876                 return false;
2877
2878         if (cfq_class_idle(new_cfqq))
2879                 return false;
2880
2881         if (cfq_class_idle(cfqq))
2882                 return true;
2883
2884         /*
2885          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
2886          */
2887         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
2888                 return false;
2889
2890         /*
2891          * if the new request is sync, but the currently running queue is
2892          * not, let the sync request have priority.
2893          */
2894         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
2895                 return true;
2896
2897         if (new_cfqq->cfqg != cfqq->cfqg)
2898                 return false;
2899
2900         if (cfq_slice_used(cfqq))
2901                 return true;
2902
2903         /* Allow preemption only if we are idling on sync-noidle tree */
2904         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
2905             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
2906             new_cfqq->service_tree->count == 2 &&
2907             RB_EMPTY_ROOT(&cfqq->sort_list))
2908                 return true;
2909
2910         /*
2911          * So both queues are sync. Let the new request get disk time if
2912          * it's a metadata request and the current queue is doing regular IO.
2913          */
2914         if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
2915                 return true;
2916
2917         /*
2918          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
2919          */
2920         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
2921                 return true;
2922
2923         /* An idle queue should not be idle now for some reason */
2924         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
2925                 return true;
2926
2927         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
2928                 return false;
2929
2930         /*
2931          * if this request is as-good as one we would expect from the
2932          * current cfqq, let it preempt
2933          */
2934         if (cfq_rq_close(cfqd, cfqq, rq))
2935                 return true;
2936
2937         return false;
2938 }
2939
2940 /*
2941  * cfqq preempts the active queue. if we allowed preempt with no slice left,
2942  * let it have half of its nominal slice.
2943  */
2944 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2945 {
2946         enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
2947
2948         cfq_log_cfqq(cfqd, cfqq, "preempt");
2949         cfq_slice_expired(cfqd, 1);
2950
2951         /*
2952          * workload type is changed, don't save slice, otherwise preempt
2953          * doesn't happen
2954          */
2955         if (old_type != cfqq_type(cfqq))
2956                 cfqq->cfqg->saved_workload_slice = 0;
2957
2958         /*
2959          * Put the new queue at the front of the current list,
2960          * so we know that it will be selected next.
2961          */
2962         BUG_ON(!cfq_cfqq_on_rr(cfqq));
2963
2964         cfq_service_tree_add(cfqd, cfqq, 1);
2965
2966         cfqq->slice_end = 0;
2967         cfq_mark_cfqq_slice_new(cfqq);
2968 }
2969
2970 /*
2971  * Called when a new fs request (rq) is added (to cfqq). Check if there's
2972  * something we should do about it
2973  */
2974 static void
2975 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2976                 struct request *rq)
2977 {
2978         struct cfq_io_cq *cic = RQ_CIC(rq);
2979
2980         cfqd->rq_queued++;
2981         if (rq->cmd_flags & REQ_PRIO)
2982                 cfqq->prio_pending++;
2983
2984         cfq_update_io_thinktime(cfqd, cfqq, cic);
2985         cfq_update_io_seektime(cfqd, cfqq, rq);
2986         cfq_update_idle_window(cfqd, cfqq, cic);
2987
2988         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
2989
2990         if (cfqq == cfqd->active_queue) {
2991                 /*
2992                  * Remember that we saw a request from this process, but
2993                  * don't start queuing just yet. Otherwise we risk seeing lots
2994                  * of tiny requests, because we disrupt the normal plugging
2995                  * and merging. If the request is already larger than a single
2996                  * page, let it rip immediately. For that case we assume that
2997                  * merging is already done. Ditto for a busy system that
2998                  * has other work pending, don't risk delaying until the
2999                  * idle timer unplug to continue working.
3000                  */
3001                 if (cfq_cfqq_wait_request(cfqq)) {
3002                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3003                             cfqd->busy_queues > 1) {
3004                                 cfq_del_timer(cfqd, cfqq);
3005                                 cfq_clear_cfqq_wait_request(cfqq);
3006                                 __blk_run_queue(cfqd->queue);
3007                         } else {
3008                                 cfq_blkiocg_update_idle_time_stats(
3009                                                 cfqg_to_blkg(cfqq->cfqg),
3010                                                 &blkio_policy_cfq);
3011                                 cfq_mark_cfqq_must_dispatch(cfqq);
3012                         }
3013                 }
3014         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3015                 /*
3016                  * not the active queue - expire current slice if it is
3017                  * idle and has expired its mean thinktime or this new queue
3018                  * has some old slice time left and is of higher priority or
3019                  * this new queue is RT and the current one is BE
3020                  */
3021                 cfq_preempt_queue(cfqd, cfqq);
3022                 __blk_run_queue(cfqd->queue);
3023         }
3024 }
3025
3026 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3027 {
3028         struct cfq_data *cfqd = q->elevator->elevator_data;
3029         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3030
3031         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3032         cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
3033
3034         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3035         list_add_tail(&rq->queuelist, &cfqq->fifo);
3036         cfq_add_rq_rb(rq);
3037         cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
3038                                         &blkio_policy_cfq,
3039                                         cfqg_to_blkg(cfqd->serving_group),
3040                                         rq_data_dir(rq), rq_is_sync(rq));
3041         cfq_rq_enqueued(cfqd, cfqq, rq);
3042 }
3043
3044 /*
3045  * Update hw_tag based on peak queue depth over 50 samples under
3046  * sufficient load.
3047  */
3048 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3049 {
3050         struct cfq_queue *cfqq = cfqd->active_queue;
3051
3052         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3053                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3054
3055         if (cfqd->hw_tag == 1)
3056                 return;
3057
3058         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3059             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3060                 return;
3061
3062         /*
3063          * If the active queue doesn't have enough requests and can idle, cfq might not
3064          * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3065          * case
3066          */
3067         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3068             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3069             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3070                 return;
3071
3072         if (cfqd->hw_tag_samples++ < 50)
3073                 return;
3074
3075         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3076                 cfqd->hw_tag = 1;
3077         else
3078                 cfqd->hw_tag = 0;
3079 }
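/*
 * Worked example with made-up numbers: suppose that over the 50 samples the
 * driver never had more than hw_tag_est_depth = 2 requests in flight even
 * though CFQ itself had plenty queued.  Since 2 < CFQ_HW_QUEUE_MIN, hw_tag
 * ends up 0 and the device is treated as not doing its own command queueing,
 * so CFQ will lean more heavily on idling.  Had the estimated depth reached,
 * say, 16, hw_tag would be set to 1 instead.
 */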
3080
3081 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3082 {
3083         struct cfq_io_cq *cic = cfqd->active_cic;
3084
3085         /* If the queue already has requests, don't wait */
3086         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3087                 return false;
3088
3089         /* If there are other queues in the group, don't wait */
3090         if (cfqq->cfqg->nr_cfqq > 1)
3091                 return false;
3092
3093         /* the only queue in the group, but think time is big */
3094         if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3095                 return false;
3096
3097         if (cfq_slice_used(cfqq))
3098                 return true;
3099
3100         /* if slice left is less than think time, wait busy */
3101         if (cic && sample_valid(cic->ttime.ttime_samples)
3102             && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3103                 return true;
3104
3105         /*
3106          * If the think time is less than a jiffy, then ttime_mean=0 and the
3107          * check above will not be true. It might happen that the slice has not
3108          * expired yet but will expire soon (4-5 ns) during select_queue(). To
3109          * cover the case where think time is less than a jiffy, mark the queue
3110          * wait busy if only 1 jiffy is left in the slice.
3111          */
3112         if (cfqq->slice_end - jiffies == 1)
3113                 return true;
3114
3115         return false;
3116 }
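/*
 * Illustrative case (assumed numbers): a process whose mean think time is 3
 * jiffies completes its last request with only 2 jiffies of slice left.  The
 * slice_end - jiffies < ttime_mean test above returns true, so the caller in
 * cfq_completed_request() marks the queue wait-busy and extends the slice by
 * slice_idle (or group_idle), instead of expiring it and losing the group's
 * fair share while the process thinks.
 */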
3117
3118 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3119 {
3120         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3121         struct cfq_data *cfqd = cfqq->cfqd;
3122         const int sync = rq_is_sync(rq);
3123         unsigned long now;
3124
3125         now = jiffies;
3126         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3127                      !!(rq->cmd_flags & REQ_NOIDLE));
3128
3129         cfq_update_hw_tag(cfqd);
3130
3131         WARN_ON(!cfqd->rq_in_driver);
3132         WARN_ON(!cfqq->dispatched);
3133         cfqd->rq_in_driver--;
3134         cfqq->dispatched--;
3135         (RQ_CFQG(rq))->dispatched--;
3136         cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
3137                         &blkio_policy_cfq, rq_start_time_ns(rq),
3138                         rq_io_start_time_ns(rq), rq_data_dir(rq),
3139                         rq_is_sync(rq));
3140
3141         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3142
3143         if (sync) {
3144                 struct cfq_rb_root *service_tree;
3145
3146                 RQ_CIC(rq)->ttime.last_end_request = now;
3147
3148                 if (cfq_cfqq_on_rr(cfqq))
3149                         service_tree = cfqq->service_tree;
3150                 else
3151                         service_tree = service_tree_for(cfqq->cfqg,
3152                                 cfqq_prio(cfqq), cfqq_type(cfqq));
3153                 service_tree->ttime.last_end_request = now;
3154                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3155                         cfqd->last_delayed_sync = now;
3156         }
3157
3158 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3159         cfqq->cfqg->ttime.last_end_request = now;
3160 #endif
3161
3162         /*
3163          * If this is the active queue, check if it needs to be expired,
3164          * or if we want to idle in case it has no pending requests.
3165          */
3166         if (cfqd->active_queue == cfqq) {
3167                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3168
3169                 if (cfq_cfqq_slice_new(cfqq)) {
3170                         cfq_set_prio_slice(cfqd, cfqq);
3171                         cfq_clear_cfqq_slice_new(cfqq);
3172                 }
3173
3174                 /*
3175                  * Should we wait for the next request to come in before we
3176                  * expire the queue?
3177                  */
3178                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3179                         unsigned long extend_sl = cfqd->cfq_slice_idle;
3180                         if (!cfqd->cfq_slice_idle)
3181                                 extend_sl = cfqd->cfq_group_idle;
3182                         cfqq->slice_end = jiffies + extend_sl;
3183                         cfq_mark_cfqq_wait_busy(cfqq);
3184                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3185                 }
3186
3187                 /*
3188                  * Idling is not enabled on:
3189                  * - expired queues
3190                  * - idle-priority queues
3191                  * - async queues
3192                  * - queues with still some requests queued
3193                  * - when there is a close cooperator
3194                  */
3195                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3196                         cfq_slice_expired(cfqd, 1);
3197                 else if (sync && cfqq_empty &&
3198                          !cfq_close_cooperator(cfqd, cfqq)) {
3199                         cfq_arm_slice_timer(cfqd);
3200                 }
3201         }
3202
3203         if (!cfqd->rq_in_driver)
3204                 cfq_schedule_dispatch(cfqd);
3205 }
3206
3207 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3208 {
3209         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3210                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3211                 return ELV_MQUEUE_MUST;
3212         }
3213
3214         return ELV_MQUEUE_MAY;
3215 }
3216
3217 static int cfq_may_queue(struct request_queue *q, int rw)
3218 {
3219         struct cfq_data *cfqd = q->elevator->elevator_data;
3220         struct task_struct *tsk = current;
3221         struct cfq_io_cq *cic;
3222         struct cfq_queue *cfqq;
3223
3224         /*
3225          * don't force setup of a queue from here, as a call to may_queue
3226          * does not necessarily imply that a request actually will be queued.
3227          * so just look up a possibly existing queue, or return 'may queue'
3228          * if that fails.
3229          */
3230         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3231         if (!cic)
3232                 return ELV_MQUEUE_MAY;
3233
3234         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3235         if (cfqq) {
3236                 cfq_init_prio_data(cfqq, cic->icq.ioc);
3237
3238                 return __cfq_may_queue(cfqq);
3239         }
3240
3241         return ELV_MQUEUE_MAY;
3242 }
3243
3244 /*
3245  * queue lock held here
3246  */
3247 static void cfq_put_request(struct request *rq)
3248 {
3249         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3250
3251         if (cfqq) {
3252                 const int rw = rq_data_dir(rq);
3253
3254                 BUG_ON(!cfqq->allocated[rw]);
3255                 cfqq->allocated[rw]--;
3256
3257                 /* Put down rq reference on cfqg */
3258                 blkg_put(cfqg_to_blkg(RQ_CFQG(rq)));
3259                 rq->elv.priv[0] = NULL;
3260                 rq->elv.priv[1] = NULL;
3261
3262                 cfq_put_queue(cfqq);
3263         }
3264 }
3265
3266 static struct cfq_queue *
3267 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
3268                 struct cfq_queue *cfqq)
3269 {
3270         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3271         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3272         cfq_mark_cfqq_coop(cfqq->new_cfqq);
3273         cfq_put_queue(cfqq);
3274         return cic_to_cfqq(cic, 1);
3275 }
3276
3277 /*
3278  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3279  * was the last process referring to said cfqq.
3280  */
3281 static struct cfq_queue *
3282 split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
3283 {
3284         if (cfqq_process_refs(cfqq) == 1) {
3285                 cfqq->pid = current->pid;
3286                 cfq_clear_cfqq_coop(cfqq);
3287                 cfq_clear_cfqq_split_coop(cfqq);
3288                 return cfqq;
3289         }
3290
3291         cic_set_cfqq(cic, NULL, 1);
3292
3293         cfq_put_cooperator(cfqq);
3294
3295         cfq_put_queue(cfqq);
3296         return NULL;
3297 }
3298 /*
3299  * Allocate cfq data structures associated with this request.
3300  */
3301 static int
3302 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3303 {
3304         struct cfq_data *cfqd = q->elevator->elevator_data;
3305         struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
3306         const int rw = rq_data_dir(rq);
3307         const bool is_sync = rq_is_sync(rq);
3308         struct cfq_queue *cfqq;
3309         unsigned int changed;
3310
3311         might_sleep_if(gfp_mask & __GFP_WAIT);
3312
3313         spin_lock_irq(q->queue_lock);
3314
3315         /* handle changed notifications */
3316         changed = icq_get_changed(&cic->icq);
3317         if (unlikely(changed & ICQ_IOPRIO_CHANGED))
3318                 changed_ioprio(cic);
3319 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3320         if (unlikely(changed & ICQ_CGROUP_CHANGED))
3321                 changed_cgroup(cic);
3322 #endif
3323
3324 new_queue:
3325         cfqq = cic_to_cfqq(cic, is_sync);
3326         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3327                 cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
3328                 cic_set_cfqq(cic, cfqq, is_sync);
3329         } else {
3330                 /*
3331                  * If the queue was seeky for too long, break it apart.
3332                  */
3333                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3334                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3335                         cfqq = split_cfqq(cic, cfqq);
3336                         if (!cfqq)
3337                                 goto new_queue;
3338                 }
3339
3340                 /*
3341                  * Check to see if this queue is scheduled to merge with
3342                  * another, closely cooperating queue.  The merging of
3343                  * queues happens here as it must be done in process context.
3344                  * The reference on new_cfqq was taken in merge_cfqqs.
3345                  */
3346                 if (cfqq->new_cfqq)
3347                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3348         }
3349
3350         cfqq->allocated[rw]++;
3351
3352         cfqq->ref++;
3353         blkg_get(cfqg_to_blkg(cfqq->cfqg));
3354         rq->elv.priv[0] = cfqq;
3355         rq->elv.priv[1] = cfqq->cfqg;
3356         spin_unlock_irq(q->queue_lock);
3357         return 0;
3358 }
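/*
 * Note: the cfqq->ref and blkg_get() taken here are paired with the
 * cfq_put_queue() and blkg_put() in cfq_put_request() above; elv.priv[0] and
 * elv.priv[1] carry the queue and group pointers for the lifetime of the
 * request.
 */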
3359
3360 static void cfq_kick_queue(struct work_struct *work)
3361 {
3362         struct cfq_data *cfqd =
3363                 container_of(work, struct cfq_data, unplug_work);
3364         struct request_queue *q = cfqd->queue;
3365
3366         spin_lock_irq(q->queue_lock);
3367         __blk_run_queue(cfqd->queue);
3368         spin_unlock_irq(q->queue_lock);
3369 }
3370
3371 /*
3372  * Timer running if the active_queue is currently idling inside its time slice
3373  */
3374 static void cfq_idle_slice_timer(unsigned long data)
3375 {
3376         struct cfq_data *cfqd = (struct cfq_data *) data;
3377         struct cfq_queue *cfqq;
3378         unsigned long flags;
3379         int timed_out = 1;
3380
3381         cfq_log(cfqd, "idle timer fired");
3382
3383         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3384
3385         cfqq = cfqd->active_queue;
3386         if (cfqq) {
3387                 timed_out = 0;
3388
3389                 /*
3390                  * We saw a request before the queue expired, let it through
3391                  */
3392                 if (cfq_cfqq_must_dispatch(cfqq))
3393                         goto out_kick;
3394
3395                 /*
3396                  * expired
3397                  */
3398                 if (cfq_slice_used(cfqq))
3399                         goto expire;
3400
3401                 /*
3402                  * only expire and reinvoke the request handler if there are
3403                  * other queues with pending requests
3404                  */
3405                 if (!cfqd->busy_queues)
3406                         goto out_cont;
3407
3408                 /*
3409                  * not expired and it has a request pending, let it dispatch
3410                  */
3411                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3412                         goto out_kick;
3413
3414                 /*
3415                  * The queue depth ('deep') flag is reset only when idling didn't succeed
3416                  */
3417                 cfq_clear_cfqq_deep(cfqq);
3418         }
3419 expire:
3420         cfq_slice_expired(cfqd, timed_out);
3421 out_kick:
3422         cfq_schedule_dispatch(cfqd);
3423 out_cont:
3424         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3425 }
3426
3427 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3428 {
3429         del_timer_sync(&cfqd->idle_slice_timer);
3430         cancel_work_sync(&cfqd->unplug_work);
3431 }
3432
3433 static void cfq_put_async_queues(struct cfq_data *cfqd)
3434 {
3435         int i;
3436
3437         for (i = 0; i < IOPRIO_BE_NR; i++) {
3438                 if (cfqd->async_cfqq[0][i])
3439                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3440                 if (cfqd->async_cfqq[1][i])
3441                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3442         }
3443
3444         if (cfqd->async_idle_cfqq)
3445                 cfq_put_queue(cfqd->async_idle_cfqq);
3446 }
3447
3448 static void cfq_exit_queue(struct elevator_queue *e)
3449 {
3450         struct cfq_data *cfqd = e->elevator_data;
3451         struct request_queue *q = cfqd->queue;
3452         bool wait = false;
3453
3454         cfq_shutdown_timer_wq(cfqd);
3455
3456         spin_lock_irq(q->queue_lock);
3457
3458         if (cfqd->active_queue)
3459                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3460
3461         cfq_put_async_queues(cfqd);
3462
3463         spin_unlock_irq(q->queue_lock);
3464
3465         blkg_destroy_all(q, BLKIO_POLICY_PROP, true);
3466
3467 #ifdef CONFIG_BLK_CGROUP
3468         /*
3469          * If there are groups which we could not unlink from blkcg list,
3470          * wait for a rcu period for them to be freed.
3471          */
3472         spin_lock_irq(q->queue_lock);
3473         wait = q->nr_blkgs[BLKIO_POLICY_PROP];
3474         spin_unlock_irq(q->queue_lock);
3475 #endif
3476         cfq_shutdown_timer_wq(cfqd);
3477
3478         /*
3479          * Wait for cfqg->blkg->key accessors to exit their grace periods.
3480          * Do this wait only if there are other unlinked groups out
3481          * there. This can happen if the cgroup deletion path claimed the
3482          * responsibility of cleaning up a group before the queue cleanup
3483          * code got to the group.
3484          *
3485          * Do not call synchronize_rcu() unconditionally as there are drivers
3486          * which create/delete request queues hundreds of times during scan/boot
3487          * and synchronize_rcu() can take significant time and slow down boot.
3488          */
3489         if (wait)
3490                 synchronize_rcu();
3491
3492 #ifndef CONFIG_CFQ_GROUP_IOSCHED
3493         kfree(cfqd->root_group);
3494 #endif
3495         kfree(cfqd);
3496 }
3497
3498 static int cfq_init_queue(struct request_queue *q)
3499 {
3500         struct cfq_data *cfqd;
3501         struct blkio_group *blkg __maybe_unused;
3502         int i;
3503
3504         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3505         if (!cfqd)
3506                 return -ENOMEM;
3507
3508         cfqd->queue = q;
3509         q->elevator->elevator_data = cfqd;
3510
3511         /* Init root service tree */
3512         cfqd->grp_service_tree = CFQ_RB_ROOT;
3513
3514         /* Init root group and prefer root group over other groups by default */
3515 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3516         rcu_read_lock();
3517         spin_lock_irq(q->queue_lock);
3518
3519         blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_PROP,
3520                                   true);
3521         if (!IS_ERR(blkg))
3522                 cfqd->root_group = blkg_to_cfqg(blkg);
3523
3524         spin_unlock_irq(q->queue_lock);
3525         rcu_read_unlock();
3526 #else
3527         cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
3528                                         GFP_KERNEL, cfqd->queue->node);
3529         if (cfqd->root_group)
3530                 cfq_init_cfqg_base(cfqd->root_group);
3531 #endif
3532         if (!cfqd->root_group) {
3533                 kfree(cfqd);
3534                 return -ENOMEM;
3535         }
3536
3537         cfqd->root_group->weight = 2*BLKIO_WEIGHT_DEFAULT;
3538
3539         /*
3540          * Not strictly needed (since RB_ROOT just clears the node and we
3541          * zeroed cfqd on alloc), but better be safe in case someone decides
3542          * to add magic to the rb code
3543          */
3544         for (i = 0; i < CFQ_PRIO_LISTS; i++)
3545                 cfqd->prio_trees[i] = RB_ROOT;
3546
3547         /*
3548          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3549          * Grab a permanent reference to it, so that the normal code flow
3550          * will not attempt to free it.  oom_cfqq is linked to root_group
3551          * but shouldn't hold a reference as it'll never be unlinked.  Lose
3552          * the reference from linking right away.
3553          */
3554         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3555         cfqd->oom_cfqq.ref++;
3556
3557         spin_lock_irq(q->queue_lock);
3558         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
3559         blkg_put(cfqg_to_blkg(cfqd->root_group));
3560         spin_unlock_irq(q->queue_lock);
3561
3562         init_timer(&cfqd->idle_slice_timer);
3563         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3564         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3565
3566         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3567
3568         cfqd->cfq_quantum = cfq_quantum;
3569         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3570         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3571         cfqd->cfq_back_max = cfq_back_max;
3572         cfqd->cfq_back_penalty = cfq_back_penalty;
3573         cfqd->cfq_slice[0] = cfq_slice_async;
3574         cfqd->cfq_slice[1] = cfq_slice_sync;
3575         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3576         cfqd->cfq_slice_idle = cfq_slice_idle;
3577         cfqd->cfq_group_idle = cfq_group_idle;
3578         cfqd->cfq_latency = 1;
3579         cfqd->hw_tag = -1;
3580         /*
3581          * we optimistically start assuming sync ops weren't delayed in the
3582          * last second, in order to have a larger depth for async operations.
3583          */
3584         cfqd->last_delayed_sync = jiffies - HZ;
3585         return 0;
3586 }
3587
3588 /*
3589  * sysfs parts below -->
3590  */
3591 static ssize_t
3592 cfq_var_show(unsigned int var, char *page)
3593 {
3594         return sprintf(page, "%d\n", var);
3595 }
3596
3597 static ssize_t
3598 cfq_var_store(unsigned int *var, const char *page, size_t count)
3599 {
3600         char *p = (char *) page;
3601
3602         *var = simple_strtoul(p, &p, 10);
3603         return count;
3604 }
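/*
 * A note on the parsing behaviour above: simple_strtoul() stops at the first
 * non-digit, so input such as "8\n" (or even "8junk") is stored as 8, and a
 * purely non-numeric string becomes 0.  The STORE_FUNCTION wrappers below
 * then clamp the result into [MIN, MAX] and, where __CONV is set, convert
 * milliseconds to jiffies.
 */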
3605
3606 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
3607 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
3608 {                                                                       \
3609         struct cfq_data *cfqd = e->elevator_data;                       \
3610         unsigned int __data = __VAR;                                    \
3611         if (__CONV)                                                     \
3612                 __data = jiffies_to_msecs(__data);                      \
3613         return cfq_var_show(__data, (page));                            \
3614 }
3615 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3616 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3617 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3618 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3619 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3620 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3621 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
3622 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3623 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3624 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3625 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3626 #undef SHOW_FUNCTION
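/*
 * For reference, a sketch of what one of the expansions above looks like
 * after preprocessing (slice_idle picked arbitrarily):
 *
 *   static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
 *   {
 *           struct cfq_data *cfqd = e->elevator_data;
 *           unsigned int __data = cfqd->cfq_slice_idle;
 *           if (1)
 *                   __data = jiffies_to_msecs(__data);
 *           return cfq_var_show(__data, (page));
 *   }
 */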
3627
3628 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
3629 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
3630 {                                                                       \
3631         struct cfq_data *cfqd = e->elevator_data;                       \
3632         unsigned int __data;                                            \
3633         int ret = cfq_var_store(&__data, (page), count);                \
3634         if (__data < (MIN))                                             \
3635                 __data = (MIN);                                         \
3636         else if (__data > (MAX))                                        \
3637                 __data = (MAX);                                         \
3638         if (__CONV)                                                     \
3639                 *(__PTR) = msecs_to_jiffies(__data);                    \
3640         else                                                            \
3641                 *(__PTR) = __data;                                      \
3642         return ret;                                                     \
3643 }
3644 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
3645 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3646                 UINT_MAX, 1);
3647 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3648                 UINT_MAX, 1);
3649 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3650 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3651                 UINT_MAX, 0);
3652 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
3653 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
3654 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3655 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3656 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3657                 UINT_MAX, 0);
3658 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
3659 #undef STORE_FUNCTION
3660
3661 #define CFQ_ATTR(name) \
3662         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3663
3664 static struct elv_fs_entry cfq_attrs[] = {
3665         CFQ_ATTR(quantum),
3666         CFQ_ATTR(fifo_expire_sync),
3667         CFQ_ATTR(fifo_expire_async),
3668         CFQ_ATTR(back_seek_max),
3669         CFQ_ATTR(back_seek_penalty),
3670         CFQ_ATTR(slice_sync),
3671         CFQ_ATTR(slice_async),
3672         CFQ_ATTR(slice_async_rq),
3673         CFQ_ATTR(slice_idle),
3674         CFQ_ATTR(group_idle),
3675         CFQ_ATTR(low_latency),
3676         __ATTR_NULL
3677 };
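/*
 * These attributes appear under the elevator's sysfs directory, i.e.
 * /sys/block/<dev>/queue/iosched/ when cfq is the active scheduler (device
 * name below purely illustrative):
 *
 *   cat /sys/block/sda/queue/iosched/slice_idle
 *   echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * Values are in milliseconds where the SHOW/STORE macros pass __CONV=1,
 * plain integers otherwise.
 */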
3678
3679 static struct elevator_type iosched_cfq = {
3680         .ops = {
3681                 .elevator_merge_fn =            cfq_merge,
3682                 .elevator_merged_fn =           cfq_merged_request,
3683                 .elevator_merge_req_fn =        cfq_merged_requests,
3684                 .elevator_allow_merge_fn =      cfq_allow_merge,
3685                 .elevator_bio_merged_fn =       cfq_bio_merged,
3686                 .elevator_dispatch_fn =         cfq_dispatch_requests,
3687                 .elevator_add_req_fn =          cfq_insert_request,
3688                 .elevator_activate_req_fn =     cfq_activate_request,
3689                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
3690                 .elevator_completed_req_fn =    cfq_completed_request,
3691                 .elevator_former_req_fn =       elv_rb_former_request,
3692                 .elevator_latter_req_fn =       elv_rb_latter_request,
3693                 .elevator_init_icq_fn =         cfq_init_icq,
3694                 .elevator_exit_icq_fn =         cfq_exit_icq,
3695                 .elevator_set_req_fn =          cfq_set_request,
3696                 .elevator_put_req_fn =          cfq_put_request,
3697                 .elevator_may_queue_fn =        cfq_may_queue,
3698                 .elevator_init_fn =             cfq_init_queue,
3699                 .elevator_exit_fn =             cfq_exit_queue,
3700         },
3701         .icq_size       =       sizeof(struct cfq_io_cq),
3702         .icq_align      =       __alignof__(struct cfq_io_cq),
3703         .elevator_attrs =       cfq_attrs,
3704         .elevator_name  =       "cfq",
3705         .elevator_owner =       THIS_MODULE,
3706 };
3707
3708 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3709 static struct blkio_policy_type blkio_policy_cfq = {
3710         .ops = {
3711                 .blkio_init_group_fn =          cfq_init_blkio_group,
3712                 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
3713         },
3714         .plid = BLKIO_POLICY_PROP,
3715         .pdata_size = sizeof(struct cfq_group),
3716 };
3717 #endif
3718
3719 static int __init cfq_init(void)
3720 {
3721         int ret;
3722
3723         /*
3724          * could be 0 on HZ < 1000 setups (e.g. HZ=100 makes HZ / 125 = 0)
3725          */
3726         if (!cfq_slice_async)
3727                 cfq_slice_async = 1;
3728         if (!cfq_slice_idle)
3729                 cfq_slice_idle = 1;
3730
3731 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3732         if (!cfq_group_idle)
3733                 cfq_group_idle = 1;
3734 #else
3735         cfq_group_idle = 0;
3736 #endif
3737         cfq_pool = KMEM_CACHE(cfq_queue, 0);
3738         if (!cfq_pool)
3739                 return -ENOMEM;
3740
3741         ret = elv_register(&iosched_cfq);
3742         if (ret) {
3743                 kmem_cache_destroy(cfq_pool);
3744                 return ret;
3745         }
3746
3747 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3748         blkio_policy_register(&blkio_policy_cfq);
3749 #endif
3750         return 0;
3751 }
3752
3753 static void __exit cfq_exit(void)
3754 {
3755 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3756         blkio_policy_unregister(&blkio_policy_cfq);
3757 #endif
3758         elv_unregister(&iosched_cfq);
3759         kmem_cache_destroy(cfq_pool);
3760 }
3761
3762 module_init(cfq_init);
3763 module_exit(cfq_exit);
3764
3765 MODULE_AUTHOR("Jens Axboe");
3766 MODULE_LICENSE("GPL");
3767 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");