cfq-iosched: algebraic simplification in cfq_prio_to_maxrq()
[linux-2.6.git] / block / cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "cfq.h"
18
19 /*
20  * tunables
21  */
22 /* max queue in one round of service */
23 static const int cfq_quantum = 8;
24 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
25 /* maximum backwards seek, in KiB */
26 static const int cfq_back_max = 16 * 1024;
27 /* penalty of a backwards seek */
28 static const int cfq_back_penalty = 2;
29 static const int cfq_slice_sync = HZ / 10;
30 static int cfq_slice_async = HZ / 25;
31 static const int cfq_slice_async_rq = 2;
32 static int cfq_slice_idle = HZ / 125;
33 static int cfq_group_idle = HZ / 125;
34 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
35 static const int cfq_hist_divisor = 4;
36
37 /*
38  * offset from end of service tree
39  */
40 #define CFQ_IDLE_DELAY          (HZ / 5)
41
42 /*
43  * below this threshold, we consider thinktime immediate
44  */
45 #define CFQ_MIN_TT              (2)
46
47 #define CFQ_SLICE_SCALE         (5)
48 #define CFQ_HW_QUEUE_MIN        (5)
49 #define CFQ_SERVICE_SHIFT       12
50
51 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
52 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
53 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
54 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
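/*
 * Illustration: seek_history is a 32-bit sliding window, one bit per recent
 * request, set when that request landed beyond the seek threshold (it is
 * maintained later in this file). CFQQ_SEEKY() therefore flags a queue as
 * seeky once more than 32/8 = 4 of its last 32 requests were seeky.
 */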
55
56 #define RQ_CIC(rq)              \
57         ((struct cfq_io_context *) (rq)->elevator_private[0])
58 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private[1])
59 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elevator_private[2])
60
61 static struct kmem_cache *cfq_pool;
62 static struct kmem_cache *cfq_ioc_pool;
63
64 static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
65 static struct completion *ioc_gone;
66 static DEFINE_SPINLOCK(ioc_gone_lock);
67
68 static DEFINE_SPINLOCK(cic_index_lock);
69 static DEFINE_IDA(cic_index_ida);
70
71 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
72 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
73 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
74
75 #define sample_valid(samples)   ((samples) > 80)
76 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
77
78 /*
79  * Most of our rbtree usage is for sorting with min extraction, so
80  * if we cache the leftmost node we don't have to walk down the tree
81  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
82  * move this into the elevator for the rq sorting as well.
83  */
84 struct cfq_rb_root {
85         struct rb_root rb;
86         struct rb_node *left;
87         unsigned count;
88         unsigned total_weight;
89         u64 min_vdisktime;
90 };
91 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
92                         .count = 0, .min_vdisktime = 0, }
93
94 /*
95  * Per process-grouping structure
96  */
97 struct cfq_queue {
98         /* reference count */
99         int ref;
100         /* various state flags, see below */
101         unsigned int flags;
102         /* parent cfq_data */
103         struct cfq_data *cfqd;
104         /* service_tree member */
105         struct rb_node rb_node;
106         /* service_tree key */
107         unsigned long rb_key;
108         /* prio tree member */
109         struct rb_node p_node;
110         /* prio tree root we belong to, if any */
111         struct rb_root *p_root;
112         /* sorted list of pending requests */
113         struct rb_root sort_list;
114         /* if fifo isn't expired, next request to serve */
115         struct request *next_rq;
116         /* requests queued in sort_list */
117         int queued[2];
118         /* currently allocated requests */
119         int allocated[2];
120         /* fifo list of requests in sort_list */
121         struct list_head fifo;
122
123         /* time when queue got scheduled in to dispatch first request. */
124         unsigned long dispatch_start;
125         unsigned int allocated_slice;
126         unsigned int slice_dispatch;
127         /* time when first request from queue completed and slice started. */
128         unsigned long slice_start;
129         unsigned long slice_end;
130         long slice_resid;
131
132         /* pending metadata requests */
133         int meta_pending;
134         /* number of requests that are on the dispatch list or inside driver */
135         int dispatched;
136
137         /* io prio of this group */
138         unsigned short ioprio, org_ioprio;
139         unsigned short ioprio_class, org_ioprio_class;
140
141         pid_t pid;
142
143         u32 seek_history;
144         sector_t last_request_pos;
145
146         struct cfq_rb_root *service_tree;
147         struct cfq_queue *new_cfqq;
148         struct cfq_group *cfqg;
149         /* Number of sectors dispatched from queue in single dispatch round */
150         unsigned long nr_sectors;
151 };
152
153 /*
154  * First index in the service_trees.
155  * IDLE is handled separately and uses the dedicated service_tree_idle
156  */
157 enum wl_prio_t {
158         BE_WORKLOAD = 0,
159         RT_WORKLOAD = 1,
160         IDLE_WORKLOAD = 2,
161         CFQ_PRIO_NR,
162 };
163
164 /*
165  * Second index in the service_trees.
166  */
167 enum wl_type_t {
168         ASYNC_WORKLOAD = 0,
169         SYNC_NOIDLE_WORKLOAD = 1,
170         SYNC_WORKLOAD = 2
171 };
172
173 /* This is per cgroup per device grouping structure */
174 struct cfq_group {
175         /* group service_tree member */
176         struct rb_node rb_node;
177
178         /* group service_tree key */
179         u64 vdisktime;
180         unsigned int weight;
181         unsigned int new_weight;
182         bool needs_update;
183
184         /* number of cfqq currently on this group */
185         int nr_cfqq;
186
187         /*
188          * Per group busy queues average. Useful for workload slice calc. We
189          * create the array for each prio class, but at run time it is used
190          * only for the RT and BE classes; the slot for the IDLE class remains unused.
191          * This is primarily done to avoid confusion and a gcc warning.
192          */
193         unsigned int busy_queues_avg[CFQ_PRIO_NR];
194         /*
195          * rr lists of queues with requests. We maintain service trees for
196          * RT and BE classes. These trees are subdivided in subclasses
197          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
198          * class there is no subclassification and all the cfq queues go on
199          * a single tree service_tree_idle.
200          * Counts are embedded in the cfq_rb_root
201          */
202         struct cfq_rb_root service_trees[2][3];
203         struct cfq_rb_root service_tree_idle;
204
205         unsigned long saved_workload_slice;
206         enum wl_type_t saved_workload;
207         enum wl_prio_t saved_serving_prio;
208         struct blkio_group blkg;
209 #ifdef CONFIG_CFQ_GROUP_IOSCHED
210         struct hlist_node cfqd_node;
211         int ref;
212 #endif
213         /* number of requests that are on the dispatch list or inside driver */
214         int dispatched;
215 };
216
217 /*
218  * Per block device queue structure
219  */
220 struct cfq_data {
221         struct request_queue *queue;
222         /* Root service tree for cfq_groups */
223         struct cfq_rb_root grp_service_tree;
224         struct cfq_group root_group;
225
226         /*
227          * The priority currently being served
228          */
229         enum wl_prio_t serving_prio;
230         enum wl_type_t serving_type;
231         unsigned long workload_expires;
232         struct cfq_group *serving_group;
233
234         /*
235          * Each priority tree is sorted by next_request position.  These
236          * trees are used when determining if two or more queues are
237          * interleaving requests (see cfq_close_cooperator).
238          */
239         struct rb_root prio_trees[CFQ_PRIO_LISTS];
240
241         unsigned int busy_queues;
242         unsigned int busy_sync_queues;
243
244         int rq_in_driver;
245         int rq_in_flight[2];
246
247         /*
248          * queue-depth detection
249          */
250         int rq_queued;
251         int hw_tag;
252         /*
253          * hw_tag can be
254          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
255          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
256          *  0 => no NCQ
257          */
258         int hw_tag_est_depth;
259         unsigned int hw_tag_samples;
260
261         /*
262          * idle window management
263          */
264         struct timer_list idle_slice_timer;
265         struct work_struct unplug_work;
266
267         struct cfq_queue *active_queue;
268         struct cfq_io_context *active_cic;
269
270         /*
271          * async queue for each priority case
272          */
273         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
274         struct cfq_queue *async_idle_cfqq;
275
276         sector_t last_position;
277
278         /*
279          * tunables, see top of file
280          */
281         unsigned int cfq_quantum;
282         unsigned int cfq_fifo_expire[2];
283         unsigned int cfq_back_penalty;
284         unsigned int cfq_back_max;
285         unsigned int cfq_slice[2];
286         unsigned int cfq_slice_async_rq;
287         unsigned int cfq_slice_idle;
288         unsigned int cfq_group_idle;
289         unsigned int cfq_latency;
290
291         unsigned int cic_index;
292         struct list_head cic_list;
293
294         /*
295          * Fallback dummy cfqq for extreme OOM conditions
296          */
297         struct cfq_queue oom_cfqq;
298
299         unsigned long last_delayed_sync;
300
301         /* List of cfq groups being managed on this device*/
302         struct hlist_head cfqg_list;
303
304         /* Number of groups which are on blkcg->blkg_list */
305         unsigned int nr_blkcg_linked_grps;
306 };
307
308 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
309
310 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
311                                             enum wl_prio_t prio,
312                                             enum wl_type_t type)
313 {
314         if (!cfqg)
315                 return NULL;
316
317         if (prio == IDLE_WORKLOAD)
318                 return &cfqg->service_tree_idle;
319
320         return &cfqg->service_trees[prio][type];
321 }
322
323 enum cfqq_state_flags {
324         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
325         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
326         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
327         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
328         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
329         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
330         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
331         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
332         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
333         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
334         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
335         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
336         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
337 };
338
339 #define CFQ_CFQQ_FNS(name)                                              \
340 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
341 {                                                                       \
342         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
343 }                                                                       \
344 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
345 {                                                                       \
346         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
347 }                                                                       \
348 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
349 {                                                                       \
350         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
351 }
352
353 CFQ_CFQQ_FNS(on_rr);
354 CFQ_CFQQ_FNS(wait_request);
355 CFQ_CFQQ_FNS(must_dispatch);
356 CFQ_CFQQ_FNS(must_alloc_slice);
357 CFQ_CFQQ_FNS(fifo_expire);
358 CFQ_CFQQ_FNS(idle_window);
359 CFQ_CFQQ_FNS(prio_changed);
360 CFQ_CFQQ_FNS(slice_new);
361 CFQ_CFQQ_FNS(sync);
362 CFQ_CFQQ_FNS(coop);
363 CFQ_CFQQ_FNS(split_coop);
364 CFQ_CFQQ_FNS(deep);
365 CFQ_CFQQ_FNS(wait_busy);
366 #undef CFQ_CFQQ_FNS
367
368 #ifdef CONFIG_CFQ_GROUP_IOSCHED
369 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
370         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
371                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
372                         blkg_path(&(cfqq)->cfqg->blkg), ##args);
373
374 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
375         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
376                                 blkg_path(&(cfqg)->blkg), ##args);      \
377
378 #else
379 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
380         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
381 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0);
382 #endif
383 #define cfq_log(cfqd, fmt, args...)     \
384         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
385
386 /* Traverses through cfq group service trees */
387 #define for_each_cfqg_st(cfqg, i, j, st) \
388         for (i = 0; i <= IDLE_WORKLOAD; i++) \
389                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
390                         : &cfqg->service_tree_idle; \
391                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
392                         (i == IDLE_WORKLOAD && j == 0); \
393                         j++, st = i < IDLE_WORKLOAD ? \
394                         &cfqg->service_trees[i][j]: NULL) \
395
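/*
 * for_each_cfqg_st() visits all seven per-group service trees: for the RT
 * and BE classes it walks the three workload types (ASYNC, SYNC_NOIDLE,
 * SYNC), and for the IDLE class it visits the single service_tree_idle.
 */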
396
397 static inline bool iops_mode(struct cfq_data *cfqd)
398 {
399         /*
400          * If we are not idling on queues and it is an NCQ drive, requests
401          * execute in parallel and measuring time is not possible in most
402          * cases unless we drive shallow queue depths, which then becomes a
403          * performance bottleneck. In such cases, switch to providing
404          * fairness in terms of number of IOs.
405          */
406         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
407                 return true;
408         else
409                 return false;
410 }
411
412 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
413 {
414         if (cfq_class_idle(cfqq))
415                 return IDLE_WORKLOAD;
416         if (cfq_class_rt(cfqq))
417                 return RT_WORKLOAD;
418         return BE_WORKLOAD;
419 }
420
421
422 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
423 {
424         if (!cfq_cfqq_sync(cfqq))
425                 return ASYNC_WORKLOAD;
426         if (!cfq_cfqq_idle_window(cfqq))
427                 return SYNC_NOIDLE_WORKLOAD;
428         return SYNC_WORKLOAD;
429 }
430
431 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
432                                         struct cfq_data *cfqd,
433                                         struct cfq_group *cfqg)
434 {
435         if (wl == IDLE_WORKLOAD)
436                 return cfqg->service_tree_idle.count;
437
438         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
439                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
440                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
441 }
442
443 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
444                                         struct cfq_group *cfqg)
445 {
446         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
447                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
448 }
449
450 static void cfq_dispatch_insert(struct request_queue *, struct request *);
451 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
452                                        struct io_context *, gfp_t);
453 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
454                                                 struct io_context *);
455
456 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
457                                             bool is_sync)
458 {
459         return cic->cfqq[is_sync];
460 }
461
462 static inline void cic_set_cfqq(struct cfq_io_context *cic,
463                                 struct cfq_queue *cfqq, bool is_sync)
464 {
465         cic->cfqq[is_sync] = cfqq;
466 }
467
468 #define CIC_DEAD_KEY    1ul
469 #define CIC_DEAD_INDEX_SHIFT    1
470
471 static inline void *cfqd_dead_key(struct cfq_data *cfqd)
472 {
473         return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
474 }
475
476 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
477 {
478         struct cfq_data *cfqd = cic->key;
479
480         if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
481                 return NULL;
482
483         return cfqd;
484 }
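/*
 * Dead-key encoding, for reference: a live cic->key is a pointer to the
 * cfq_data, which is at least word aligned, so bit 0 is never set. When the
 * cfqd goes away the key is replaced by cfqd_dead_key(): the cic_index
 * shifted left by CIC_DEAD_INDEX_SHIFT with CIC_DEAD_KEY (bit 0) set, which
 * is why cic_to_cfqd() can detect a dead key by testing that bit.
 */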
485
486 /*
487  * We regard a request as SYNC, if it's either a read or has the SYNC bit
488  * set (in which case it could also be a direct WRITE).
489  */
490 static inline bool cfq_bio_sync(struct bio *bio)
491 {
492         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
493 }
494
495 /*
496  * Schedule a run of the queue if there are requests pending and no one in
497  * the driver will restart queueing.
498  */
499 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
500 {
501         if (cfqd->busy_queues) {
502                 cfq_log(cfqd, "schedule dispatch");
503                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
504         }
505 }
506
507 /*
508  * Scale schedule slice based on io priority. Use the sync time slice only
509  * if a queue is marked sync and has sync io queued. A sync queue with async
510  * io only should not get the full sync slice length.
511  */
512 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
513                                  unsigned short prio)
514 {
515         const int base_slice = cfqd->cfq_slice[sync];
516
517         WARN_ON(prio >= IOPRIO_BE_NR);
518
519         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
520 }
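/*
 * Worked example (assuming HZ=1000, so cfq_slice_sync = HZ/10 = 100 jiffies
 * and base_slice/CFQ_SLICE_SCALE = 20):
 *   prio 0 -> 100 + 20 * 4  = 180 jiffies
 *   prio 4 -> 100 + 20 * 0  = 100 jiffies
 *   prio 7 -> 100 + 20 * -3 =  40 jiffies
 * Lower ioprio values (higher priority) earn proportionally longer slices.
 */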
521
522 static inline int
523 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
524 {
525         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
526 }
527
528 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
529 {
530         u64 d = delta << CFQ_SERVICE_SHIFT;
531
532         d = d * BLKIO_WEIGHT_DEFAULT;
533         do_div(d, cfqg->weight);
534         return d;
535 }
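/*
 * The vdisktime charge is normalized by group weight: the service received
 * (delta) is shifted up by CFQ_SERVICE_SHIFT for precision, multiplied by
 * the default weight and divided by the group's weight. A group with twice
 * the default weight therefore accrues vdisktime at half the rate and gets
 * scheduled roughly twice as often for the same amount of service.
 */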
536
537 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
538 {
539         s64 delta = (s64)(vdisktime - min_vdisktime);
540         if (delta > 0)
541                 min_vdisktime = vdisktime;
542
543         return min_vdisktime;
544 }
545
546 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
547 {
548         s64 delta = (s64)(vdisktime - min_vdisktime);
549         if (delta < 0)
550                 min_vdisktime = vdisktime;
551
552         return min_vdisktime;
553 }
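/*
 * Both helpers compare u64 vdisktime values via a signed delta so that the
 * comparison stays correct even if the counters wrap, the same trick the
 * CFS scheduler uses for vruntime.
 */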
554
555 static void update_min_vdisktime(struct cfq_rb_root *st)
556 {
557         struct cfq_group *cfqg;
558
559         if (st->left) {
560                 cfqg = rb_entry_cfqg(st->left);
561                 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
562                                                   cfqg->vdisktime);
563         }
564 }
565
566 /*
567  * get averaged number of queues of RT/BE priority.
568  * the average is updated with a formula that gives more weight to higher
569  * numbers, so that it follows sudden increases quickly and decays slowly
570  */
571
572 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
573                                         struct cfq_group *cfqg, bool rt)
574 {
575         unsigned min_q, max_q;
576         unsigned mult  = cfq_hist_divisor - 1;
577         unsigned round = cfq_hist_divisor / 2;
578         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
579
580         min_q = min(cfqg->busy_queues_avg[rt], busy);
581         max_q = max(cfqg->busy_queues_avg[rt], busy);
582         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
583                 cfq_hist_divisor;
584         return cfqg->busy_queues_avg[rt];
585 }
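/*
 * Worked example with cfq_hist_divisor = 4 (mult = 3, round = 2), i.e.
 * avg = (3 * max + min + 2) / 4:
 *   avg 2, busy jumps to 6 -> (18 + 2 + 2) / 4 = 5   (rises quickly)
 *   avg 5, busy drops to 0 -> (15 + 0 + 2) / 4 = 4   (decays slowly)
 */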
586
587 static inline unsigned
588 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
589 {
590         struct cfq_rb_root *st = &cfqd->grp_service_tree;
591
592         return cfq_target_latency * cfqg->weight / st->total_weight;
593 }
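/*
 * Example: with cfq_target_latency = 3 * HZ / 10 (~300ms) and a group
 * holding half of the total weight on the service tree, that group is
 * allotted roughly a 150ms window before other groups must be serviced.
 */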
594
595 static inline unsigned
596 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
597 {
598         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
599         if (cfqd->cfq_latency) {
600                 /*
601                  * interested queues (we consider only the ones with the same
602                  * priority class in the cfq group)
603                  */
604                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
605                                                 cfq_class_rt(cfqq));
606                 unsigned sync_slice = cfqd->cfq_slice[1];
607                 unsigned expect_latency = sync_slice * iq;
608                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
609
610                 if (expect_latency > group_slice) {
611                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
612                         /* scale low_slice according to IO priority
613                          * and sync vs async */
614                         unsigned low_slice =
615                                 min(slice, base_low_slice * slice / sync_slice);
616                         /* the adapted slice value is scaled to fit all iqs
617                          * into the target latency */
618                         slice = max(slice * group_slice / expect_latency,
619                                     low_slice);
620                 }
621         }
622         return slice;
623 }
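/*
 * Example of the low_latency scaling (assuming HZ=1000): with a 100 jiffy
 * sync slice and 5 interested queues, expect_latency = 500 jiffies. If the
 * group's share of the target latency is only 300 jiffies, the queue's
 * slice is shrunk by the factor 300/500, but never below the low_slice
 * floor derived from cfq_slice_idle.
 */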
624
625 static inline void
626 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
627 {
628         unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
629
630         cfqq->slice_start = jiffies;
631         cfqq->slice_end = jiffies + slice;
632         cfqq->allocated_slice = slice;
633         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
634 }
635
636 /*
637  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
638  * isn't valid until the first request from the dispatch is activated
639  * and the slice time set.
640  */
641 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
642 {
643         if (cfq_cfqq_slice_new(cfqq))
644                 return false;
645         if (time_before(jiffies, cfqq->slice_end))
646                 return false;
647
648         return true;
649 }
650
651 /*
652  * Lifted from AS - choose which of rq1 and rq2 is best served now.
653  * We choose the request that is closest to the head right now. Distance
654  * behind the head is penalized and only allowed to a certain extent.
655  */
656 static struct request *
657 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
658 {
659         sector_t s1, s2, d1 = 0, d2 = 0;
660         unsigned long back_max;
661 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
662 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
663         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
664
665         if (rq1 == NULL || rq1 == rq2)
666                 return rq2;
667         if (rq2 == NULL)
668                 return rq1;
669
670         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
671                 return rq1;
672         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
673                 return rq2;
674         if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
675                 return rq1;
676         else if ((rq2->cmd_flags & REQ_META) &&
677                  !(rq1->cmd_flags & REQ_META))
678                 return rq2;
679
680         s1 = blk_rq_pos(rq1);
681         s2 = blk_rq_pos(rq2);
682
683         /*
684          * by definition, 1KiB is 2 sectors
685          */
686         back_max = cfqd->cfq_back_max * 2;
687
688         /*
689          * Strict one way elevator _except_ in the case where we allow
690          * short backward seeks which are biased as twice the cost of a
691          * similar forward seek.
692          */
693         if (s1 >= last)
694                 d1 = s1 - last;
695         else if (s1 + back_max >= last)
696                 d1 = (last - s1) * cfqd->cfq_back_penalty;
697         else
698                 wrap |= CFQ_RQ1_WRAP;
699
700         if (s2 >= last)
701                 d2 = s2 - last;
702         else if (s2 + back_max >= last)
703                 d2 = (last - s2) * cfqd->cfq_back_penalty;
704         else
705                 wrap |= CFQ_RQ2_WRAP;
706
707         /* Found required data */
708
709         /*
710          * By doing switch() on the bit mask "wrap" we avoid having to
711          * check two variables for all permutations: --> faster!
712          */
713         switch (wrap) {
714         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
715                 if (d1 < d2)
716                         return rq1;
717                 else if (d2 < d1)
718                         return rq2;
719                 else {
720                         if (s1 >= s2)
721                                 return rq1;
722                         else
723                                 return rq2;
724                 }
725
726         case CFQ_RQ2_WRAP:
727                 return rq1;
728         case CFQ_RQ1_WRAP:
729                 return rq2;
730         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
731         default:
732                 /*
733                  * Since both rqs are wrapped,
734                  * start with the one that's further behind head
735                  * (--> only *one* back seek required),
736                  * since back seek takes more time than forward.
737                  */
738                 if (s1 <= s2)
739                         return rq1;
740                 else
741                         return rq2;
742         }
743 }
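/*
 * Example of the backward-seek bias: back_max = cfq_back_max * 2 = 32768
 * sectors (16 MiB). A candidate 1000 sectors behind the head gets an
 * effective distance of 1000 * cfq_back_penalty = 2000, so it only wins
 * against a forward candidate that is more than 2000 sectors ahead. A
 * request more than back_max behind the head is treated as "wrapped" and
 * is only chosen if the alternative wrapped too.
 */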
744
745 /*
746  * Below is the leftmost-node cache addon for the rbtree
747  */
748 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
749 {
750         /* Service tree is empty */
751         if (!root->count)
752                 return NULL;
753
754         if (!root->left)
755                 root->left = rb_first(&root->rb);
756
757         if (root->left)
758                 return rb_entry(root->left, struct cfq_queue, rb_node);
759
760         return NULL;
761 }
762
763 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
764 {
765         if (!root->left)
766                 root->left = rb_first(&root->rb);
767
768         if (root->left)
769                 return rb_entry_cfqg(root->left);
770
771         return NULL;
772 }
773
774 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
775 {
776         rb_erase(n, root);
777         RB_CLEAR_NODE(n);
778 }
779
780 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
781 {
782         if (root->left == n)
783                 root->left = NULL;
784         rb_erase_init(n, &root->rb);
785         --root->count;
786 }
787
788 /*
789  * would be nice to take fifo expire time into account as well
790  */
791 static struct request *
792 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
793                   struct request *last)
794 {
795         struct rb_node *rbnext = rb_next(&last->rb_node);
796         struct rb_node *rbprev = rb_prev(&last->rb_node);
797         struct request *next = NULL, *prev = NULL;
798
799         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
800
801         if (rbprev)
802                 prev = rb_entry_rq(rbprev);
803
804         if (rbnext)
805                 next = rb_entry_rq(rbnext);
806         else {
807                 rbnext = rb_first(&cfqq->sort_list);
808                 if (rbnext && rbnext != &last->rb_node)
809                         next = rb_entry_rq(rbnext);
810         }
811
812         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
813 }
814
815 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
816                                       struct cfq_queue *cfqq)
817 {
818         /*
819          * just an approximation, should be ok.
820          */
821         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
822                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
823 }
824
825 static inline s64
826 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
827 {
828         return cfqg->vdisktime - st->min_vdisktime;
829 }
830
831 static void
832 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
833 {
834         struct rb_node **node = &st->rb.rb_node;
835         struct rb_node *parent = NULL;
836         struct cfq_group *__cfqg;
837         s64 key = cfqg_key(st, cfqg);
838         int left = 1;
839
840         while (*node != NULL) {
841                 parent = *node;
842                 __cfqg = rb_entry_cfqg(parent);
843
844                 if (key < cfqg_key(st, __cfqg))
845                         node = &parent->rb_left;
846                 else {
847                         node = &parent->rb_right;
848                         left = 0;
849                 }
850         }
851
852         if (left)
853                 st->left = &cfqg->rb_node;
854
855         rb_link_node(&cfqg->rb_node, parent, node);
856         rb_insert_color(&cfqg->rb_node, &st->rb);
857 }
858
859 static void
860 cfq_update_group_weight(struct cfq_group *cfqg)
861 {
862         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
863         if (cfqg->needs_update) {
864                 cfqg->weight = cfqg->new_weight;
865                 cfqg->needs_update = false;
866         }
867 }
868
869 static void
870 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
871 {
872         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
873
874         cfq_update_group_weight(cfqg);
875         __cfq_group_service_tree_add(st, cfqg);
876         st->total_weight += cfqg->weight;
877 }
878
879 static void
880 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
881 {
882         struct cfq_rb_root *st = &cfqd->grp_service_tree;
883         struct cfq_group *__cfqg;
884         struct rb_node *n;
885
886         cfqg->nr_cfqq++;
887         if (!RB_EMPTY_NODE(&cfqg->rb_node))
888                 return;
889
890         /*
891          * Currently put the group at the end. Later, implement something
892          * so that groups get a smaller vtime based on their weights, so that
893          * a group does not lose everything if it was not continuously backlogged.
894          */
895         n = rb_last(&st->rb);
896         if (n) {
897                 __cfqg = rb_entry_cfqg(n);
898                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
899         } else
900                 cfqg->vdisktime = st->min_vdisktime;
901         cfq_group_service_tree_add(st, cfqg);
902 }
903
904 static void
905 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
906 {
907         st->total_weight -= cfqg->weight;
908         if (!RB_EMPTY_NODE(&cfqg->rb_node))
909                 cfq_rb_erase(&cfqg->rb_node, st);
910 }
911
912 static void
913 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
914 {
915         struct cfq_rb_root *st = &cfqd->grp_service_tree;
916
917         BUG_ON(cfqg->nr_cfqq < 1);
918         cfqg->nr_cfqq--;
919
920         /* If there are other cfq queues under this group, don't delete it */
921         if (cfqg->nr_cfqq)
922                 return;
923
924         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
925         cfq_group_service_tree_del(st, cfqg);
926         cfqg->saved_workload_slice = 0;
927         cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
928 }
929
930 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
931                                                 unsigned int *unaccounted_time)
932 {
933         unsigned int slice_used;
934
935         /*
936          * Queue got expired before even a single request completed or
937          * got expired immediately after first request completion.
938          */
939         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
940                 /*
941                  * Also charge the seek time incurred to the group, otherwise
942                  * if there are multiple queues in the group, each can dispatch
943                  * a single request on seeky media and cause lots of seek time
944                  * and the group will never know it.
945                  */
946                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
947                                         1);
948         } else {
949                 slice_used = jiffies - cfqq->slice_start;
950                 if (slice_used > cfqq->allocated_slice) {
951                         *unaccounted_time = slice_used - cfqq->allocated_slice;
952                         slice_used = cfqq->allocated_slice;
953                 }
954                 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
955                         *unaccounted_time += cfqq->slice_start -
956                                         cfqq->dispatch_start;
957         }
958
959         return slice_used;
960 }
961
962 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
963                                 struct cfq_queue *cfqq)
964 {
965         struct cfq_rb_root *st = &cfqd->grp_service_tree;
966         unsigned int used_sl, charge, unaccounted_sl = 0;
967         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
968                         - cfqg->service_tree_idle.count;
969
970         BUG_ON(nr_sync < 0);
971         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
972
973         if (iops_mode(cfqd))
974                 charge = cfqq->slice_dispatch;
975         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
976                 charge = cfqq->allocated_slice;
977
978         /* Can't update vdisktime while group is on service tree */
979         cfq_group_service_tree_del(st, cfqg);
980         cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
981         /* If a new weight was requested, update now, off tree */
982         cfq_group_service_tree_add(st, cfqg);
983
984         /* This group is being expired. Save the context */
985         if (time_after(cfqd->workload_expires, jiffies)) {
986                 cfqg->saved_workload_slice = cfqd->workload_expires
987                                                 - jiffies;
988                 cfqg->saved_workload = cfqd->serving_type;
989                 cfqg->saved_serving_prio = cfqd->serving_prio;
990         } else
991                 cfqg->saved_workload_slice = 0;
992
993         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
994                                         st->min_vdisktime);
995         cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
996                         " sect=%u", used_sl, cfqq->slice_dispatch, charge,
997                         iops_mode(cfqd), cfqq->nr_sectors);
998         cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
999                                           unaccounted_sl);
1000         cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
1001 }
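/*
 * Summary of the charging policy above: in iops_mode() the group is charged
 * the number of requests dispatched in the slice; an async queue that had no
 * sync competition is charged its full allocated slice; otherwise the group
 * is charged the wall-clock time actually used. The charge, scaled by the
 * group's weight, advances vdisktime and thus the group's position on the
 * service tree.
 */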
1002
1003 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1004 static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
1005 {
1006         if (blkg)
1007                 return container_of(blkg, struct cfq_group, blkg);
1008         return NULL;
1009 }
1010
1011 void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
1012                                         unsigned int weight)
1013 {
1014         struct cfq_group *cfqg = cfqg_of_blkg(blkg);
1015         cfqg->new_weight = weight;
1016         cfqg->needs_update = true;
1017 }
1018
1019 static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
1020                         struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
1021 {
1022         struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1023         unsigned int major, minor;
1024
1025         /*
1026          * Add group onto cgroup list. It might happen that bdi->dev is
1027          * not initialized yet. Initialize this new group without major
1028          * and minor info; this info will be filled in once a new thread
1029          * comes in for IO.
1030          */
1031         if (bdi->dev) {
1032                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1033                 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1034                                         (void *)cfqd, MKDEV(major, minor));
1035         } else
1036                 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1037                                         (void *)cfqd, 0);
1038
1039         cfqd->nr_blkcg_linked_grps++;
1040         cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1041
1042         /* Add group on cfqd list */
1043         hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1044 }
1045
1046 /*
1047  * Should be called from sleepable context. No request queue lock is held,
1048  * as per-cpu stats are allocated dynamically and alloc_percpu() needs to be
1049  * called from sleepable context.
1050  */
1051 static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
1052 {
1053         struct cfq_group *cfqg = NULL;
1054         int i, j, ret;
1055         struct cfq_rb_root *st;
1056
1057         cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1058         if (!cfqg)
1059                 return NULL;
1060
1061         for_each_cfqg_st(cfqg, i, j, st)
1062                 *st = CFQ_RB_ROOT;
1063         RB_CLEAR_NODE(&cfqg->rb_node);
1064
1065         /*
1066          * Take the initial reference that will be released on destroy
1067          * This can be thought of a joint reference by cgroup and
1068          * elevator which will be dropped by either elevator exit
1069          * or cgroup deletion path depending on who is exiting first.
1070          */
1071         cfqg->ref = 1;
1072
1073         ret = blkio_alloc_blkg_stats(&cfqg->blkg);
1074         if (ret) {
1075                 kfree(cfqg);
1076                 return NULL;
1077         }
1078
1079         return cfqg;
1080 }
1081
1082 static struct cfq_group *
1083 cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
1084 {
1085         struct cfq_group *cfqg = NULL;
1086         void *key = cfqd;
1087         struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1088         unsigned int major, minor;
1089
1090         /*
1091          * This is the common case when there are no blkio cgroups.
1092          * Avoid lookup in this case
1093          */
1094         if (blkcg == &blkio_root_cgroup)
1095                 cfqg = &cfqd->root_group;
1096         else
1097                 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
1098
1099         if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
1100                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1101                 cfqg->blkg.dev = MKDEV(major, minor);
1102         }
1103
1104         return cfqg;
1105 }
1106
1107 /*
1108  * Search for the cfq group current task belongs to. request_queue lock must
1109  * be held.
1110  */
1111 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
1112 {
1113         struct blkio_cgroup *blkcg;
1114         struct cfq_group *cfqg = NULL, *__cfqg = NULL;
1115         struct request_queue *q = cfqd->queue;
1116
1117         rcu_read_lock();
1118         blkcg = task_blkio_cgroup(current);
1119         cfqg = cfq_find_cfqg(cfqd, blkcg);
1120         if (cfqg) {
1121                 rcu_read_unlock();
1122                 return cfqg;
1123         }
1124
1125         /*
1126          * Need to allocate a group. Allocation of the group also needs
1127          * allocation of per-cpu stats, which in turn takes a mutex and can block.
1128          * Hence we need to drop the rcu lock and queue_lock before we call alloc.
1129          *
1130          * Not taking any queue reference here and assuming that queue is
1131          * around by the time we return. CFQ queue allocation code does
1132          * the same. It might be racy though.
1133          */
1134
1135         rcu_read_unlock();
1136         spin_unlock_irq(q->queue_lock);
1137
1138         cfqg = cfq_alloc_cfqg(cfqd);
1139
1140         spin_lock_irq(q->queue_lock);
1141
1142         rcu_read_lock();
1143         blkcg = task_blkio_cgroup(current);
1144
1145         /*
1146          * If some other thread already allocated the group while we were
1147          * not holding queue lock, free up the group
1148          */
1149         __cfqg = cfq_find_cfqg(cfqd, blkcg);
1150
1151         if (__cfqg) {
1152                 kfree(cfqg);
1153                 rcu_read_unlock();
1154                 return __cfqg;
1155         }
1156
1157         if (!cfqg)
1158                 cfqg = &cfqd->root_group;
1159
1160         cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
1161         rcu_read_unlock();
1162         return cfqg;
1163 }
1164
1165 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1166 {
1167         cfqg->ref++;
1168         return cfqg;
1169 }
1170
1171 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1172 {
1173         /* Currently, all async queues are mapped to root group */
1174         if (!cfq_cfqq_sync(cfqq))
1175                 cfqg = &cfqq->cfqd->root_group;
1176
1177         cfqq->cfqg = cfqg;
1178         /* cfqq reference on cfqg */
1179         cfqq->cfqg->ref++;
1180 }
1181
1182 static void cfq_put_cfqg(struct cfq_group *cfqg)
1183 {
1184         struct cfq_rb_root *st;
1185         int i, j;
1186
1187         BUG_ON(cfqg->ref <= 0);
1188         cfqg->ref--;
1189         if (cfqg->ref)
1190                 return;
1191         for_each_cfqg_st(cfqg, i, j, st)
1192                 BUG_ON(!RB_EMPTY_ROOT(&st->rb));
1193         free_percpu(cfqg->blkg.stats_cpu);
1194         kfree(cfqg);
1195 }
1196
1197 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1198 {
1199         /* Something wrong if we are trying to remove same group twice */
1200         BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1201
1202         hlist_del_init(&cfqg->cfqd_node);
1203
1204         /*
1205          * Put the reference taken at the time of creation so that when all
1206          * queues are gone, group can be destroyed.
1207          */
1208         cfq_put_cfqg(cfqg);
1209 }
1210
1211 static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1212 {
1213         struct hlist_node *pos, *n;
1214         struct cfq_group *cfqg;
1215
1216         hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1217                 /*
1218                  * If cgroup removal path got to blk_group first and removed
1219                  * it from cgroup list, then it will take care of destroying
1220                  * cfqg also.
1221                  */
1222                 if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
1223                         cfq_destroy_cfqg(cfqd, cfqg);
1224         }
1225 }
1226
1227 /*
1228  * Blk cgroup controller notification saying that blkio_group object is being
1229  * delinked as associated cgroup object is going away. That also means that
1230  * no new IO will come in this group. So get rid of this group as soon as
1231  * any pending IO in the group is finished.
1232  *
1233  * This function is called under rcu_read_lock(). key is the rcu protected
1234  * pointer. That means "key" is a valid cfq_data pointer as long as we hold
1235  * the rcu read lock.
1236  *
1237  * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1238  * it should not be NULL, as even if the elevator was exiting, the cgroup
1239  * deletion path got to it first.
1240  */
1241 void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1242 {
1243         unsigned long  flags;
1244         struct cfq_data *cfqd = key;
1245
1246         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1247         cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1248         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1249 }
1250
1251 #else /* GROUP_IOSCHED */
1252 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
1253 {
1254         return &cfqd->root_group;
1255 }
1256
1257 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1258 {
1259         return cfqg;
1260 }
1261
1262 static inline void
1263 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1264         cfqq->cfqg = cfqg;
1265 }
1266
1267 static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1268 static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1269
1270 #endif /* GROUP_IOSCHED */
1271
1272 /*
1273  * The cfqd->service_trees hold all pending cfq_queues that have
1274  * requests waiting to be processed. It is sorted in the order that
1275  * we will service the queues.
1276  */
1277 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1278                                  bool add_front)
1279 {
1280         struct rb_node **p, *parent;
1281         struct cfq_queue *__cfqq;
1282         unsigned long rb_key;
1283         struct cfq_rb_root *service_tree;
1284         int left;
1285         int new_cfqq = 1;
1286         int group_changed = 0;
1287
1288         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1289                                                 cfqq_type(cfqq));
1290         if (cfq_class_idle(cfqq)) {
1291                 rb_key = CFQ_IDLE_DELAY;
1292                 parent = rb_last(&service_tree->rb);
1293                 if (parent && parent != &cfqq->rb_node) {
1294                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1295                         rb_key += __cfqq->rb_key;
1296                 } else
1297                         rb_key += jiffies;
1298         } else if (!add_front) {
1299                 /*
1300                  * Get our rb key offset. Subtract any residual slice
1301                  * value carried from last service. A negative resid
1302                  * count indicates slice overrun, and this should position
1303                  * the next service time further away in the tree.
1304                  */
1305                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1306                 rb_key -= cfqq->slice_resid;
1307                 cfqq->slice_resid = 0;
1308         } else {
1309                 rb_key = -HZ;
1310                 __cfqq = cfq_rb_first(service_tree);
1311                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1312         }
1313
1314         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1315                 new_cfqq = 0;
1316                 /*
1317                  * same position, nothing more to do
1318                  */
1319                 if (rb_key == cfqq->rb_key &&
1320                     cfqq->service_tree == service_tree)
1321                         return;
1322
1323                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1324                 cfqq->service_tree = NULL;
1325         }
1326
1327         left = 1;
1328         parent = NULL;
1329         cfqq->service_tree = service_tree;
1330         p = &service_tree->rb.rb_node;
1331         while (*p) {
1332                 struct rb_node **n;
1333
1334                 parent = *p;
1335                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1336
1337                 /*
1338                  * sort by key, which represents service time.
1339                  */
1340                 if (time_before(rb_key, __cfqq->rb_key))
1341                         n = &(*p)->rb_left;
1342                 else {
1343                         n = &(*p)->rb_right;
1344                         left = 0;
1345                 }
1346
1347                 p = n;
1348         }
1349
1350         if (left)
1351                 service_tree->left = &cfqq->rb_node;
1352
1353         cfqq->rb_key = rb_key;
1354         rb_link_node(&cfqq->rb_node, parent, p);
1355         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1356         service_tree->count++;
1357         if ((add_front || !new_cfqq) && !group_changed)
1358                 return;
1359         cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1360 }
1361
1362 static struct cfq_queue *
1363 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1364                      sector_t sector, struct rb_node **ret_parent,
1365                      struct rb_node ***rb_link)
1366 {
1367         struct rb_node **p, *parent;
1368         struct cfq_queue *cfqq = NULL;
1369
1370         parent = NULL;
1371         p = &root->rb_node;
1372         while (*p) {
1373                 struct rb_node **n;
1374
1375                 parent = *p;
1376                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1377
1378                 /*
1379                  * Sort strictly based on sector.  Smallest to the left,
1380                  * largest to the right.
1381                  */
1382                 if (sector > blk_rq_pos(cfqq->next_rq))
1383                         n = &(*p)->rb_right;
1384                 else if (sector < blk_rq_pos(cfqq->next_rq))
1385                         n = &(*p)->rb_left;
1386                 else
1387                         break;
1388                 p = n;
1389                 cfqq = NULL;
1390         }
1391
1392         *ret_parent = parent;
1393         if (rb_link)
1394                 *rb_link = p;
1395         return cfqq;
1396 }
1397
1398 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1399 {
1400         struct rb_node **p, *parent;
1401         struct cfq_queue *__cfqq;
1402
1403         if (cfqq->p_root) {
1404                 rb_erase(&cfqq->p_node, cfqq->p_root);
1405                 cfqq->p_root = NULL;
1406         }
1407
1408         if (cfq_class_idle(cfqq))
1409                 return;
1410         if (!cfqq->next_rq)
1411                 return;
1412
1413         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1414         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1415                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1416         if (!__cfqq) {
1417                 rb_link_node(&cfqq->p_node, parent, p);
1418                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1419         } else
1420                 cfqq->p_root = NULL;
1421 }
1422
1423 /*
1424  * Update cfqq's position in the service tree.
1425  */
1426 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1427 {
1428         /*
1429          * Resorting requires the cfqq to be on the RR list already.
1430          */
1431         if (cfq_cfqq_on_rr(cfqq)) {
1432                 cfq_service_tree_add(cfqd, cfqq, 0);
1433                 cfq_prio_tree_add(cfqd, cfqq);
1434         }
1435 }
1436
1437 /*
1438  * Add to the busy list of queues for service, trying to be fair in ordering
1439  * the pending list according to the last request service.
1440  */
1441 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1442 {
1443         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1444         BUG_ON(cfq_cfqq_on_rr(cfqq));
1445         cfq_mark_cfqq_on_rr(cfqq);
1446         cfqd->busy_queues++;
1447         if (cfq_cfqq_sync(cfqq))
1448                 cfqd->busy_sync_queues++;
1449
1450         cfq_resort_rr_list(cfqd, cfqq);
1451 }
1452
1453 /*
1454  * Called when the cfqq no longer has requests pending, remove it from
1455  * the service tree.
1456  */
1457 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1458 {
1459         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1460         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1461         cfq_clear_cfqq_on_rr(cfqq);
1462
1463         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1464                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1465                 cfqq->service_tree = NULL;
1466         }
1467         if (cfqq->p_root) {
1468                 rb_erase(&cfqq->p_node, cfqq->p_root);
1469                 cfqq->p_root = NULL;
1470         }
1471
1472         cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1473         BUG_ON(!cfqd->busy_queues);
1474         cfqd->busy_queues--;
1475         if (cfq_cfqq_sync(cfqq))
1476                 cfqd->busy_sync_queues--;
1477 }
1478
1479 /*
1480  * rb tree support functions
1481  */
1482 static void cfq_del_rq_rb(struct request *rq)
1483 {
1484         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1485         const int sync = rq_is_sync(rq);
1486
1487         BUG_ON(!cfqq->queued[sync]);
1488         cfqq->queued[sync]--;
1489
1490         elv_rb_del(&cfqq->sort_list, rq);
1491
1492         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1493                 /*
1494                  * Queue will be deleted from service tree when we actually
1495                  * expire it later. Right now just remove it from prio tree
1496                  * as it is empty.
1497                  */
1498                 if (cfqq->p_root) {
1499                         rb_erase(&cfqq->p_node, cfqq->p_root);
1500                         cfqq->p_root = NULL;
1501                 }
1502         }
1503 }
1504
1505 static void cfq_add_rq_rb(struct request *rq)
1506 {
1507         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1508         struct cfq_data *cfqd = cfqq->cfqd;
1509         struct request *__alias, *prev;
1510
1511         cfqq->queued[rq_is_sync(rq)]++;
1512
1513         /*
1514          * looks a little odd, but the first insert might return an alias.
1515          * if that happens, put the alias on the dispatch list
1516          */
1517         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
1518                 cfq_dispatch_insert(cfqd->queue, __alias);
1519
1520         if (!cfq_cfqq_on_rr(cfqq))
1521                 cfq_add_cfqq_rr(cfqd, cfqq);
1522
1523         /*
1524          * check if this request is a better next-serve candidate
1525          */
1526         prev = cfqq->next_rq;
1527         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1528
1529         /*
1530          * adjust priority tree position, if ->next_rq changes
1531          */
1532         if (prev != cfqq->next_rq)
1533                 cfq_prio_tree_add(cfqd, cfqq);
1534
1535         BUG_ON(!cfqq->next_rq);
1536 }
1537
1538 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1539 {
1540         elv_rb_del(&cfqq->sort_list, rq);
1541         cfqq->queued[rq_is_sync(rq)]--;
1542         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1543                                         rq_data_dir(rq), rq_is_sync(rq));
1544         cfq_add_rq_rb(rq);
1545         cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
1546                         &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1547                         rq_is_sync(rq));
1548 }
1549
1550 static struct request *
1551 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1552 {
1553         struct task_struct *tsk = current;
1554         struct cfq_io_context *cic;
1555         struct cfq_queue *cfqq;
1556
1557         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1558         if (!cic)
1559                 return NULL;
1560
1561         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1562         if (cfqq) {
1563                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1564
1565                 return elv_rb_find(&cfqq->sort_list, sector);
1566         }
1567
1568         return NULL;
1569 }
1570
1571 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1572 {
1573         struct cfq_data *cfqd = q->elevator->elevator_data;
1574
1575         cfqd->rq_in_driver++;
1576         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1577                                                 cfqd->rq_in_driver);
1578
1579         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1580 }
1581
1582 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1583 {
1584         struct cfq_data *cfqd = q->elevator->elevator_data;
1585
1586         WARN_ON(!cfqd->rq_in_driver);
1587         cfqd->rq_in_driver--;
1588         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1589                                                 cfqd->rq_in_driver);
1590 }
1591
1592 static void cfq_remove_request(struct request *rq)
1593 {
1594         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1595
1596         if (cfqq->next_rq == rq)
1597                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1598
1599         list_del_init(&rq->queuelist);
1600         cfq_del_rq_rb(rq);
1601
1602         cfqq->cfqd->rq_queued--;
1603         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1604                                         rq_data_dir(rq), rq_is_sync(rq));
1605         if (rq->cmd_flags & REQ_META) {
1606                 WARN_ON(!cfqq->meta_pending);
1607                 cfqq->meta_pending--;
1608         }
1609 }
1610
1611 static int cfq_merge(struct request_queue *q, struct request **req,
1612                      struct bio *bio)
1613 {
1614         struct cfq_data *cfqd = q->elevator->elevator_data;
1615         struct request *__rq;
1616
1617         __rq = cfq_find_rq_fmerge(cfqd, bio);
1618         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1619                 *req = __rq;
1620                 return ELEVATOR_FRONT_MERGE;
1621         }
1622
1623         return ELEVATOR_NO_MERGE;
1624 }
1625
1626 static void cfq_merged_request(struct request_queue *q, struct request *req,
1627                                int type)
1628 {
1629         if (type == ELEVATOR_FRONT_MERGE) {
1630                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1631
1632                 cfq_reposition_rq_rb(cfqq, req);
1633         }
1634 }
1635
1636 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1637                                 struct bio *bio)
1638 {
1639         cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
1640                                         bio_data_dir(bio), cfq_bio_sync(bio));
1641 }
1642
1643 static void
1644 cfq_merged_requests(struct request_queue *q, struct request *rq,
1645                     struct request *next)
1646 {
1647         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1648         /*
1649          * reposition in fifo if next is older than rq
1650          */
1651         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1652             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1653                 list_move(&rq->queuelist, &next->queuelist);
1654                 rq_set_fifo_time(rq, rq_fifo_time(next));
1655         }
1656
1657         if (cfqq->next_rq == next)
1658                 cfqq->next_rq = rq;
1659         cfq_remove_request(next);
1660         cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
1661                                         rq_data_dir(next), rq_is_sync(next));
1662 }
1663
1664 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1665                            struct bio *bio)
1666 {
1667         struct cfq_data *cfqd = q->elevator->elevator_data;
1668         struct cfq_io_context *cic;
1669         struct cfq_queue *cfqq;
1670
1671         /*
1672          * Disallow merge of a sync bio into an async request.
1673          */
1674         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1675                 return false;
1676
1677         /*
1678          * Look up the cfqq that this bio will be queued with. Allow
1679          * merge only if rq is queued there.
1680          */
1681         cic = cfq_cic_lookup(cfqd, current->io_context);
1682         if (!cic)
1683                 return false;
1684
1685         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1686         return cfqq == RQ_CFQQ(rq);
1687 }
1688
1689 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1690 {
1691         del_timer(&cfqd->idle_slice_timer);
1692         cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1693 }
1694
1695 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1696                                    struct cfq_queue *cfqq)
1697 {
1698         if (cfqq) {
1699                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1700                                 cfqd->serving_prio, cfqd->serving_type);
1701                 cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1702                 cfqq->slice_start = 0;
1703                 cfqq->dispatch_start = jiffies;
1704                 cfqq->allocated_slice = 0;
1705                 cfqq->slice_end = 0;
1706                 cfqq->slice_dispatch = 0;
1707                 cfqq->nr_sectors = 0;
1708
1709                 cfq_clear_cfqq_wait_request(cfqq);
1710                 cfq_clear_cfqq_must_dispatch(cfqq);
1711                 cfq_clear_cfqq_must_alloc_slice(cfqq);
1712                 cfq_clear_cfqq_fifo_expire(cfqq);
1713                 cfq_mark_cfqq_slice_new(cfqq);
1714
1715                 cfq_del_timer(cfqd, cfqq);
1716         }
1717
1718         cfqd->active_queue = cfqq;
1719 }
1720
1721 /*
1722  * current cfqq expired its slice (or was too idle), select new one
1723  */
1724 static void
1725 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1726                     bool timed_out)
1727 {
1728         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1729
1730         if (cfq_cfqq_wait_request(cfqq))
1731                 cfq_del_timer(cfqd, cfqq);
1732
1733         cfq_clear_cfqq_wait_request(cfqq);
1734         cfq_clear_cfqq_wait_busy(cfqq);
1735
1736         /*
1737          * If this cfqq is shared between multiple processes, check to
1738          * make sure that those processes are still issuing I/Os within
1739          * the mean seek distance.  If not, it may be time to break the
1740          * queues apart again.
1741          */
1742         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1743                 cfq_mark_cfqq_split_coop(cfqq);
1744
1745         /*
1746          * store what was left of this slice, if the queue idled/timed out
1747          */
1748         if (timed_out) {
1749                 if (cfq_cfqq_slice_new(cfqq))
1750                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
1751                 else
1752                         cfqq->slice_resid = cfqq->slice_end - jiffies;
1753                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1754         }
1755
1756         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1757
1758         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1759                 cfq_del_cfqq_rr(cfqd, cfqq);
1760
1761         cfq_resort_rr_list(cfqd, cfqq);
1762
1763         if (cfqq == cfqd->active_queue)
1764                 cfqd->active_queue = NULL;
1765
1766         if (cfqd->active_cic) {
1767                 put_io_context(cfqd->active_cic->ioc);
1768                 cfqd->active_cic = NULL;
1769         }
1770 }
1771
1772 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1773 {
1774         struct cfq_queue *cfqq = cfqd->active_queue;
1775
1776         if (cfqq)
1777                 __cfq_slice_expired(cfqd, cfqq, timed_out);
1778 }
1779
1780 /*
1781  * Get next queue for service. Unless we have a queue preemption,
1782  * we'll simply select the first cfqq in the service tree.
1783  */
1784 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1785 {
1786         struct cfq_rb_root *service_tree =
1787                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1788                                         cfqd->serving_type);
1789
1790         if (!cfqd->rq_queued)
1791                 return NULL;
1792
1793         /* There is nothing to dispatch */
1794         if (!service_tree)
1795                 return NULL;
1796         if (RB_EMPTY_ROOT(&service_tree->rb))
1797                 return NULL;
1798         return cfq_rb_first(service_tree);
1799 }
1800
1801 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1802 {
1803         struct cfq_group *cfqg;
1804         struct cfq_queue *cfqq;
1805         int i, j;
1806         struct cfq_rb_root *st;
1807
1808         if (!cfqd->rq_queued)
1809                 return NULL;
1810
1811         cfqg = cfq_get_next_cfqg(cfqd);
1812         if (!cfqg)
1813                 return NULL;
1814
1815         for_each_cfqg_st(cfqg, i, j, st)
1816                 if ((cfqq = cfq_rb_first(st)) != NULL)
1817                         return cfqq;
1818         return NULL;
1819 }
1820
1821 /*
1822  * Get and set a new active queue for service.
1823  */
1824 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1825                                               struct cfq_queue *cfqq)
1826 {
1827         if (!cfqq)
1828                 cfqq = cfq_get_next_queue(cfqd);
1829
1830         __cfq_set_active_queue(cfqd, cfqq);
1831         return cfqq;
1832 }
1833
1834 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1835                                           struct request *rq)
1836 {
1837         if (blk_rq_pos(rq) >= cfqd->last_position)
1838                 return blk_rq_pos(rq) - cfqd->last_position;
1839         else
1840                 return cfqd->last_position - blk_rq_pos(rq);
1841 }
1842
1843 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1844                                struct request *rq)
1845 {
1846         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1847 }
1848
1849 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1850                                     struct cfq_queue *cur_cfqq)
1851 {
1852         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1853         struct rb_node *parent, *node;
1854         struct cfq_queue *__cfqq;
1855         sector_t sector = cfqd->last_position;
1856
1857         if (RB_EMPTY_ROOT(root))
1858                 return NULL;
1859
1860         /*
1861          * First, if we find a request starting at the end of the last
1862          * request, choose it.
1863          */
1864         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1865         if (__cfqq)
1866                 return __cfqq;
1867
1868         /*
1869          * If the exact sector wasn't found, the parent of the NULL leaf
1870          * will contain the closest sector.
1871          */
1872         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1873         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1874                 return __cfqq;
1875
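             /*
              * The closest queue on this side of last_position was not near
              * enough; also try the tree neighbour on the other side before
              * giving up.
              */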
1876         if (blk_rq_pos(__cfqq->next_rq) < sector)
1877                 node = rb_next(&__cfqq->p_node);
1878         else
1879                 node = rb_prev(&__cfqq->p_node);
1880         if (!node)
1881                 return NULL;
1882
1883         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1884         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1885                 return __cfqq;
1886
1887         return NULL;
1888 }
1889
1890 /*
1891  * cfqd - obvious
1892  * cur_cfqq - passed in so that we don't decide that the current queue is
1893  *            closely cooperating with itself.
1894  *
1895  * So, basically we're assuming that cur_cfqq has dispatched at least
1896  * one request, and that cfqd->last_position reflects a position on the disk
1897  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1898  * assumption.
1899  */
1900 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1901                                               struct cfq_queue *cur_cfqq)
1902 {
1903         struct cfq_queue *cfqq;
1904
1905         if (cfq_class_idle(cur_cfqq))
1906                 return NULL;
1907         if (!cfq_cfqq_sync(cur_cfqq))
1908                 return NULL;
1909         if (CFQQ_SEEKY(cur_cfqq))
1910                 return NULL;
1911
1912         /*
1913          * Don't search priority tree if it's the only queue in the group.
1914          */
1915         if (cur_cfqq->cfqg->nr_cfqq == 1)
1916                 return NULL;
1917
1918         /*
1919          * We should notice if some of the queues are cooperating, e.g.
1920          * working closely on the same area of the disk. In that case,
1921          * we can group them together and not waste time idling.
1922          */
1923         cfqq = cfqq_close(cfqd, cur_cfqq);
1924         if (!cfqq)
1925                 return NULL;
1926
1927         /* If the new queue belongs to a different cfq_group, don't choose it */
1928         if (cur_cfqq->cfqg != cfqq->cfqg)
1929                 return NULL;
1930
1931         /*
1932          * It only makes sense to merge sync queues.
1933          */
1934         if (!cfq_cfqq_sync(cfqq))
1935                 return NULL;
1936         if (CFQQ_SEEKY(cfqq))
1937                 return NULL;
1938
1939         /*
1940          * Do not merge queues of different priority classes
1941          */
1942         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1943                 return NULL;
1944
1945         return cfqq;
1946 }
1947
1948 /*
1949  * Determine whether we should enforce idle window for this queue.
1950  */
1951
1952 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1953 {
1954         enum wl_prio_t prio = cfqq_prio(cfqq);
1955         struct cfq_rb_root *service_tree = cfqq->service_tree;
1956
1957         BUG_ON(!service_tree);
1958         BUG_ON(!service_tree->count);
1959
1960         if (!cfqd->cfq_slice_idle)
1961                 return false;
1962
1963         /* We never do for idle class queues. */
1964         if (prio == IDLE_WORKLOAD)
1965                 return false;
1966
1967         /* We do for queues that were marked with idle window flag. */
1968         if (cfq_cfqq_idle_window(cfqq) &&
1969            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1970                 return true;
1971
1972         /*
1973          * Otherwise, we do only if they are the last ones
1974          * in their service tree.
1975          */
1976         if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
1977                 return true;
1978         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1979                         service_tree->count);
1980         return false;
1981 }
1982
1983 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1984 {
1985         struct cfq_queue *cfqq = cfqd->active_queue;
1986         struct cfq_io_context *cic;
1987         unsigned long sl, group_idle = 0;
1988
1989         /*
1990          * SSD device without seek penalty, disable idling. But only do so
1991          * for devices that support queuing, otherwise we still have a problem
1992          * with sync vs async workloads.
1993          */
1994         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1995                 return;
1996
1997         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1998         WARN_ON(cfq_cfqq_slice_new(cfqq));
1999
2000         /*
2001          * idle is disabled, either manually or by past process history
2002          */
2003         if (!cfq_should_idle(cfqd, cfqq)) {
2004                 /* no queue idling. Check for group idling */
2005                 if (cfqd->cfq_group_idle)
2006                         group_idle = cfqd->cfq_group_idle;
2007                 else
2008                         return;
2009         }
2010
2011         /*
2012          * still active requests from this queue, don't idle
2013          */
2014         if (cfqq->dispatched)
2015                 return;
2016
2017         /*
2018          * task has exited, don't wait
2019          */
2020         cic = cfqd->active_cic;
2021         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
2022                 return;
2023
2024         /*
2025          * If our average think time is larger than the remaining time
2026          * slice, then don't idle. This avoids overrunning the allotted
2027          * time slice.
2028          */
2029         if (sample_valid(cic->ttime_samples) &&
2030             (cfqq->slice_end - jiffies < cic->ttime_mean)) {
2031                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
2032                                 cic->ttime_mean);
2033                 return;
2034         }
2035
2036         /* There are other queues in the group, don't do group idle */
2037         if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2038                 return;
2039
2040         cfq_mark_cfqq_wait_request(cfqq);
2041
2042         if (group_idle)
2043                 sl = cfqd->cfq_group_idle;
2044         else
2045                 sl = cfqd->cfq_slice_idle;
2046
2047         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2048         cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
2049         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2050                         group_idle ? 1 : 0);
2051 }
2052
2053 /*
2054  * Move request from internal lists to the request queue dispatch list.
2055  */
2056 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2057 {
2058         struct cfq_data *cfqd = q->elevator->elevator_data;
2059         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2060
2061         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2062
2063         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2064         cfq_remove_request(rq);
2065         cfqq->dispatched++;
2066         (RQ_CFQG(rq))->dispatched++;
2067         elv_dispatch_sort(q, rq);
2068
2069         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2070         cfqq->nr_sectors += blk_rq_sectors(rq);
2071         cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
2072                                         rq_data_dir(rq), rq_is_sync(rq));
2073 }
2074
2075 /*
2076  * return expired entry, or NULL to just start from scratch in rbtree
2077  */
2078 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2079 {
2080         struct request *rq = NULL;
2081
2082         if (cfq_cfqq_fifo_expire(cfqq))
2083                 return NULL;
2084
2085         cfq_mark_cfqq_fifo_expire(cfqq);
2086
2087         if (list_empty(&cfqq->fifo))
2088                 return NULL;
2089
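             /*
              * Only report the head of the fifo as expired once its deadline
              * (rq_fifo_time) has actually passed; otherwise return NULL and
              * dispatch from the rbtree as usual.
              */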
2090         rq = rq_entry_fifo(cfqq->fifo.next);
2091         if (time_before(jiffies, rq_fifo_time(rq)))
2092                 rq = NULL;
2093
2094         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2095         return rq;
2096 }
2097
2098 static inline int
2099 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2100 {
2101         const int base_rq = cfqd->cfq_slice_async_rq;
2102
2103         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2104
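             /*
              * The per-priority dispatch budget scales linearly with ioprio:
              * e.g. with base_rq = 2 (the default cfq_slice_async_rq) and
              * IOPRIO_BE_NR = 8, an ioprio 0 async queue may dispatch up to
              * 2 * 2 * 8 = 32 requests in one slice, while ioprio 7 gets
              * only 2 * 2 * 1 = 4.
              */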
2105         return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2106 }
2107
2108 /*
2109  * Must be called with the queue_lock held.
2110  */
2111 static int cfqq_process_refs(struct cfq_queue *cfqq)
2112 {
2113         int process_refs, io_refs;
2114
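             /*
              * Each allocated request holds a queue reference; whatever is
              * left of ->ref after subtracting those is held by processes,
              * e.g. ref == 5 with 3 allocated requests leaves 2 process refs.
              */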
2115         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2116         process_refs = cfqq->ref - io_refs;
2117         BUG_ON(process_refs < 0);
2118         return process_refs;
2119 }
2120
2121 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2122 {
2123         int process_refs, new_process_refs;
2124         struct cfq_queue *__cfqq;
2125
2126         /*
2127          * If there are no process references on the new_cfqq, then it is
2128          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2129          * chain may have dropped their last reference (not just their
2130          * last process reference).
2131          */
2132         if (!cfqq_process_refs(new_cfqq))
2133                 return;
2134
2135         /* Avoid a circular list and skip interim queue merges */
2136         while ((__cfqq = new_cfqq->new_cfqq)) {
2137                 if (__cfqq == cfqq)
2138                         return;
2139                 new_cfqq = __cfqq;
2140         }
2141
2142         process_refs = cfqq_process_refs(cfqq);
2143         new_process_refs = cfqq_process_refs(new_cfqq);
2144         /*
2145          * If the process for the cfqq has gone away, there is no
2146          * sense in merging the queues.
2147          */
2148         if (process_refs == 0 || new_process_refs == 0)
2149                 return;
2150
2151         /*
2152          * Merge in the direction of the lesser amount of work.
2153          */
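             /*
              * E.g. if cfqq has 1 process reference and new_cfqq has 3, point
              * cfqq at new_cfqq so the queue with fewer users is the one that
              * gets redirected, and transfer its references to the survivor.
              */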
2154         if (new_process_refs >= process_refs) {
2155                 cfqq->new_cfqq = new_cfqq;
2156                 new_cfqq->ref += process_refs;
2157         } else {
2158                 new_cfqq->new_cfqq = cfqq;
2159                 cfqq->ref += new_process_refs;
2160         }
2161 }
2162
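     /*
      * Pick the workload type (ASYNC, SYNC_NOIDLE or SYNC) whose first queued
      * cfqq has the earliest rb_key, i.e. the type that has been waiting for
      * service the longest within this priority class.
      */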
2163 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2164                                 struct cfq_group *cfqg, enum wl_prio_t prio)
2165 {
2166         struct cfq_queue *queue;
2167         int i;
2168         bool key_valid = false;
2169         unsigned long lowest_key = 0;
2170         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2171
2172         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2173                 /* select the one with lowest rb_key */
2174                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2175                 if (queue &&
2176                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
2177                         lowest_key = queue->rb_key;
2178                         cur_best = i;
2179                         key_valid = true;
2180                 }
2181         }
2182
2183         return cur_best;
2184 }
2185
2186 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2187 {
2188         unsigned slice;
2189         unsigned count;
2190         struct cfq_rb_root *st;
2191         unsigned group_slice;
2192         enum wl_prio_t original_prio = cfqd->serving_prio;
2193
2194         /* Choose next priority. RT > BE > IDLE */
2195         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2196                 cfqd->serving_prio = RT_WORKLOAD;
2197         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2198                 cfqd->serving_prio = BE_WORKLOAD;
2199         else {
2200                 cfqd->serving_prio = IDLE_WORKLOAD;
2201                 cfqd->workload_expires = jiffies + 1;
2202                 return;
2203         }
2204
2205         if (original_prio != cfqd->serving_prio)
2206                 goto new_workload;
2207
2208         /*
2209          * For RT and BE, we also have to choose the type
2210          * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
2211          * expiration time
2212          */
2213         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2214         count = st->count;
2215
2216         /*
2217          * check workload expiration, and that we still have other queues ready
2218          */
2219         if (count && !time_after(jiffies, cfqd->workload_expires))
2220                 return;
2221
2222 new_workload:
2223         /* otherwise select new workload type */
2224         cfqd->serving_type =
2225                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2226         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2227         count = st->count;
2228
2229         /*
2230          * the workload slice is computed as a fraction of target latency
2231          * proportional to the number of queues in that workload, over
2232          * all the queues in the same priority class
2233          */
2234         group_slice = cfq_group_slice(cfqd, cfqg);
2235
2236         slice = group_slice * count /
2237                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2238                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
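             /*
              * Worked example: with a 300ms group slice and 3 busy queues in
              * this priority class, 2 of which sit on the chosen service tree,
              * the workload gets 300 * 2 / 3 = 200ms before we re-evaluate
              * (assuming the averaged busy count is not higher).
              */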
2239
2240         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2241                 unsigned int tmp;
2242
2243                 /*
2244                  * Async queues are currently system wide. Just taking the
2245                  * proportion of queues within the same group would lead to a
2246                  * higher async ratio system wide, as the root group generally
2247                  * has a higher weight. A more accurate approach would be to
2248                  * calculate the system wide async/sync ratio.
2249                  */
2250                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2251                 tmp = tmp/cfqd->busy_queues;
2252                 slice = min_t(unsigned, slice, tmp);
2253
2254                 /* async workload slice is scaled down according to
2255                  * the sync/async slice ratio. */
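                     /*
                      * e.g. with the default ~40ms async and ~100ms sync
                      * slices, this cuts the async slice to 40% of the value
                      * computed above.
                      */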
2256                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2257         } else
2258                 /* sync workload slice is at least 2 * cfq_slice_idle */
2259                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2260
2261         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2262         cfq_log(cfqd, "workload slice:%d", slice);
2263         cfqd->workload_expires = jiffies + slice;
2264 }
2265
2266 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2267 {
2268         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2269         struct cfq_group *cfqg;
2270
2271         if (RB_EMPTY_ROOT(&st->rb))
2272                 return NULL;
2273         cfqg = cfq_rb_first_group(st);
2274         update_min_vdisktime(st);
2275         return cfqg;
2276 }
2277
2278 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2279 {
2280         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2281
2282         cfqd->serving_group = cfqg;
2283
2284         /* Restore the workload type data */
2285         if (cfqg->saved_workload_slice) {
2286                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2287                 cfqd->serving_type = cfqg->saved_workload;
2288                 cfqd->serving_prio = cfqg->saved_serving_prio;
2289         } else
2290                 cfqd->workload_expires = jiffies - 1;
2291
2292         choose_service_tree(cfqd, cfqg);
2293 }
2294
2295 /*
2296  * Select a queue for service. If we have a current active queue,
2297  * check whether to continue servicing it, or retrieve and set a new one.
2298  */
2299 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2300 {
2301         struct cfq_queue *cfqq, *new_cfqq = NULL;
2302
2303         cfqq = cfqd->active_queue;
2304         if (!cfqq)
2305                 goto new_queue;
2306
2307         if (!cfqd->rq_queued)
2308                 return NULL;
2309
2310         /*
2311          * We were waiting for group to get backlogged. Expire the queue
2312          */
2313         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2314                 goto expire;
2315
2316         /*
2317          * The active queue has run out of time, expire it and select new.
2318          */
2319         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2320                 /*
2321                  * If the slice had not expired at the completion of the last
2322                  * request, we might not have turned on the wait_busy flag. Don't
2323                  * expire the queue yet. Allow the group to get backlogged.
2324                  *
2325                  * The very fact that we have used up the slice means we have
2326                  * been idling all along on this queue, and it should be ok to
2327                  * wait for this request to complete.
2328                  */
2329                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2330                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2331                         cfqq = NULL;
2332                         goto keep_queue;
2333                 } else
2334                         goto check_group_idle;
2335         }
2336
2337         /*
2338          * The active queue has requests and isn't expired, allow it to
2339          * dispatch.
2340          */
2341         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2342                 goto keep_queue;
2343
2344         /*
2345          * If another queue has a request waiting within our mean seek
2346          * distance, let it run.  The expire code will check for close
2347          * cooperators and put the close queue at the front of the service
2348          * tree.  If possible, merge the expiring queue with the new cfqq.
2349          */
2350         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2351         if (new_cfqq) {
2352                 if (!cfqq->new_cfqq)
2353                         cfq_setup_merge(cfqq, new_cfqq);
2354                 goto expire;
2355         }
2356
2357         /*
2358          * No requests pending. If the active queue still has requests in
2359          * flight or is idling for a new request, allow either of these
2360          * conditions to happen (or time out) before selecting a new queue.
2361          */
2362         if (timer_pending(&cfqd->idle_slice_timer)) {
2363                 cfqq = NULL;
2364                 goto keep_queue;
2365         }
2366
2367         /*
2368          * This is a deep seek queue, but the device is much faster than
2369          * the queue can deliver, don't idle
2370          */
2371         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2372             (cfq_cfqq_slice_new(cfqq) ||
2373             (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2374                 cfq_clear_cfqq_deep(cfqq);
2375                 cfq_clear_cfqq_idle_window(cfqq);
2376         }
2377
2378         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2379                 cfqq = NULL;
2380                 goto keep_queue;
2381         }
2382
2383         /*
2384          * If group idle is enabled and there are requests dispatched from
2385          * this group, wait for requests to complete.
2386          */
2387 check_group_idle:
2388         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
2389             && cfqq->cfqg->dispatched) {
2390                 cfqq = NULL;
2391                 goto keep_queue;
2392         }
2393
2394 expire:
2395         cfq_slice_expired(cfqd, 0);
2396 new_queue:
2397         /*
2398          * Current queue expired. Check if we have to switch to a new
2399          * service tree
2400          */
2401         if (!new_cfqq)
2402                 cfq_choose_cfqg(cfqd);
2403
2404         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2405 keep_queue:
2406         return cfqq;
2407 }
2408
2409 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2410 {
2411         int dispatched = 0;
2412
2413         while (cfqq->next_rq) {
2414                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2415                 dispatched++;
2416         }
2417
2418         BUG_ON(!list_empty(&cfqq->fifo));
2419
2420         /* By default cfqq is not expired if it is empty. Do it explicitly */
2421         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2422         return dispatched;
2423 }
2424
2425 /*
2426  * Drain our current requests. Used for barriers and when switching
2427  * io schedulers on-the-fly.
2428  */
2429 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2430 {
2431         struct cfq_queue *cfqq;
2432         int dispatched = 0;
2433
2434         /* Expire the timeslice of the current active queue first */
2435         cfq_slice_expired(cfqd, 0);
2436         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2437                 __cfq_set_active_queue(cfqd, cfqq);
2438                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2439         }
2440
2441         BUG_ON(cfqd->busy_queues);
2442
2443         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2444         return dispatched;
2445 }
2446
2447 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2448         struct cfq_queue *cfqq)
2449 {
2450         /* the queue hasn't finished any request, can't estimate */
2451         if (cfq_cfqq_slice_new(cfqq))
2452                 return true;
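             /*
              * Estimate: assume each already-dispatched request will take
              * roughly one cfq_slice_idle to complete; if that projection
              * runs past slice_end, treat the slice as good as used.
              */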
2453         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2454                 cfqq->slice_end))
2455                 return true;
2456
2457         return false;
2458 }
2459
2460 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2461 {
2462         unsigned int max_dispatch;
2463
2464         /*
2465          * Drain async requests before we start sync IO
2466          */
2467         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2468                 return false;
2469
2470         /*
2471          * If this is an async queue and we have sync IO in flight, let it wait
2472          */
2473         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2474                 return false;
2475
2476         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2477         if (cfq_class_idle(cfqq))
2478                 max_dispatch = 1;
2479
2480         /*
2481          * Does this cfqq already have too much IO in flight?
2482          */
2483         if (cfqq->dispatched >= max_dispatch) {
2484                 bool promote_sync = false;
2485                 /*
2486                  * idle queue must always only have a single IO in flight
2487                  */
2488                 if (cfq_class_idle(cfqq))
2489                         return false;
2490
2491                 /*
2492                  * If there is only one sync queue,
2493                  * we can ignore the async queues here and give the sync
2494                  * queue no dispatch limit. The reason is that a sync queue can
2495                  * preempt an async queue, so limiting the sync queue doesn't
2496                  * make sense. This is useful for the aiostress test.
2497                  */
2498                 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2499                         promote_sync = true;
2500
2501                 /*
2502                  * We have other queues, don't allow more IO from this one
2503                  */
2504                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2505                                 !promote_sync)
2506                         return false;
2507
2508                 /*
2509                  * Sole queue user, no limit
2510                  */
2511                 if (cfqd->busy_queues == 1 || promote_sync)
2512                         max_dispatch = -1;
2513                 else
2514                         /*
2515                          * Normally we start throttling cfqq when cfq_quantum/2
2516                          * requests have been dispatched. But we can drive
2517                          * deeper queue depths at the beginning of the slice,
2518                          * subject to the upper limit of cfq_quantum.
2519                          */
2520                         max_dispatch = cfqd->cfq_quantum;
2521         }
2522
2523         /*
2524          * Async queues must wait a bit before being allowed dispatch.
2525          * We also ramp up the dispatch depth gradually for async IO,
2526          * based on the last sync IO we serviced
2527          */
2528         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2529                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2530                 unsigned int depth;
2531
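                     /*
                      * Ramp the allowed async depth by how long ago the last
                      * delayed sync IO completed: roughly one extra request
                      * per elapsed sync slice (~100ms by default).
                      */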
2532                 depth = last_sync / cfqd->cfq_slice[1];
2533                 if (!depth && !cfqq->dispatched)
2534                         depth = 1;
2535                 if (depth < max_dispatch)
2536                         max_dispatch = depth;
2537         }
2538
2539         /*
2540          * If we're below the current max, allow a dispatch
2541          */
2542         return cfqq->dispatched < max_dispatch;
2543 }
2544
2545 /*
2546  * Dispatch a request from cfqq, moving it to the request queue
2547  * dispatch list.
2548  */
2549 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2550 {
2551         struct request *rq;
2552
2553         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2554
2555         if (!cfq_may_dispatch(cfqd, cfqq))
2556                 return false;
2557
2558         /*
2559          * follow expired path, else get first next available
2560          */
2561         rq = cfq_check_fifo(cfqq);
2562         if (!rq)
2563                 rq = cfqq->next_rq;
2564
2565         /*
2566          * insert request into driver dispatch list
2567          */
2568         cfq_dispatch_insert(cfqd->queue, rq);
2569
2570         if (!cfqd->active_cic) {
2571                 struct cfq_io_context *cic = RQ_CIC(rq);
2572
2573                 atomic_long_inc(&cic->ioc->refcount);
2574                 cfqd->active_cic = cic;
2575         }
2576
2577         return true;
2578 }
2579
2580 /*
2581  * Find the cfqq that we need to service and move a request from that to the
2582  * dispatch list
2583  */
2584 static int cfq_dispatch_requests(struct request_queue *q, int force)
2585 {
2586         struct cfq_data *cfqd = q->elevator->elevator_data;
2587         struct cfq_queue *cfqq;
2588
2589         if (!cfqd->busy_queues)
2590                 return 0;
2591
2592         if (unlikely(force))
2593                 return cfq_forced_dispatch(cfqd);
2594
2595         cfqq = cfq_select_queue(cfqd);
2596         if (!cfqq)
2597                 return 0;
2598
2599         /*
2600          * Dispatch a request from this cfqq, if it is allowed
2601          */
2602         if (!cfq_dispatch_request(cfqd, cfqq))
2603                 return 0;
2604
2605         cfqq->slice_dispatch++;
2606         cfq_clear_cfqq_must_dispatch(cfqq);
2607
2608         /*
2609          * expire an async queue immediately if it has used up its slice. idle
2610          * queues always expire after 1 dispatch round.
2611          */
2612         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2613             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2614             cfq_class_idle(cfqq))) {
2615                 cfqq->slice_end = jiffies + 1;
2616                 cfq_slice_expired(cfqd, 0);
2617         }
2618
2619         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2620         return 1;
2621 }
2622
2623 /*
2624  * task holds one reference to the queue, dropped when task exits. each rq
2625  * in-flight on this queue also holds a reference, dropped when rq is freed.
2626  *
2627  * Each cfq queue took a reference on the parent group. Drop it now.
2628  * queue lock must be held here.
2629  */
2630 static void cfq_put_queue(struct cfq_queue *cfqq)
2631 {
2632         struct cfq_data *cfqd = cfqq->cfqd;
2633         struct cfq_group *cfqg;
2634
2635         BUG_ON(cfqq->ref <= 0);
2636
2637         cfqq->ref--;
2638         if (cfqq->ref)
2639                 return;
2640
2641         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2642         BUG_ON(rb_first(&cfqq->sort_list));
2643         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2644         cfqg = cfqq->cfqg;
2645
2646         if (unlikely(cfqd->active_queue == cfqq)) {
2647                 __cfq_slice_expired(cfqd, cfqq, 0);
2648                 cfq_schedule_dispatch(cfqd);
2649         }
2650
2651         BUG_ON(cfq_cfqq_on_rr(cfqq));
2652         kmem_cache_free(cfq_pool, cfqq);
2653         cfq_put_cfqg(cfqg);
2654 }
2655
2656 /*
2657  * Call func for each cic attached to this ioc.
2658  */
2659 static void
2660 call_for_each_cic(struct io_context *ioc,
2661                   void (*func)(struct io_context *, struct cfq_io_context *))
2662 {
2663         struct cfq_io_context *cic;
2664         struct hlist_node *n;
2665
2666         rcu_read_lock();
2667
2668         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2669                 func(ioc, cic);
2670
2671         rcu_read_unlock();
2672 }
2673
2674 static void cfq_cic_free_rcu(struct rcu_head *head)
2675 {
2676         struct cfq_io_context *cic;
2677
2678         cic = container_of(head, struct cfq_io_context, rcu_head);
2679
2680         kmem_cache_free(cfq_ioc_pool, cic);
2681         elv_ioc_count_dec(cfq_ioc_count);
2682
2683         if (ioc_gone) {
2684                 /*
2685                  * CFQ scheduler is exiting, grab exit lock and check
2686                  * the pending io context count. If it hits zero,
2687                  * complete ioc_gone and set it back to NULL
2688                  */
2689                 spin_lock(&ioc_gone_lock);
2690                 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
2691                         complete(ioc_gone);
2692                         ioc_gone = NULL;
2693                 }
2694                 spin_unlock(&ioc_gone_lock);
2695         }
2696 }
2697
2698 static void cfq_cic_free(struct cfq_io_context *cic)
2699 {
2700         call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
2701 }
2702
2703 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2704 {
2705         unsigned long flags;
2706         unsigned long dead_key = (unsigned long) cic->key;
2707
2708         BUG_ON(!(dead_key & CIC_DEAD_KEY));
2709
2710         spin_lock_irqsave(&ioc->lock, flags);
2711         radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
2712         hlist_del_rcu(&cic->cic_list);
2713         spin_unlock_irqrestore(&ioc->lock, flags);
2714
2715         cfq_cic_free(cic);
2716 }
2717
2718 /*
2719  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
2720  * There are only two callers of this: ->dtor(), which is called with the
2721  * rcu_read_lock() held, and ->trim(), which is called with the task lock held.
2722  */
2723 static void cfq_free_io_context(struct io_context *ioc)
2724 {
2725         /*
2726          * ioc->refcount is zero here, or we are called from elv_unregister(),
2727          * so no more cic's are allowed to be linked into this ioc.  So it
2728          * should be ok to iterate over the known list, we will see all cic's
2729          * since no new ones are added.
2730          */
2731         call_for_each_cic(ioc, cic_free_func);
2732 }
2733
2734 static void cfq_put_cooperator(struct cfq_queue *cfqq)
2735 {
2736         struct cfq_queue *__cfqq, *next;
2737
2738         /*
2739          * If this queue was scheduled to merge with another queue, be
2740          * sure to drop the reference taken on that queue (and others in
2741          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2742          */
2743         __cfqq = cfqq->new_cfqq;
2744         while (__cfqq) {
2745                 if (__cfqq == cfqq) {
2746                         WARN(1, "cfqq->new_cfqq loop detected\n");
2747                         break;
2748                 }
2749                 next = __cfqq->new_cfqq;
2750                 cfq_put_queue(__cfqq);
2751                 __cfqq = next;
2752         }
2753 }
2754
2755 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2756 {
2757         if (unlikely(cfqq == cfqd->active_queue)) {
2758                 __cfq_slice_expired(cfqd, cfqq, 0);
2759                 cfq_schedule_dispatch(cfqd);
2760         }
2761
2762         cfq_put_cooperator(cfqq);
2763
2764         cfq_put_queue(cfqq);
2765 }
2766
2767 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2768                                          struct cfq_io_context *cic)
2769 {
2770         struct io_context *ioc = cic->ioc;
2771
2772         list_del_init(&cic->queue_list);
2773
2774         /*
2775          * Make sure dead mark is seen for dead queues
2776          */
2777         smp_wmb();
2778         cic->key = cfqd_dead_key(cfqd);
2779
2780         if (ioc->ioc_data == cic)
2781                 rcu_assign_pointer(ioc->ioc_data, NULL);
2782
2783         if (cic->cfqq[BLK_RW_ASYNC]) {
2784                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2785                 cic->cfqq[BLK_RW_ASYNC] = NULL;
2786         }
2787
2788         if (cic->cfqq[BLK_RW_SYNC]) {
2789                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2790                 cic->cfqq[BLK_RW_SYNC] = NULL;
2791         }
2792 }
2793
2794 static void cfq_exit_single_io_context(struct io_context *ioc,
2795                                        struct cfq_io_context *cic)
2796 {
2797         struct cfq_data *cfqd = cic_to_cfqd(cic);
2798
2799         if (cfqd) {
2800                 struct request_queue *q = cfqd->queue;
2801                 unsigned long flags;
2802
2803                 spin_lock_irqsave(q->queue_lock, flags);
2804
2805                 /*
2806                  * Ensure we get a fresh copy of the ->key to prevent
2807                  * race between exiting task and queue
2808                  */
2809                 smp_read_barrier_depends();
2810                 if (cic->key == cfqd)
2811                         __cfq_exit_single_io_context(cfqd, cic);
2812
2813                 spin_unlock_irqrestore(q->queue_lock, flags);
2814         }
2815 }
2816
2817 /*
2818  * The process that ioc belongs to has exited; we need to clean up
2819  * and put the internal structures we have that belong to that process.
2820  */
2821 static void cfq_exit_io_context(struct io_context *ioc)
2822 {
2823         call_for_each_cic(ioc, cfq_exit_single_io_context);
2824 }
2825
2826 static struct cfq_io_context *
2827 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2828 {
2829         struct cfq_io_context *cic;
2830
2831         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
2832                                                         cfqd->queue->node);
2833         if (cic) {
2834                 cic->last_end_request = jiffies;
2835                 INIT_LIST_HEAD(&cic->queue_list);
2836                 INIT_HLIST_NODE(&cic->cic_list);
2837                 cic->dtor = cfq_free_io_context;
2838                 cic->exit = cfq_exit_io_context;
2839                 elv_ioc_count_inc(cfq_ioc_count);
2840         }
2841
2842         return cic;
2843 }
2844
2845 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2846 {
2847         struct task_struct *tsk = current;
2848         int ioprio_class;
2849
2850         if (!cfq_cfqq_prio_changed(cfqq))
2851                 return;
2852
2853         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2854         switch (ioprio_class) {
2855         default:
2856                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2857         case IOPRIO_CLASS_NONE:
2858                 /*
2859                  * no prio set, inherit CPU scheduling settings
2860                  */
2861                 cfqq->ioprio = task_nice_ioprio(tsk);
2862                 cfqq->ioprio_class = task_nice_ioclass(tsk);
2863                 break;
2864         case IOPRIO_CLASS_RT:
2865                 cfqq->ioprio = task_ioprio(ioc);
2866                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2867                 break;
2868         case IOPRIO_CLASS_BE:
2869                 cfqq->ioprio = task_ioprio(ioc);
2870                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2871                 break;
2872         case IOPRIO_CLASS_IDLE:
2873                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2874                 cfqq->ioprio = 7;
2875                 cfq_clear_cfqq_idle_window(cfqq);
2876                 break;
2877         }
2878
2879         /*
2880          * keep track of original prio settings in case we have to temporarily
2881          * elevate the priority of this queue
2882          */
2883         cfqq->org_ioprio = cfqq->ioprio;
2884         cfqq->org_ioprio_class = cfqq->ioprio_class;
2885         cfq_clear_cfqq_prio_changed(cfqq);
2886 }
2887
2888 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
2889 {
2890         struct cfq_data *cfqd = cic_to_cfqd(cic);
2891         struct cfq_queue *cfqq;
2892         unsigned long flags;
2893
2894         if (unlikely(!cfqd))
2895                 return;
2896
2897         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2898
2899         cfqq = cic->cfqq[BLK_RW_ASYNC];
2900         if (cfqq) {
2901                 struct cfq_queue *new_cfqq;
2902                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2903                                                 GFP_ATOMIC);
2904                 if (new_cfqq) {
2905                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2906                         cfq_put_queue(cfqq);
2907                 }
2908         }
2909
2910         cfqq = cic->cfqq[BLK_RW_SYNC];
2911         if (cfqq)
2912                 cfq_mark_cfqq_prio_changed(cfqq);
2913
2914         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2915 }
2916
2917 static void cfq_ioc_set_ioprio(struct io_context *ioc)
2918 {
2919         call_for_each_cic(ioc, changed_ioprio);
2920         ioc->ioprio_changed = 0;
2921 }
2922
2923 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2924                           pid_t pid, bool is_sync)
2925 {
2926         RB_CLEAR_NODE(&cfqq->rb_node);
2927         RB_CLEAR_NODE(&cfqq->p_node);
2928         INIT_LIST_HEAD(&cfqq->fifo);
2929
2930         cfqq->ref = 0;
2931         cfqq->cfqd = cfqd;
2932
2933         cfq_mark_cfqq_prio_changed(cfqq);
2934
2935         if (is_sync) {
2936                 if (!cfq_class_idle(cfqq))
2937                         cfq_mark_cfqq_idle_window(cfqq);
2938                 cfq_mark_cfqq_sync(cfqq);
2939         }
2940         cfqq->pid = pid;
2941 }
2942
2943 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2944 static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
2945 {
2946         struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2947         struct cfq_data *cfqd = cic_to_cfqd(cic);
2948         unsigned long flags;
2949         struct request_queue *q;
2950
2951         if (unlikely(!cfqd))
2952                 return;
2953
2954         q = cfqd->queue;
2955
2956         spin_lock_irqsave(q->queue_lock, flags);
2957
2958         if (sync_cfqq) {
2959                 /*
2960                  * Drop reference to sync queue. A new sync queue will be
2961                  * assigned in new group upon arrival of a fresh request.
2962                  */
2963                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2964                 cic_set_cfqq(cic, NULL, 1);
2965                 cfq_put_queue(sync_cfqq);
2966         }
2967
2968         spin_unlock_irqrestore(q->queue_lock, flags);
2969 }
2970
2971 static void cfq_ioc_set_cgroup(struct io_context *ioc)
2972 {
2973         call_for_each_cic(ioc, changed_cgroup);
2974         ioc->cgroup_changed = 0;
2975 }
2976 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2977
2978 static struct cfq_queue *
2979 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2980                      struct io_context *ioc, gfp_t gfp_mask)
2981 {
2982         struct cfq_queue *cfqq, *new_cfqq = NULL;
2983         struct cfq_io_context *cic;
2984         struct cfq_group *cfqg;
2985
2986 retry:
2987         cfqg = cfq_get_cfqg(cfqd);
2988         cic = cfq_cic_lookup(cfqd, ioc);
2989         /* cic always exists here */
2990         cfqq = cic_to_cfqq(cic, is_sync);
2991
2992         /*
2993          * Always try a new alloc if we fell back to the OOM cfqq
2994          * originally, since it should just be a temporary situation.
2995          */
2996         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2997                 cfqq = NULL;
2998                 if (new_cfqq) {
2999                         cfqq = new_cfqq;
3000                         new_cfqq = NULL;
3001                 } else if (gfp_mask & __GFP_WAIT) {
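                             /*
                              * A blocking allocation may sleep, so drop the
                              * queue lock first and redo the lookup afterwards
                              * in case the cic/cfqq state changed meanwhile.
                              */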
3002                         spin_unlock_irq(cfqd->queue->queue_lock);
3003                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
3004                                         gfp_mask | __GFP_ZERO,
3005                                         cfqd->queue->node);
3006                         spin_lock_irq(cfqd->queue->queue_lock);
3007                         if (new_cfqq)
3008                                 goto retry;
3009                 } else {
3010                         cfqq = kmem_cache_alloc_node(cfq_pool,
3011                                         gfp_mask | __GFP_ZERO,
3012                                         cfqd->queue->node);
3013                 }
3014
3015                 if (cfqq) {
3016                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3017                         cfq_init_prio_data(cfqq, ioc);
3018                         cfq_link_cfqq_cfqg(cfqq, cfqg);
3019                         cfq_log_cfqq(cfqd, cfqq, "alloced");
3020                 } else
3021                         cfqq = &cfqd->oom_cfqq;
3022         }
3023
3024         if (new_cfqq)
3025                 kmem_cache_free(cfq_pool, new_cfqq);
3026
3027         return cfqq;
3028 }
3029
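     /*
      * Shared per-device async queues live in a small table indexed by class
      * and priority: async_cfqq[0][prio] for RT, async_cfqq[1][prio] for BE,
      * and a single async_idle_cfqq for the idle class.
      */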
3030 static struct cfq_queue **
3031 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3032 {
3033         switch (ioprio_class) {
3034         case IOPRIO_CLASS_RT:
3035                 return &cfqd->async_cfqq[0][ioprio];
3036         case IOPRIO_CLASS_BE:
3037                 return &cfqd->async_cfqq[1][ioprio];
3038         case IOPRIO_CLASS_IDLE:
3039                 return &cfqd->async_idle_cfqq;
3040         default:
3041                 BUG();
3042         }
3043 }
3044
3045 static struct cfq_queue *
3046 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
3047               gfp_t gfp_mask)
3048 {
3049         const int ioprio = task_ioprio(ioc);
3050         const int ioprio_class = task_ioprio_class(ioc);
3051         struct cfq_queue **async_cfqq = NULL;
3052         struct cfq_queue *cfqq = NULL;
3053
3054         if (!is_sync) {
3055                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3056                 cfqq = *async_cfqq;
3057         }
3058
3059         if (!cfqq)
3060                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
3061
3062         /*
3063          * pin the queue now that it's allocated, scheduler exit will prune it
3064          */
3065         if (!is_sync && !(*async_cfqq)) {
3066                 cfqq->ref++;
3067                 *async_cfqq = cfqq;
3068         }
3069
3070         cfqq->ref++;
3071         return cfqq;
3072 }
3073
3074 /*
3075  * We drop cfq io contexts lazily, so we may find a dead one.
3076  */
3077 static void
3078 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
3079                   struct cfq_io_context *cic)
3080 {
3081         unsigned long flags;
3082
3083         WARN_ON(!list_empty(&cic->queue_list));
3084         BUG_ON(cic->key != cfqd_dead_key(cfqd));
3085
3086         spin_lock_irqsave(&ioc->lock, flags);
3087
3088         BUG_ON(ioc->ioc_data == cic);
3089
3090         radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
3091         hlist_del_rcu(&cic->cic_list);
3092         spin_unlock_irqrestore(&ioc->lock, flags);
3093
3094         cfq_cic_free(cic);
3095 }
3096
3097 static struct cfq_io_context *
3098 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
3099 {
3100         struct cfq_io_context *cic;
3101         unsigned long flags;
3102
3103         if (unlikely(!ioc))
3104                 return NULL;
3105
3106         rcu_read_lock();
3107
3108         /*
3109          * we maintain a last-hit cache, to avoid browsing over the tree
3110          */
3111         cic = rcu_dereference(ioc->ioc_data);
3112         if (cic && cic->key == cfqd) {
3113                 rcu_read_unlock();
3114                 return cic;
3115         }
3116
3117         do {
3118                 cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
3119                 rcu_read_unlock();
3120                 if (!cic)
3121                         break;
3122                 if (unlikely(cic->key != cfqd)) {
3123                         cfq_drop_dead_cic(cfqd, ioc, cic);
3124                         rcu_read_lock();
3125                         continue;
3126                 }
3127
3128                 spin_lock_irqsave(&ioc->lock, flags);
3129                 rcu_assign_pointer(ioc->ioc_data, cic);
3130                 spin_unlock_irqrestore(&ioc->lock, flags);
3131                 break;
3132         } while (1);
3133
3134         return cic;
3135 }
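
/*
 * The lookup above is a two-level scheme: ioc->ioc_data is a single-entry
 * most-recently-used cache in front of the per-ioc radix tree, which pays
 * off because a task usually keeps issuing I/O to the same device.  A key
 * that no longer matches cfqd means the scheduler instance the cic
 * belonged to has exited; the stale entry is reaped lazily through
 * cfq_drop_dead_cic() and the lookup is retried.
 */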
3136
3137 /*
3138  * Add cic into ioc, using cfqd as the search key. This enables us to look up
3139  * the process specific cfq io context when entered from the block layer.
3140  * Also adds the cic to a per-cfqd list, used when this queue is removed.
3141  */
3142 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
3143                         struct cfq_io_context *cic, gfp_t gfp_mask)
3144 {
3145         unsigned long flags;
3146         int ret;
3147
3148         ret = radix_tree_preload(gfp_mask);
3149         if (!ret) {
3150                 cic->ioc = ioc;
3151                 cic->key = cfqd;
3152
3153                 spin_lock_irqsave(&ioc->lock, flags);
3154                 ret = radix_tree_insert(&ioc->radix_root,
3155                                                 cfqd->cic_index, cic);
3156                 if (!ret)
3157                         hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
3158                 spin_unlock_irqrestore(&ioc->lock, flags);
3159
3160                 radix_tree_preload_end();
3161
3162                 if (!ret) {
3163                         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3164                         list_add(&cic->queue_list, &cfqd->cic_list);
3165                         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3166                 }
3167         }
3168
3169         if (ret)
3170                 printk(KERN_ERR "cfq: cic link failed!\n");
3171
3172         return ret;
3173 }
3174
3175 /*
3176  * Setup general io context and cfq io context. There can be several cfq
3177  * io contexts per general io context, if this process is doing io to more
3178  * than one device managed by cfq.
3179  */
3180 static struct cfq_io_context *
3181 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3182 {
3183         struct io_context *ioc = NULL;
3184         struct cfq_io_context *cic;
3185
3186         might_sleep_if(gfp_mask & __GFP_WAIT);
3187
3188         ioc = get_io_context(gfp_mask, cfqd->queue->node);
3189         if (!ioc)
3190                 return NULL;
3191
3192         cic = cfq_cic_lookup(cfqd, ioc);
3193         if (cic)
3194                 goto out;
3195
3196         cic = cfq_alloc_io_context(cfqd, gfp_mask);
3197         if (cic == NULL)
3198                 goto err;
3199
3200         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
3201                 goto err_free;
3202
3203 out:
3204         smp_read_barrier_depends();
3205         if (unlikely(ioc->ioprio_changed))
3206                 cfq_ioc_set_ioprio(ioc);
3207
3208 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3209         if (unlikely(ioc->cgroup_changed))
3210                 cfq_ioc_set_cgroup(ioc);
3211 #endif
3212         return cic;
3213 err_free:
3214         cfq_cic_free(cic);
3215 err:
3216         put_io_context(ioc);
3217         return NULL;
3218 }
3219
3220 static void
3221 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
3222 {
3223         unsigned long elapsed = jiffies - cic->last_end_request;
3224         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
3225
3226         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
3227         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
3228         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
3229 }
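
/*
 * Worked example for the fixed-point averages above (illustrative numbers
 * only): ttime_samples saturates towards 256, so sample_valid() (> 80)
 * first becomes true on the third update starting from zero
 * (0 -> 32 -> 60 -> 84).  In steady state, with ttime_samples == 256 and
 * ttime_total == 2560 (mean of 10 jiffies), a request completing with
 * ttime == 2 gives ttime_total = (7*2560 + 256*2)/8 = 2304 and
 * ttime_mean = (2304 + 128)/256 = 9, i.e. a 7/8-decay moving average of
 * the observed think time in jiffies.
 */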
3230
3231 static void
3232 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3233                        struct request *rq)
3234 {
3235         sector_t sdist = 0;
3236         sector_t n_sec = blk_rq_sectors(rq);
3237         if (cfqq->last_request_pos) {
3238                 if (cfqq->last_request_pos < blk_rq_pos(rq))
3239                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3240                 else
3241                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3242         }
3243
3244         cfqq->seek_history <<= 1;
3245         if (blk_queue_nonrot(cfqd->queue))
3246                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3247         else
3248                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3249 }
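
/*
 * seek_history is a 32-bit sliding window: every queued request shifts in
 * one bit, set when the request looks seeky - a distance from the previous
 * request above CFQQ_SEEK_THR on rotational media, or a small request
 * below CFQQ_SECT_THR_NONROT on non-rotational media.  CFQQ_SEEKY() just
 * popcounts the window, so a queue is treated as seeky once more than 4 of
 * its last 32 requests qualified.
 */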
3250
3251 /*
3252  * Disable idle window if the process thinks too long or seeks so much that
3253  * it doesn't matter
3254  */
3255 static void
3256 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3257                        struct cfq_io_context *cic)
3258 {
3259         int old_idle, enable_idle;
3260
3261         /*
3262          * Don't idle for async or idle io prio class
3263          */
3264         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3265                 return;
3266
3267         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3268
3269         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3270                 cfq_mark_cfqq_deep(cfqq);
3271
3272         if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3273                 enable_idle = 0;
3274         else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3275             (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3276                 enable_idle = 0;
3277         else if (sample_valid(cic->ttime_samples)) {
3278                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
3279                         enable_idle = 0;
3280                 else
3281                         enable_idle = 1;
3282         }
3283
3284         if (old_idle != enable_idle) {
3285                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3286                 if (enable_idle)
3287                         cfq_mark_cfqq_idle_window(cfqq);
3288                 else
3289                         cfq_clear_cfqq_idle_window(cfqq);
3290         }
3291 }
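
/*
 * Summarizing the decision above: idling is only ever considered for sync,
 * non-idle-class queues.  It is switched off for REQ_NOIDLE requests, for
 * exiting tasks, when slice_idle is 0, and for seeky queues - unless the
 * queue has been marked "deep" (4 or more requests queued), in which case
 * it is presumed able to keep the device busy by itself and seekiness is
 * ignored.  Once enough think-time samples exist, a mean think time above
 * slice_idle also disables idling.
 */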
3292
3293 /*
3294  * Check if new_cfqq should preempt the currently active queue. Return false
3295  * for no (or if we aren't sure); returning true will cause a preempt.
3296  */
3297 static bool
3298 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3299                    struct request *rq)
3300 {
3301         struct cfq_queue *cfqq;
3302
3303         cfqq = cfqd->active_queue;
3304         if (!cfqq)
3305                 return false;
3306
3307         if (cfq_class_idle(new_cfqq))
3308                 return false;
3309
3310         if (cfq_class_idle(cfqq))
3311                 return true;
3312
3313         /*
3314          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3315          */
3316         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3317                 return false;
3318
3319         /*
3320          * if the new request is sync, but the currently running queue is
3321          * not, let the sync request have priority.
3322          */
3323         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3324                 return true;
3325
3326         if (new_cfqq->cfqg != cfqq->cfqg)
3327                 return false;
3328
3329         if (cfq_slice_used(cfqq))
3330                 return true;
3331
3332         /* Allow preemption only if we are idling on sync-noidle tree */
3333         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3334             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3335             new_cfqq->service_tree->count == 2 &&
3336             RB_EMPTY_ROOT(&cfqq->sort_list))
3337                 return true;
3338
3339         /*
3340          * So both queues are sync. Let the new request get disk time if
3341          * it's a metadata request and the current queue is doing regular IO.
3342          */
3343         if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
3344                 return true;
3345
3346         /*
3347          * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
3348          */
3349         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3350                 return true;
3351
3352         /* The active queue is empty and we are not supposed to idle on it */
3353         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3354                 return true;
3355
3356         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3357                 return false;
3358
3359         /*
3360          * if this request is as-good as one we would expect from the
3361          * current cfqq, let it preempt
3362          */
3363         if (cfq_rq_close(cfqd, cfqq, rq))
3364                 return true;
3365
3366         return false;
3367 }
3368
3369 /*
3370  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3371  * let it have half of its nominal slice.
3372  */
3373 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3374 {
3375         struct cfq_queue *old_cfqq = cfqd->active_queue;
3376
3377         cfq_log_cfqq(cfqd, cfqq, "preempt");
3378         cfq_slice_expired(cfqd, 1);
3379
3380         /*
3381          * The workload type changed; don't save the slice, otherwise the
3382          * preemption doesn't take effect.
3383          */
3384         if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
3385                 cfqq->cfqg->saved_workload_slice = 0;
3386
3387         /*
3388          * Put the new queue at the front of the current list,
3389          * so we know that it will be selected next.
3390          */
3391         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3392
3393         cfq_service_tree_add(cfqd, cfqq, 1);
3394
3395         cfqq->slice_end = 0;
3396         cfq_mark_cfqq_slice_new(cfqq);
3397 }
3398
3399 /*
3400  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3401  * something we should do about it
3402  */
3403 static void
3404 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3405                 struct request *rq)
3406 {
3407         struct cfq_io_context *cic = RQ_CIC(rq);
3408
3409         cfqd->rq_queued++;
3410         if (rq->cmd_flags & REQ_META)
3411                 cfqq->meta_pending++;
3412
3413         cfq_update_io_thinktime(cfqd, cic);
3414         cfq_update_io_seektime(cfqd, cfqq, rq);
3415         cfq_update_idle_window(cfqd, cfqq, cic);
3416
3417         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3418
3419         if (cfqq == cfqd->active_queue) {
3420                 /*
3421                  * Remember that we saw a request from this process, but
3422                  * don't start queuing just yet. Otherwise we risk seeing lots
3423                  * of tiny requests, because we disrupt the normal plugging
3424                  * and merging. If the request is already larger than a single
3425                  * page, let it rip immediately. For that case we assume that
3426                  * merging is already done. Ditto for a busy system that
3427                  * has other work pending: don't risk waiting for the idle-timer
3428                  * unplug before we continue working.
3429                  */
3430                 if (cfq_cfqq_wait_request(cfqq)) {
3431                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3432                             cfqd->busy_queues > 1) {
3433                                 cfq_del_timer(cfqd, cfqq);
3434                                 cfq_clear_cfqq_wait_request(cfqq);
3435                                 __blk_run_queue(cfqd->queue);
3436                         } else {
3437                                 cfq_blkiocg_update_idle_time_stats(
3438                                                 &cfqq->cfqg->blkg);
3439                                 cfq_mark_cfqq_must_dispatch(cfqq);
3440                         }
3441                 }
3442         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3443                 /*
3444                  * not the active queue - expire current slice if it is
3445                  * idle and has expired its mean thinktime, or this new queue
3446                  * has some old slice time left and is of higher priority or
3447                  * this new queue is RT and the current one is BE
3448                  */
3449                 cfq_preempt_queue(cfqd, cfqq);
3450                 __blk_run_queue(cfqd->queue);
3451         }
3452 }
3453
3454 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3455 {
3456         struct cfq_data *cfqd = q->elevator->elevator_data;
3457         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3458
3459         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3460         cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
3461
3462         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3463         list_add_tail(&rq->queuelist, &cfqq->fifo);
3464         cfq_add_rq_rb(rq);
3465         cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3466                         &cfqd->serving_group->blkg, rq_data_dir(rq),
3467                         rq_is_sync(rq));
3468         cfq_rq_enqueued(cfqd, cfqq, rq);
3469 }
3470
3471 /*
3472  * Update hw_tag based on peak queue depth over 50 samples under
3473  * sufficient load.
3474  */
3475 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3476 {
3477         struct cfq_queue *cfqq = cfqd->active_queue;
3478
3479         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3480                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3481
3482         if (cfqd->hw_tag == 1)
3483                 return;
3484
3485         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3486             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3487                 return;
3488
3489         /*
3490          * If the active queue doesn't have enough requests and can idle, cfq might not
3491          * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3492          * case
3493          */
3494         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3495             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3496             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3497                 return;
3498
3499         if (cfqd->hw_tag_samples++ < 50)
3500                 return;
3501
3502         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3503                 cfqd->hw_tag = 1;
3504         else
3505                 cfqd->hw_tag = 0;
3506 }
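
/*
 * In other words: hw_tag starts out unknown (-1) and is only decided after
 * 50 samples taken while there was enough load to stress the device (more
 * than CFQ_HW_QUEUE_MIN requests queued or in the driver).  If the peak
 * observed driver depth ever reached CFQ_HW_QUEUE_MIN, the device is
 * assumed to do its own command queueing (hw_tag = 1); otherwise hw_tag is
 * cleared to 0.  Once set to 1 it is never re-evaluated.
 */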
3507
3508 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3509 {
3510         struct cfq_io_context *cic = cfqd->active_cic;
3511
3512         /* If the queue already has requests, don't wait */
3513         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3514                 return false;
3515
3516         /* If there are other queues in the group, don't wait */
3517         if (cfqq->cfqg->nr_cfqq > 1)
3518                 return false;
3519
3520         if (cfq_slice_used(cfqq))
3521                 return true;
3522
3523         /* if slice left is less than think time, wait busy */
3524         if (cic && sample_valid(cic->ttime_samples)
3525             && (cfqq->slice_end - jiffies < cic->ttime_mean))
3526                 return true;
3527
3528         /*
3529          * If the think time is less than a jiffy, then ttime_mean=0 and the
3530          * check above will not be true. It might happen that the slice has not
3531          * expired yet but will expire soon (4-5 ns) during select_queue(). To
3532          * cover the case where the think time is less than a jiffy, mark the
3533          * queue wait busy if only 1 jiffy is left in the slice.
3534          */
3535         if (cfqq->slice_end - jiffies == 1)
3536                 return true;
3537
3538         return false;
3539 }
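
/*
 * Example of the wait-busy decision (illustrative numbers): the active
 * queue just completed its last queued request, it is the only queue in
 * its group, 3 jiffies of slice remain and the task's mean think time is
 * 5 jiffies.  Rather than expiring the queue right away, the completion
 * path marks it wait-busy and resets slice_end to jiffies + slice_idle
 * (or group_idle when slice_idle is 0), giving the task a chance to
 * submit its next request first; with a sub-jiffy think time the same
 * happens once only a single jiffy of slice is left.
 */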
3540
3541 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3542 {
3543         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3544         struct cfq_data *cfqd = cfqq->cfqd;
3545         const int sync = rq_is_sync(rq);
3546         unsigned long now;
3547
3548         now = jiffies;
3549         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3550                      !!(rq->cmd_flags & REQ_NOIDLE));
3551
3552         cfq_update_hw_tag(cfqd);
3553
3554         WARN_ON(!cfqd->rq_in_driver);
3555         WARN_ON(!cfqq->dispatched);
3556         cfqd->rq_in_driver--;
3557         cfqq->dispatched--;
3558         (RQ_CFQG(rq))->dispatched--;
3559         cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3560                         rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3561                         rq_data_dir(rq), rq_is_sync(rq));
3562
3563         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3564
3565         if (sync) {
3566                 RQ_CIC(rq)->last_end_request = now;
3567                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3568                         cfqd->last_delayed_sync = now;
3569         }
3570
3571         /*
3572          * If this is the active queue, check if it needs to be expired,
3573          * or if we want to idle in case it has no pending requests.
3574          */
3575         if (cfqd->active_queue == cfqq) {
3576                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3577
3578                 if (cfq_cfqq_slice_new(cfqq)) {
3579                         cfq_set_prio_slice(cfqd, cfqq);
3580                         cfq_clear_cfqq_slice_new(cfqq);
3581                 }
3582
3583                 /*
3584                  * Should we wait for the next request to come in before we expire
3585                  * the queue?
3586                  */
3587                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3588                         unsigned long extend_sl = cfqd->cfq_slice_idle;
3589                         if (!cfqd->cfq_slice_idle)
3590                                 extend_sl = cfqd->cfq_group_idle;
3591                         cfqq->slice_end = jiffies + extend_sl;
3592                         cfq_mark_cfqq_wait_busy(cfqq);
3593                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3594                 }
3595
3596                 /*
3597                  * Idling is not enabled on:
3598                  * - expired queues
3599                  * - idle-priority queues
3600                  * - async queues
3601                  * - queues with still some requests queued
3602                  * - when there is a close cooperator
3603                  */
3604                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3605                         cfq_slice_expired(cfqd, 1);
3606                 else if (sync && cfqq_empty &&
3607                          !cfq_close_cooperator(cfqd, cfqq)) {
3608                         cfq_arm_slice_timer(cfqd);
3609                 }
3610         }
3611
3612         if (!cfqd->rq_in_driver)
3613                 cfq_schedule_dispatch(cfqd);
3614 }
3615
3616 /*
3617  * We temporarily boost lower-priority queues if they are holding fs-exclusive
3618  * resources; they are boosted to normal prio (CLASS_BE/4).
3619  */
3620 static void cfq_prio_boost(struct cfq_queue *cfqq)
3621 {
3622         if (has_fs_excl()) {
3623                 /*
3624                  * boost idle prio on transactions that would lock out other
3625                  * users of the filesystem
3626                  */
3627                 if (cfq_class_idle(cfqq))
3628                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
3629                 if (cfqq->ioprio > IOPRIO_NORM)
3630                         cfqq->ioprio = IOPRIO_NORM;
3631         } else {
3632                 /*
3633                  * unboost the queue (if needed)
3634                  */
3635                 cfqq->ioprio_class = cfqq->org_ioprio_class;
3636                 cfqq->ioprio = cfqq->org_ioprio;
3637         }
3638 }
3639
3640 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3641 {
3642         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3643                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3644                 return ELV_MQUEUE_MUST;
3645         }
3646
3647         return ELV_MQUEUE_MAY;
3648 }
3649
3650 static int cfq_may_queue(struct request_queue *q, int rw)
3651 {
3652         struct cfq_data *cfqd = q->elevator->elevator_data;
3653         struct task_struct *tsk = current;
3654         struct cfq_io_context *cic;
3655         struct cfq_queue *cfqq;
3656
3657         /*
3658          * don't force setup of a queue from here, as a call to may_queue
3659          * does not necessarily imply that a request actually will be queued.
3660          * So just look up a possibly existing queue, or return 'may queue'
3661          * if that fails
3662          */
3663         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3664         if (!cic)
3665                 return ELV_MQUEUE_MAY;
3666
3667         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3668         if (cfqq) {
3669                 cfq_init_prio_data(cfqq, cic->ioc);
3670                 cfq_prio_boost(cfqq);
3671
3672                 return __cfq_may_queue(cfqq);
3673         }
3674
3675         return ELV_MQUEUE_MAY;
3676 }
3677
3678 /*
3679  * queue lock held here
3680  */
3681 static void cfq_put_request(struct request *rq)
3682 {
3683         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3684
3685         if (cfqq) {
3686                 const int rw = rq_data_dir(rq);
3687
3688                 BUG_ON(!cfqq->allocated[rw]);
3689                 cfqq->allocated[rw]--;
3690
3691                 put_io_context(RQ_CIC(rq)->ioc);
3692
3693                 rq->elevator_private[0] = NULL;
3694                 rq->elevator_private[1] = NULL;
3695
3696                 /* Put down rq reference on cfqg */
3697                 cfq_put_cfqg(RQ_CFQG(rq));
3698                 rq->elevator_private[2] = NULL;
3699
3700                 cfq_put_queue(cfqq);
3701         }
3702 }
3703
3704 static struct cfq_queue *
3705 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3706                 struct cfq_queue *cfqq)
3707 {
3708         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3709         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3710         cfq_mark_cfqq_coop(cfqq->new_cfqq);
3711         cfq_put_queue(cfqq);
3712         return cic_to_cfqq(cic, 1);
3713 }
3714
3715 /*
3716  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3717  * was the last process referring to said cfqq.
3718  */
3719 static struct cfq_queue *
3720 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3721 {
3722         if (cfqq_process_refs(cfqq) == 1) {
3723                 cfqq->pid = current->pid;
3724                 cfq_clear_cfqq_coop(cfqq);
3725                 cfq_clear_cfqq_split_coop(cfqq);
3726                 return cfqq;
3727         }
3728
3729         cic_set_cfqq(cic, NULL, 1);
3730
3731         cfq_put_cooperator(cfqq);
3732
3733         cfq_put_queue(cfqq);
3734         return NULL;
3735 }
3736 /*
3737  * Allocate cfq data structures associated with this request.
3738  */
3739 static int
3740 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3741 {
3742         struct cfq_data *cfqd = q->elevator->elevator_data;
3743         struct cfq_io_context *cic;
3744         const int rw = rq_data_dir(rq);
3745         const bool is_sync = rq_is_sync(rq);
3746         struct cfq_queue *cfqq;
3747         unsigned long flags;
3748
3749         might_sleep_if(gfp_mask & __GFP_WAIT);
3750
3751         cic = cfq_get_io_context(cfqd, gfp_mask);
3752
3753         spin_lock_irqsave(q->queue_lock, flags);
3754
3755         if (!cic)
3756                 goto queue_fail;
3757
3758 new_queue:
3759         cfqq = cic_to_cfqq(cic, is_sync);
3760         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3761                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3762                 cic_set_cfqq(cic, cfqq, is_sync);
3763         } else {
3764                 /*
3765                  * If the queue was seeky for too long, break it apart.
3766                  */
3767                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3768                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3769                         cfqq = split_cfqq(cic, cfqq);
3770                         if (!cfqq)
3771                                 goto new_queue;
3772                 }
3773
3774                 /*
3775                  * Check to see if this queue is scheduled to merge with
3776                  * another, closely cooperating queue.  The merging of
3777                  * queues happens here as it must be done in process context.
3778                  * The reference on new_cfqq was taken in merge_cfqqs.
3779                  */
3780                 if (cfqq->new_cfqq)
3781                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3782         }
3783
3784         cfqq->allocated[rw]++;
3785
3786         cfqq->ref++;
3787         rq->elevator_private[0] = cic;
3788         rq->elevator_private[1] = cfqq;
3789         rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
3790         spin_unlock_irqrestore(q->queue_lock, flags);
3791         return 0;
3792
3793 queue_fail:
3794         if (cic)
3795                 put_io_context(cic->ioc);
3796
3797         cfq_schedule_dispatch(cfqd);
3798         spin_unlock_irqrestore(q->queue_lock, flags);
3799         cfq_log(cfqd, "set_request fail");
3800         return 1;
3801 }
3802
3803 static void cfq_kick_queue(struct work_struct *work)
3804 {
3805         struct cfq_data *cfqd =
3806                 container_of(work, struct cfq_data, unplug_work);
3807         struct request_queue *q = cfqd->queue;
3808
3809         spin_lock_irq(q->queue_lock);
3810         __blk_run_queue(cfqd->queue);
3811         spin_unlock_irq(q->queue_lock);
3812 }
3813
3814 /*
3815  * Timer running if the active_queue is currently idling inside its time slice
3816  */
3817 static void cfq_idle_slice_timer(unsigned long data)
3818 {
3819         struct cfq_data *cfqd = (struct cfq_data *) data;
3820         struct cfq_queue *cfqq;
3821         unsigned long flags;
3822         int timed_out = 1;
3823
3824         cfq_log(cfqd, "idle timer fired");
3825
3826         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3827
3828         cfqq = cfqd->active_queue;
3829         if (cfqq) {
3830                 timed_out = 0;
3831
3832                 /*
3833                  * We saw a request before the queue expired, let it through
3834                  */
3835                 if (cfq_cfqq_must_dispatch(cfqq))
3836                         goto out_kick;
3837
3838                 /*
3839                  * expired
3840                  */
3841                 if (cfq_slice_used(cfqq))
3842                         goto expire;
3843
3844                 /*
3845                  * only expire and reinvoke request handler, if there are
3846                  * other queues with pending requests
3847                  */
3848                 if (!cfqd->busy_queues)
3849                         goto out_cont;
3850
3851                 /*
3852                  * not expired and it has a request pending, let it dispatch
3853                  */
3854                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3855                         goto out_kick;
3856
3857                 /*
3858                  * The 'deep' queue-depth flag is cleared only when idling did not succeed
3859                  */
3860                 cfq_clear_cfqq_deep(cfqq);
3861         }
3862 expire:
3863         cfq_slice_expired(cfqd, timed_out);
3864 out_kick:
3865         cfq_schedule_dispatch(cfqd);
3866 out_cont:
3867         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3868 }
3869
3870 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3871 {
3872         del_timer_sync(&cfqd->idle_slice_timer);
3873         cancel_work_sync(&cfqd->unplug_work);
3874 }
3875
3876 static void cfq_put_async_queues(struct cfq_data *cfqd)
3877 {
3878         int i;
3879
3880         for (i = 0; i < IOPRIO_BE_NR; i++) {
3881                 if (cfqd->async_cfqq[0][i])
3882                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3883                 if (cfqd->async_cfqq[1][i])
3884                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3885         }
3886
3887         if (cfqd->async_idle_cfqq)
3888                 cfq_put_queue(cfqd->async_idle_cfqq);
3889 }
3890
3891 static void cfq_exit_queue(struct elevator_queue *e)
3892 {
3893         struct cfq_data *cfqd = e->elevator_data;
3894         struct request_queue *q = cfqd->queue;
3895         bool wait = false;
3896
3897         cfq_shutdown_timer_wq(cfqd);
3898
3899         spin_lock_irq(q->queue_lock);
3900
3901         if (cfqd->active_queue)
3902                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3903
3904         while (!list_empty(&cfqd->cic_list)) {
3905                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3906                                                         struct cfq_io_context,
3907                                                         queue_list);
3908
3909                 __cfq_exit_single_io_context(cfqd, cic);
3910         }
3911
3912         cfq_put_async_queues(cfqd);
3913         cfq_release_cfq_groups(cfqd);
3914
3915         /*
3916          * If there are groups which we could not unlink from blkcg list,
3917          * wait for an RCU grace period for them to be freed.
3918          */
3919         if (cfqd->nr_blkcg_linked_grps)
3920                 wait = true;
3921
3922         spin_unlock_irq(q->queue_lock);
3923
3924         cfq_shutdown_timer_wq(cfqd);
3925
3926         spin_lock(&cic_index_lock);
3927         ida_remove(&cic_index_ida, cfqd->cic_index);
3928         spin_unlock(&cic_index_lock);
3929
3930         /*
3931          * Wait for cfqg->blkg->key accessors to exit their grace periods.
3932          * Do this wait only if there are other unlinked groups out
3933          * there. This can happen if the cgroup deletion path claimed the
3934          * responsibility of cleaning up a group before the queue cleanup code
3935          * got to the group.
3936          *
3937          * Do not call synchronize_rcu() unconditionally as there are drivers
3938          * which create/delete request queue hundreds of times during scan/boot
3939          * and synchronize_rcu() can take significant time and slow down boot.
3940          */
3941         if (wait)
3942                 synchronize_rcu();
3943
3944 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3945         /* Free up per cpu stats for root group */
3946         free_percpu(cfqd->root_group.blkg.stats_cpu);
3947 #endif
3948         kfree(cfqd);
3949 }
3950
3951 static int cfq_alloc_cic_index(void)
3952 {
3953         int index, error;
3954
3955         do {
3956                 if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
3957                         return -ENOMEM;
3958
3959                 spin_lock(&cic_index_lock);
3960                 error = ida_get_new(&cic_index_ida, &index);
3961                 spin_unlock(&cic_index_lock);
3962                 if (error && error != -EAGAIN)
3963                         return error;
3964         } while (error);
3965
3966         return index;
3967 }
3968
3969 static void *cfq_init_queue(struct request_queue *q)
3970 {
3971         struct cfq_data *cfqd;
3972         int i, j;
3973         struct cfq_group *cfqg;
3974         struct cfq_rb_root *st;
3975
3976         i = cfq_alloc_cic_index();
3977         if (i < 0)
3978                 return NULL;
3979
3980         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3981         if (!cfqd)
3982                 return NULL;
3983
3984         /*
3985          * No need to take queue_lock in this routine, since we are
3986          * initializing the io scheduler and nobody else is using cfqd yet
3987          */
3988         cfqd->cic_index = i;
3989
3990         /* Init root service tree */
3991         cfqd->grp_service_tree = CFQ_RB_ROOT;
3992
3993         /* Init root group */
3994         cfqg = &cfqd->root_group;
3995         for_each_cfqg_st(cfqg, i, j, st)
3996                 *st = CFQ_RB_ROOT;
3997         RB_CLEAR_NODE(&cfqg->rb_node);
3998
3999         /* Give preference to root group over other groups */
4000         cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
4001
4002 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4003         /*
4004          * Set root group reference to 2. One reference will be dropped when
4005          * all groups on cfqd->cfqg_list are being deleted during queue exit.
4006          * The other reference remains, as we don't want to delete this
4007          * group: it is statically allocated (embedded in cfq_data) and is
4008          * freed only when cfq_data itself goes away.
4009          */
4010         cfqg->ref = 2;
4011
4012         if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
4013                 kfree(cfqg);
4014                 kfree(cfqd);
4015                 return NULL;
4016         }
4017
4018         rcu_read_lock();
4019
4020         cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
4021                                         (void *)cfqd, 0);
4022         rcu_read_unlock();
4023         cfqd->nr_blkcg_linked_grps++;
4024
4025         /* Add group on cfqd->cfqg_list */
4026         hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
4027 #endif
4028         /*
4029          * Not strictly needed (since RB_ROOT just clears the node and we
4030          * zeroed cfqd on alloc), but better be safe in case someone decides
4031          * to add magic to the rb code
4032          */
4033         for (i = 0; i < CFQ_PRIO_LISTS; i++)
4034                 cfqd->prio_trees[i] = RB_ROOT;
4035
4036         /*
4037          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
4038          * Grab a permanent reference to it, so that the normal code flow
4039          * will not attempt to free it.
4040          */
4041         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4042         cfqd->oom_cfqq.ref++;
4043         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
4044
4045         INIT_LIST_HEAD(&cfqd->cic_list);
4046
4047         cfqd->queue = q;
4048
4049         init_timer(&cfqd->idle_slice_timer);
4050         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4051         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4052
4053         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4054
4055         cfqd->cfq_quantum = cfq_quantum;
4056         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4057         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4058         cfqd->cfq_back_max = cfq_back_max;
4059         cfqd->cfq_back_penalty = cfq_back_penalty;
4060         cfqd->cfq_slice[0] = cfq_slice_async;
4061         cfqd->cfq_slice[1] = cfq_slice_sync;
4062         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4063         cfqd->cfq_slice_idle = cfq_slice_idle;
4064         cfqd->cfq_group_idle = cfq_group_idle;
4065         cfqd->cfq_latency = 1;
4066         cfqd->hw_tag = -1;
4067         /*
4068          * we optimistically start assuming sync ops weren't delayed in the last
4069          * second, in order to have larger depth for async operations.
4070          */
4071         cfqd->last_delayed_sync = jiffies - HZ;
4072         return cfqd;
4073 }
4074
4075 static void cfq_slab_kill(void)
4076 {
4077         /*
4078          * Caller already ensured that pending RCU callbacks are completed,
4079          * so we should have no busy allocations at this point.
4080          */
4081         if (cfq_pool)
4082                 kmem_cache_destroy(cfq_pool);
4083         if (cfq_ioc_pool)
4084                 kmem_cache_destroy(cfq_ioc_pool);
4085 }
4086
4087 static int __init cfq_slab_setup(void)
4088 {
4089         cfq_pool = KMEM_CACHE(cfq_queue, 0);
4090         if (!cfq_pool)
4091                 goto fail;
4092
4093         cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
4094         if (!cfq_ioc_pool)
4095                 goto fail;
4096
4097         return 0;
4098 fail:
4099         cfq_slab_kill();
4100         return -ENOMEM;
4101 }
4102
4103 /*
4104  * sysfs parts below -->
4105  */
4106 static ssize_t
4107 cfq_var_show(unsigned int var, char *page)
4108 {
4109         return sprintf(page, "%d\n", var);
4110 }
4111
4112 static ssize_t
4113 cfq_var_store(unsigned int *var, const char *page, size_t count)
4114 {
4115         char *p = (char *) page;
4116
4117         *var = simple_strtoul(p, &p, 10);
4118         return count;
4119 }
4120
4121 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
4122 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4123 {                                                                       \
4124         struct cfq_data *cfqd = e->elevator_data;                       \
4125         unsigned int __data = __VAR;                                    \
4126         if (__CONV)                                                     \
4127                 __data = jiffies_to_msecs(__data);                      \
4128         return cfq_var_show(__data, (page));                            \
4129 }
4130 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4131 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4132 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4133 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4134 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4135 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4136 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4137 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4138 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4139 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4140 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4141 #undef SHOW_FUNCTION
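
/*
 * For reference, one instance of the macro above expands to roughly the
 * following; the timed tunables are kept in jiffies internally and exposed
 * through sysfs in milliseconds:
 *
 *      static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
 *      {
 *              struct cfq_data *cfqd = e->elevator_data;
 *              unsigned int __data = cfqd->cfq_slice_idle;
 *              if (1)
 *                      __data = jiffies_to_msecs(__data);
 *              return cfq_var_show(__data, (page));
 *      }
 *
 * The STORE_FUNCTION() definitions below do the reverse with
 * msecs_to_jiffies() and clamp the written value to [MIN, MAX].
 */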
4142
4143 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4144 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4145 {                                                                       \
4146         struct cfq_data *cfqd = e->elevator_data;                       \
4147         unsigned int __data;                                            \
4148         int ret = cfq_var_store(&__data, (page), count);                \
4149         if (__data < (MIN))                                             \
4150                 __data = (MIN);                                         \
4151         else if (__data > (MAX))                                        \
4152                 __data = (MAX);                                         \
4153         if (__CONV)                                                     \
4154                 *(__PTR) = msecs_to_jiffies(__data);                    \
4155         else                                                            \
4156                 *(__PTR) = __data;                                      \
4157         return ret;                                                     \
4158 }
4159 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4160 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4161                 UINT_MAX, 1);
4162 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4163                 UINT_MAX, 1);
4164 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4165 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4166                 UINT_MAX, 0);
4167 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4168 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4169 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4170 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4171 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4172                 UINT_MAX, 0);
4173 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4174 #undef STORE_FUNCTION
4175
4176 #define CFQ_ATTR(name) \
4177         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4178
4179 static struct elv_fs_entry cfq_attrs[] = {
4180         CFQ_ATTR(quantum),
4181         CFQ_ATTR(fifo_expire_sync),
4182         CFQ_ATTR(fifo_expire_async),
4183         CFQ_ATTR(back_seek_max),
4184         CFQ_ATTR(back_seek_penalty),
4185         CFQ_ATTR(slice_sync),
4186         CFQ_ATTR(slice_async),
4187         CFQ_ATTR(slice_async_rq),
4188         CFQ_ATTR(slice_idle),
4189         CFQ_ATTR(group_idle),
4190         CFQ_ATTR(low_latency),
4191         __ATTR_NULL
4192 };
4193
4194 static struct elevator_type iosched_cfq = {
4195         .ops = {
4196                 .elevator_merge_fn =            cfq_merge,
4197                 .elevator_merged_fn =           cfq_merged_request,
4198                 .elevator_merge_req_fn =        cfq_merged_requests,
4199                 .elevator_allow_merge_fn =      cfq_allow_merge,
4200                 .elevator_bio_merged_fn =       cfq_bio_merged,
4201                 .elevator_dispatch_fn =         cfq_dispatch_requests,
4202                 .elevator_add_req_fn =          cfq_insert_request,
4203                 .elevator_activate_req_fn =     cfq_activate_request,
4204                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
4205                 .elevator_completed_req_fn =    cfq_completed_request,
4206                 .elevator_former_req_fn =       elv_rb_former_request,
4207                 .elevator_latter_req_fn =       elv_rb_latter_request,
4208                 .elevator_set_req_fn =          cfq_set_request,
4209                 .elevator_put_req_fn =          cfq_put_request,
4210                 .elevator_may_queue_fn =        cfq_may_queue,
4211                 .elevator_init_fn =             cfq_init_queue,
4212                 .elevator_exit_fn =             cfq_exit_queue,
4213                 .trim =                         cfq_free_io_context,
4214         },
4215         .elevator_attrs =       cfq_attrs,
4216         .elevator_name =        "cfq",
4217         .elevator_owner =       THIS_MODULE,
4218 };
4219
4220 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4221 static struct blkio_policy_type blkio_policy_cfq = {
4222         .ops = {
4223                 .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
4224                 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
4225         },
4226         .plid = BLKIO_POLICY_PROP,
4227 };
4228 #else
4229 static struct blkio_policy_type blkio_policy_cfq;
4230 #endif
4231
4232 static int __init cfq_init(void)
4233 {
4234         /*
4235          * these could be 0 on HZ < 1000 setups (e.g. HZ=100 makes HZ/125 == 0)
4236          */
4237         if (!cfq_slice_async)
4238                 cfq_slice_async = 1;
4239         if (!cfq_slice_idle)
4240                 cfq_slice_idle = 1;
4241
4242 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4243         if (!cfq_group_idle)
4244                 cfq_group_idle = 1;
4245 #else
4246         cfq_group_idle = 0;
4247 #endif
4248         if (cfq_slab_setup())
4249                 return -ENOMEM;
4250
4251         elv_register(&iosched_cfq);
4252         blkio_policy_register(&blkio_policy_cfq);
4253
4254         return 0;
4255 }
4256
4257 static void __exit cfq_exit(void)
4258 {
4259         DECLARE_COMPLETION_ONSTACK(all_gone);
4260         blkio_policy_unregister(&blkio_policy_cfq);
4261         elv_unregister(&iosched_cfq);
4262         ioc_gone = &all_gone;
4263         /* ioc_gone's update must be visible before reading ioc_count */
4264         smp_wmb();
4265
4266         /*
4267          * this also protects us from entering cfq_slab_kill() with
4268          * pending RCU callbacks
4269          */
4270         if (elv_ioc_count_read(cfq_ioc_count))
4271                 wait_for_completion(&all_gone);
4272         ida_destroy(&cic_index_ida);
4273         cfq_slab_kill();
4274 }
4275
4276 module_init(cfq_init);
4277 module_exit(cfq_exit);
4278
4279 MODULE_AUTHOR("Jens Axboe");
4280 MODULE_LICENSE("GPL");
4281 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");