blk-cgroup: Add unaccounted time to timeslice_used.
block/cfq-iosched.c (linux-2.6.git)
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "cfq.h"
18
19 /*
20  * tunables
21  */
22 /* max requests dispatched in one round of service */
23 static const int cfq_quantum = 8;
24 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
25 /* maximum backwards seek, in KiB */
26 static const int cfq_back_max = 16 * 1024;
27 /* penalty of a backwards seek */
28 static const int cfq_back_penalty = 2;
29 static const int cfq_slice_sync = HZ / 10;
30 static int cfq_slice_async = HZ / 25;
31 static const int cfq_slice_async_rq = 2;
32 static int cfq_slice_idle = HZ / 125;
33 static int cfq_group_idle = HZ / 125;
34 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
35 static const int cfq_hist_divisor = 4;
36
37 /*
38  * offset from end of service tree
39  */
40 #define CFQ_IDLE_DELAY          (HZ / 5)
41
42 /*
43  * below this threshold, we consider thinktime immediate
44  */
45 #define CFQ_MIN_TT              (2)
46
47 #define CFQ_SLICE_SCALE         (5)
48 #define CFQ_HW_QUEUE_MIN        (5)
49 #define CFQ_SERVICE_SHIFT       12
50
51 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
52 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
53 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
54 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
55
56 #define RQ_CIC(rq)              \
57         ((struct cfq_io_context *) (rq)->elevator_private[0])
58 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private[1])
59 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elevator_private[2])
60
61 static struct kmem_cache *cfq_pool;
62 static struct kmem_cache *cfq_ioc_pool;
63
64 static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
65 static struct completion *ioc_gone;
66 static DEFINE_SPINLOCK(ioc_gone_lock);
67
68 static DEFINE_SPINLOCK(cic_index_lock);
69 static DEFINE_IDA(cic_index_ida);
70
71 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
72 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
73 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
74
75 #define sample_valid(samples)   ((samples) > 80)
76 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
77
78 /*
79  * Most of our rbtree usage is for sorting with min extraction, so
80  * if we cache the leftmost node we don't have to walk down the tree
81  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
82  * move this into the elevator for the rq sorting as well.
83  */
84 struct cfq_rb_root {
85         struct rb_root rb;
86         struct rb_node *left;
87         unsigned count;
88         unsigned total_weight;
89         u64 min_vdisktime;
90 };
91 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
92                         .count = 0, .min_vdisktime = 0, }
93
94 /*
95  * Per process-grouping structure
96  */
97 struct cfq_queue {
98         /* reference count */
99         int ref;
100         /* various state flags, see below */
101         unsigned int flags;
102         /* parent cfq_data */
103         struct cfq_data *cfqd;
104         /* service_tree member */
105         struct rb_node rb_node;
106         /* service_tree key */
107         unsigned long rb_key;
108         /* prio tree member */
109         struct rb_node p_node;
110         /* prio tree root we belong to, if any */
111         struct rb_root *p_root;
112         /* sorted list of pending requests */
113         struct rb_root sort_list;
114         /* if fifo isn't expired, next request to serve */
115         struct request *next_rq;
116         /* requests queued in sort_list */
117         int queued[2];
118         /* currently allocated requests */
119         int allocated[2];
120         /* fifo list of requests in sort_list */
121         struct list_head fifo;
122
123         /* time when queue got scheduled in to dispatch first request. */
124         unsigned long dispatch_start;
125         unsigned int allocated_slice;
126         unsigned int slice_dispatch;
127         /* time when first request from queue completed and slice started. */
128         unsigned long slice_start;
129         unsigned long slice_end;
130         long slice_resid;
131
132         /* pending metadata requests */
133         int meta_pending;
134         /* number of requests that are on the dispatch list or inside driver */
135         int dispatched;
136
137         /* io prio of this group */
138         unsigned short ioprio, org_ioprio;
139         unsigned short ioprio_class, org_ioprio_class;
140
141         pid_t pid;
142
143         u32 seek_history;
144         sector_t last_request_pos;
145
146         struct cfq_rb_root *service_tree;
147         struct cfq_queue *new_cfqq;
148         struct cfq_group *cfqg;
149         /* Number of sectors dispatched from queue in single dispatch round */
150         unsigned long nr_sectors;
151 };
152
153 /*
154  * First index in the service_trees.
155  * IDLE is handled separately and does not index service_trees[] (it uses service_tree_idle)
156  */
157 enum wl_prio_t {
158         BE_WORKLOAD = 0,
159         RT_WORKLOAD = 1,
160         IDLE_WORKLOAD = 2,
161         CFQ_PRIO_NR,
162 };
163
164 /*
165  * Second index in the service_trees.
166  */
167 enum wl_type_t {
168         ASYNC_WORKLOAD = 0,
169         SYNC_NOIDLE_WORKLOAD = 1,
170         SYNC_WORKLOAD = 2
171 };
172
173 /* This is per cgroup per device grouping structure */
174 struct cfq_group {
175         /* group service_tree member */
176         struct rb_node rb_node;
177
178         /* group service_tree key */
179         u64 vdisktime;
180         unsigned int weight;
181
182         /* number of cfqq currently on this group */
183         int nr_cfqq;
184
185         /*
186          * Per group busy queues average. Useful for workload slice calc. We
187          * create the array for each prio class but at run time it is used
188          * only for the RT and BE classes; the slot for the IDLE class remains unused.
189          * This is primarily done to avoid confusion and a gcc warning.
190          */
191         unsigned int busy_queues_avg[CFQ_PRIO_NR];
192         /*
193          * rr lists of queues with requests. We maintain service trees for
194          * RT and BE classes. These trees are subdivided into subclasses
195          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
196          * class there is no subclassification and all the cfq queues go on
197          * a single tree service_tree_idle.
198          * Counts are embedded in the cfq_rb_root
199          */
200         struct cfq_rb_root service_trees[2][3];
201         struct cfq_rb_root service_tree_idle;
202
203         unsigned long saved_workload_slice;
204         enum wl_type_t saved_workload;
205         enum wl_prio_t saved_serving_prio;
206         struct blkio_group blkg;
207 #ifdef CONFIG_CFQ_GROUP_IOSCHED
208         struct hlist_node cfqd_node;
209         int ref;
210 #endif
211         /* number of requests that are on the dispatch list or inside driver */
212         int dispatched;
213 };
214
215 /*
216  * Per block device queue structure
217  */
218 struct cfq_data {
219         struct request_queue *queue;
220         /* Root service tree for cfq_groups */
221         struct cfq_rb_root grp_service_tree;
222         struct cfq_group root_group;
223
224         /*
225          * The priority currently being served
226          */
227         enum wl_prio_t serving_prio;
228         enum wl_type_t serving_type;
229         unsigned long workload_expires;
230         struct cfq_group *serving_group;
231
232         /*
233          * Each priority tree is sorted by next_request position.  These
234          * trees are used when determining if two or more queues are
235          * interleaving requests (see cfq_close_cooperator).
236          */
237         struct rb_root prio_trees[CFQ_PRIO_LISTS];
238
239         unsigned int busy_queues;
240         unsigned int busy_sync_queues;
241
242         int rq_in_driver;
243         int rq_in_flight[2];
244
245         /*
246          * queue-depth detection
247          */
248         int rq_queued;
249         int hw_tag;
250         /*
251          * hw_tag can be
252          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
253          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
254          *  0 => no NCQ
255          */
256         int hw_tag_est_depth;
257         unsigned int hw_tag_samples;
258
259         /*
260          * idle window management
261          */
262         struct timer_list idle_slice_timer;
263         struct work_struct unplug_work;
264
265         struct cfq_queue *active_queue;
266         struct cfq_io_context *active_cic;
267
268         /*
269          * async queue for each priority case
270          */
271         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
272         struct cfq_queue *async_idle_cfqq;
273
274         sector_t last_position;
275
276         /*
277          * tunables, see top of file
278          */
279         unsigned int cfq_quantum;
280         unsigned int cfq_fifo_expire[2];
281         unsigned int cfq_back_penalty;
282         unsigned int cfq_back_max;
283         unsigned int cfq_slice[2];
284         unsigned int cfq_slice_async_rq;
285         unsigned int cfq_slice_idle;
286         unsigned int cfq_group_idle;
287         unsigned int cfq_latency;
288
289         unsigned int cic_index;
290         struct list_head cic_list;
291
292         /*
293          * Fallback dummy cfqq for extreme OOM conditions
294          */
295         struct cfq_queue oom_cfqq;
296
297         unsigned long last_delayed_sync;
298
299         /* List of cfq groups being managed on this device */
300         struct hlist_head cfqg_list;
301         struct rcu_head rcu;
302 };
303
304 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
305
306 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
307                                             enum wl_prio_t prio,
308                                             enum wl_type_t type)
309 {
310         if (!cfqg)
311                 return NULL;
312
313         if (prio == IDLE_WORKLOAD)
314                 return &cfqg->service_tree_idle;
315
316         return &cfqg->service_trees[prio][type];
317 }
318
319 enum cfqq_state_flags {
320         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
321         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
322         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
323         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
324         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
325         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
326         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
327         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
328         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
329         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
330         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
331         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
332         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
333 };
334
335 #define CFQ_CFQQ_FNS(name)                                              \
336 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
337 {                                                                       \
338         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
339 }                                                                       \
340 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
341 {                                                                       \
342         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
343 }                                                                       \
344 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
345 {                                                                       \
346         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
347 }
348
349 CFQ_CFQQ_FNS(on_rr);
350 CFQ_CFQQ_FNS(wait_request);
351 CFQ_CFQQ_FNS(must_dispatch);
352 CFQ_CFQQ_FNS(must_alloc_slice);
353 CFQ_CFQQ_FNS(fifo_expire);
354 CFQ_CFQQ_FNS(idle_window);
355 CFQ_CFQQ_FNS(prio_changed);
356 CFQ_CFQQ_FNS(slice_new);
357 CFQ_CFQQ_FNS(sync);
358 CFQ_CFQQ_FNS(coop);
359 CFQ_CFQQ_FNS(split_coop);
360 CFQ_CFQQ_FNS(deep);
361 CFQ_CFQQ_FNS(wait_busy);
362 #undef CFQ_CFQQ_FNS
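/*
 * Illustration (not part of the original source): for the on_rr flag, the
 * CFQ_CFQQ_FNS(on_rr) invocation above expands to roughly:
 *
 *   static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *   {
 *           cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *   }
 *   static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *   {
 *           cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *   }
 *   static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *   {
 *           return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *   }
 *
 * i.e. one mark/clear/test triple is generated per flag listed above.
 */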
363
364 #ifdef CONFIG_CFQ_GROUP_IOSCHED
365 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
366         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
367                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
368                         blkg_path(&(cfqq)->cfqg->blkg), ##args);
369
370 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
371         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
372                                 blkg_path(&(cfqg)->blkg), ##args);      \
373
374 #else
375 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
376         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
377 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0);
378 #endif
379 #define cfq_log(cfqd, fmt, args...)     \
380         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
381
382 /* Traverses through cfq group service trees */
383 #define for_each_cfqg_st(cfqg, i, j, st) \
384         for (i = 0; i <= IDLE_WORKLOAD; i++) \
385                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
386                         : &cfqg->service_tree_idle; \
387                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
388                         (i == IDLE_WORKLOAD && j == 0); \
389                         j++, st = i < IDLE_WORKLOAD ? \
390                         &cfqg->service_trees[i][j]: NULL) \
391
392
393 static inline bool iops_mode(struct cfq_data *cfqd)
394 {
395         /*
396          * If we are not idling on queues and it is an NCQ drive, requests
397          * execute in parallel and measuring time is not meaningful in most
398          * cases, unless we drive shallower queue depths, which in turn
399          * becomes a performance bottleneck. In such cases switch to providing
400          * fairness in terms of number of IOs.
401          */
402         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
403                 return true;
404         else
405                 return false;
406 }
407
408 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
409 {
410         if (cfq_class_idle(cfqq))
411                 return IDLE_WORKLOAD;
412         if (cfq_class_rt(cfqq))
413                 return RT_WORKLOAD;
414         return BE_WORKLOAD;
415 }
416
417
418 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
419 {
420         if (!cfq_cfqq_sync(cfqq))
421                 return ASYNC_WORKLOAD;
422         if (!cfq_cfqq_idle_window(cfqq))
423                 return SYNC_NOIDLE_WORKLOAD;
424         return SYNC_WORKLOAD;
425 }
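/*
 * Example (illustrative): combining cfqq_prio() and cfqq_type(), a sync
 * BE-class queue with idling enabled is keyed as
 * service_trees[BE_WORKLOAD][SYNC_WORKLOAD]; a sync queue with the idle
 * window disabled falls in the SYNC_NOIDLE_WORKLOAD column; an async queue
 * in ASYNC_WORKLOAD. An idle-class queue bypasses the 2x3 array entirely
 * and goes on service_tree_idle (see service_tree_for() above).
 */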
426
427 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
428                                         struct cfq_data *cfqd,
429                                         struct cfq_group *cfqg)
430 {
431         if (wl == IDLE_WORKLOAD)
432                 return cfqg->service_tree_idle.count;
433
434         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
435                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
436                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
437 }
438
439 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
440                                         struct cfq_group *cfqg)
441 {
442         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
443                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
444 }
445
446 static void cfq_dispatch_insert(struct request_queue *, struct request *);
447 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
448                                        struct io_context *, gfp_t);
449 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
450                                                 struct io_context *);
451
452 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
453                                             bool is_sync)
454 {
455         return cic->cfqq[is_sync];
456 }
457
458 static inline void cic_set_cfqq(struct cfq_io_context *cic,
459                                 struct cfq_queue *cfqq, bool is_sync)
460 {
461         cic->cfqq[is_sync] = cfqq;
462 }
463
464 #define CIC_DEAD_KEY    1ul
465 #define CIC_DEAD_INDEX_SHIFT    1
466
467 static inline void *cfqd_dead_key(struct cfq_data *cfqd)
468 {
469         return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
470 }
471
472 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
473 {
474         struct cfq_data *cfqd = cic->key;
475
476         if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
477                 return NULL;
478
479         return cfqd;
480 }
481
482 /*
483  * We regard a request as SYNC if it is either a read or has the SYNC bit
484  * set (in which case it could also be a direct WRITE).
485  */
486 static inline bool cfq_bio_sync(struct bio *bio)
487 {
488         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
489 }
490
491 /*
492  * Schedule a run of the queue if there are requests pending and there is
493  * nothing in the driver that will restart queueing.
494  */
495 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
496 {
497         if (cfqd->busy_queues) {
498                 cfq_log(cfqd, "schedule dispatch");
499                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
500         }
501 }
502
503 /*
504  * Scale schedule slice based on io priority. Use the sync time slice only
505  * if a queue is marked sync and has sync io queued. A sync queue with async
506  * io only should not get the full sync slice length.
507  */
508 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
509                                  unsigned short prio)
510 {
511         const int base_slice = cfqd->cfq_slice[sync];
512
513         WARN_ON(prio >= IOPRIO_BE_NR);
514
515         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
516 }
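/*
 * Worked example (assuming HZ=1000 and the default cfq_slice_sync = HZ/10):
 * base_slice = 100 jiffies and base_slice/CFQ_SLICE_SCALE = 20, so a sync
 * queue gets 100 + 20 * (4 - prio) jiffies:
 *   prio 0 -> 180 ms, prio 4 (the default) -> 100 ms, prio 7 -> 40 ms.
 */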
517
518 static inline int
519 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
520 {
521         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
522 }
523
524 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
525 {
526         u64 d = delta << CFQ_SERVICE_SHIFT;
527
528         d = d * BLKIO_WEIGHT_DEFAULT;
529         do_div(d, cfqg->weight);
530         return d;
531 }
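/*
 * Example (illustrative): the charge is scaled inversely with group weight,
 * so for a group configured at twice the default weight
 *   d = (delta << CFQ_SERVICE_SHIFT) * BLKIO_WEIGHT_DEFAULT / (2 * BLKIO_WEIGHT_DEFAULT)
 *     = (delta << CFQ_SERVICE_SHIFT) / 2,
 * i.e. its vdisktime advances at half the rate and it therefore receives
 * roughly twice the service share.
 */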
532
533 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
534 {
535         s64 delta = (s64)(vdisktime - min_vdisktime);
536         if (delta > 0)
537                 min_vdisktime = vdisktime;
538
539         return min_vdisktime;
540 }
541
542 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
543 {
544         s64 delta = (s64)(vdisktime - min_vdisktime);
545         if (delta < 0)
546                 min_vdisktime = vdisktime;
547
548         return min_vdisktime;
549 }
550
551 static void update_min_vdisktime(struct cfq_rb_root *st)
552 {
553         struct cfq_group *cfqg;
554
555         if (st->left) {
556                 cfqg = rb_entry_cfqg(st->left);
557                 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
558                                                   cfqg->vdisktime);
559         }
560 }
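/*
 * Note (illustrative): the (s64) cast makes the comparisons above safe across
 * u64 wraparound. E.g. with min_vdisktime = 0xFFFFFFFFFFFFFFF0 and a
 * vdisktime that wrapped to 0x10, vdisktime - min_vdisktime = 0x20, which is
 * positive as an s64, so the wrapped value is correctly treated as ahead.
 */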
561
562 /*
563  * Get the averaged number of queues of RT/BE priority.
564  * The average is updated with a formula that gives more weight to higher
565  * numbers, so that it quickly follows sudden increases and decreases slowly.
566  */
567
568 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
569                                         struct cfq_group *cfqg, bool rt)
570 {
571         unsigned min_q, max_q;
572         unsigned mult  = cfq_hist_divisor - 1;
573         unsigned round = cfq_hist_divisor / 2;
574         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
575
576         min_q = min(cfqg->busy_queues_avg[rt], busy);
577         max_q = max(cfqg->busy_queues_avg[rt], busy);
578         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
579                 cfq_hist_divisor;
580         return cfqg->busy_queues_avg[rt];
581 }
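/*
 * Worked example (cfq_hist_divisor = 4, so mult = 3 and round = 2): if the
 * stored average is 1 and the busy count jumps to 5, the new average is
 * (3 * 5 + 1 + 2) / 4 = 4, then (3 * 5 + 4 + 2) / 4 = 5 on the next update,
 * so the average tracks increases almost immediately but decays gradually.
 */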
582
583 static inline unsigned
584 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
585 {
586         struct cfq_rb_root *st = &cfqd->grp_service_tree;
587
588         return cfq_target_latency * cfqg->weight / st->total_weight;
589 }
590
591 static inline unsigned
592 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
593 {
594         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
595         if (cfqd->cfq_latency) {
596                 /*
597                  * interested queues (we consider only the ones with the same
598                  * priority class in the cfq group)
599                  */
600                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
601                                                 cfq_class_rt(cfqq));
602                 unsigned sync_slice = cfqd->cfq_slice[1];
603                 unsigned expect_latency = sync_slice * iq;
604                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
605
606                 if (expect_latency > group_slice) {
607                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
608                         /* scale low_slice according to IO priority
609                          * and sync vs async */
610                         unsigned low_slice =
611                                 min(slice, base_low_slice * slice / sync_slice);
612                         /* the adapted slice value is scaled to fit all iqs
613                          * into the target latency */
614                         slice = max(slice * group_slice / expect_latency,
615                                     low_slice);
616                 }
617         }
618         return slice;
619 }
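/*
 * Worked example (assuming HZ=1000, default tunables, a prio-4 sync queue and
 * a group holding half of the total weight): the queue's base slice is 100 ms
 * and cfq_group_slice() = 300 * 1/2 = 150 ms. With iq = 3 busy queues,
 * expect_latency = 3 * 100 = 300 ms > 150 ms, so the slice is scaled to
 * max(100 * 150 / 300, low_slice) = 50 ms, where
 * low_slice = min(100, 2 * 8 * 100 / 100) = 16 ms with cfq_slice_idle = 8.
 */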
620
621 static inline void
622 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
623 {
624         unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
625
626         cfqq->slice_start = jiffies;
627         cfqq->slice_end = jiffies + slice;
628         cfqq->allocated_slice = slice;
629         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
630 }
631
632 /*
633  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
634  * isn't valid until the first request from the dispatch is activated
635  * and the slice time set.
636  */
637 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
638 {
639         if (cfq_cfqq_slice_new(cfqq))
640                 return false;
641         if (time_before(jiffies, cfqq->slice_end))
642                 return false;
643
644         return true;
645 }
646
647 /*
648  * Lifted from AS - choose which of rq1 and rq2 is best served now.
649  * We choose the request that is closest to the head right now. Distance
650  * behind the head is penalized and only allowed to a certain extent.
651  */
652 static struct request *
653 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
654 {
655         sector_t s1, s2, d1 = 0, d2 = 0;
656         unsigned long back_max;
657 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
658 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
659         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
660
661         if (rq1 == NULL || rq1 == rq2)
662                 return rq2;
663         if (rq2 == NULL)
664                 return rq1;
665
666         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
667                 return rq1;
668         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
669                 return rq2;
670         if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
671                 return rq1;
672         else if ((rq2->cmd_flags & REQ_META) &&
673                  !(rq1->cmd_flags & REQ_META))
674                 return rq2;
675
676         s1 = blk_rq_pos(rq1);
677         s2 = blk_rq_pos(rq2);
678
679         /*
680          * by definition, 1KiB is 2 sectors
681          */
682         back_max = cfqd->cfq_back_max * 2;
683
684         /*
685          * Strict one way elevator _except_ in the case where we allow
686          * short backward seeks which are biased as twice the cost of a
687          * similar forward seek.
688          */
689         if (s1 >= last)
690                 d1 = s1 - last;
691         else if (s1 + back_max >= last)
692                 d1 = (last - s1) * cfqd->cfq_back_penalty;
693         else
694                 wrap |= CFQ_RQ1_WRAP;
695
696         if (s2 >= last)
697                 d2 = s2 - last;
698         else if (s2 + back_max >= last)
699                 d2 = (last - s2) * cfqd->cfq_back_penalty;
700         else
701                 wrap |= CFQ_RQ2_WRAP;
702
703         /* Found required data */
704
705         /*
706          * By doing switch() on the bit mask "wrap" we avoid having to
707          * check two variables for all permutations: --> faster!
708          */
709         switch (wrap) {
710         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
711                 if (d1 < d2)
712                         return rq1;
713                 else if (d2 < d1)
714                         return rq2;
715                 else {
716                         if (s1 >= s2)
717                                 return rq1;
718                         else
719                                 return rq2;
720                 }
721
722         case CFQ_RQ2_WRAP:
723                 return rq1;
724         case CFQ_RQ1_WRAP:
725                 return rq2;
726         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
727         default:
728                 /*
729                  * Since both rqs are wrapped,
730                  * start with the one that's further behind head
731                  * (--> only *one* back seek required),
732                  * since back seek takes more time than forward.
733                  */
734                 if (s1 <= s2)
735                         return rq1;
736                 else
737                         return rq2;
738         }
739 }
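/*
 * Worked example (illustrative, both requests sync and non-META): with the
 * head at sector 1000, rq1 at sector 1100, rq2 at sector 980 and the default
 * cfq_back_penalty = 2, neither request wraps: d1 = 100 while
 * d2 = (1000 - 980) * 2 = 40, so rq2 is chosen even though it lies behind the
 * head, because its penalized backward distance is still the smaller one.
 */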
740
741 /*
742  * The below is the leftmost-node cache add-on for the rbtree.
743  */
744 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
745 {
746         /* Service tree is empty */
747         if (!root->count)
748                 return NULL;
749
750         if (!root->left)
751                 root->left = rb_first(&root->rb);
752
753         if (root->left)
754                 return rb_entry(root->left, struct cfq_queue, rb_node);
755
756         return NULL;
757 }
758
759 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
760 {
761         if (!root->left)
762                 root->left = rb_first(&root->rb);
763
764         if (root->left)
765                 return rb_entry_cfqg(root->left);
766
767         return NULL;
768 }
769
770 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
771 {
772         rb_erase(n, root);
773         RB_CLEAR_NODE(n);
774 }
775
776 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
777 {
778         if (root->left == n)
779                 root->left = NULL;
780         rb_erase_init(n, &root->rb);
781         --root->count;
782 }
783
784 /*
785  * would be nice to take fifo expire time into account as well
786  */
787 static struct request *
788 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
789                   struct request *last)
790 {
791         struct rb_node *rbnext = rb_next(&last->rb_node);
792         struct rb_node *rbprev = rb_prev(&last->rb_node);
793         struct request *next = NULL, *prev = NULL;
794
795         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
796
797         if (rbprev)
798                 prev = rb_entry_rq(rbprev);
799
800         if (rbnext)
801                 next = rb_entry_rq(rbnext);
802         else {
803                 rbnext = rb_first(&cfqq->sort_list);
804                 if (rbnext && rbnext != &last->rb_node)
805                         next = rb_entry_rq(rbnext);
806         }
807
808         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
809 }
810
811 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
812                                       struct cfq_queue *cfqq)
813 {
814         /*
815          * just an approximation, should be ok.
816          */
817         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
818                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
819 }
820
821 static inline s64
822 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
823 {
824         return cfqg->vdisktime - st->min_vdisktime;
825 }
826
827 static void
828 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
829 {
830         struct rb_node **node = &st->rb.rb_node;
831         struct rb_node *parent = NULL;
832         struct cfq_group *__cfqg;
833         s64 key = cfqg_key(st, cfqg);
834         int left = 1;
835
836         while (*node != NULL) {
837                 parent = *node;
838                 __cfqg = rb_entry_cfqg(parent);
839
840                 if (key < cfqg_key(st, __cfqg))
841                         node = &parent->rb_left;
842                 else {
843                         node = &parent->rb_right;
844                         left = 0;
845                 }
846         }
847
848         if (left)
849                 st->left = &cfqg->rb_node;
850
851         rb_link_node(&cfqg->rb_node, parent, node);
852         rb_insert_color(&cfqg->rb_node, &st->rb);
853 }
854
855 static void
856 cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
857 {
858         struct cfq_rb_root *st = &cfqd->grp_service_tree;
859         struct cfq_group *__cfqg;
860         struct rb_node *n;
861
862         cfqg->nr_cfqq++;
863         if (!RB_EMPTY_NODE(&cfqg->rb_node))
864                 return;
865
866         /*
867          * Currently put the group at the end. Later implement something
868          * so that groups get a smaller vtime based on their weights, so that
869          * a group does not lose everything if it was not continuously backlogged.
870          */
871         n = rb_last(&st->rb);
872         if (n) {
873                 __cfqg = rb_entry_cfqg(n);
874                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
875         } else
876                 cfqg->vdisktime = st->min_vdisktime;
877
878         __cfq_group_service_tree_add(st, cfqg);
879         st->total_weight += cfqg->weight;
880 }
881
882 static void
883 cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
884 {
885         struct cfq_rb_root *st = &cfqd->grp_service_tree;
886
887         BUG_ON(cfqg->nr_cfqq < 1);
888         cfqg->nr_cfqq--;
889
890         /* If there are other cfq queues under this group, don't delete it */
891         if (cfqg->nr_cfqq)
892                 return;
893
894         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
895         st->total_weight -= cfqg->weight;
896         if (!RB_EMPTY_NODE(&cfqg->rb_node))
897                 cfq_rb_erase(&cfqg->rb_node, st);
898         cfqg->saved_workload_slice = 0;
899         cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
900 }
901
902 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
903                                                 unsigned int *unaccounted_time)
904 {
905         unsigned int slice_used;
906
907         /*
908          * Queue got expired before even a single request completed or
909          * got expired immediately after first request completion.
910          */
911         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
912                 /*
913                  * Also charge the seek time incurred to the group, otherwise
914                  * if there are multiple queues in the group, each can dispatch
915                  * a single request on seeky media and cause lots of seek time
916                  * and the group will never know it.
917                  */
918                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
919                                         1);
920         } else {
921                 slice_used = jiffies - cfqq->slice_start;
922                 if (slice_used > cfqq->allocated_slice) {
923                         *unaccounted_time = slice_used - cfqq->allocated_slice;
924                         slice_used = cfqq->allocated_slice;
925                 }
926                 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
927                         *unaccounted_time += cfqq->slice_start -
928                                         cfqq->dispatch_start;
929         }
930
931         return slice_used;
932 }
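/*
 * Worked example (assuming HZ=1000): a queue is scheduled in at
 * dispatch_start = 1000, its first request completes at slice_start = 1040,
 * allocated_slice = 100 and it is expired at jiffies = 1180. Then
 * slice_used = 1180 - 1040 = 140 is capped at 100, giving
 * unaccounted_time = 40, plus the 40 jiffies between dispatch_start and
 * slice_start, so the group is charged 100 of used slice and 80 of
 * unaccounted time.
 */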
933
934 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
935                                 struct cfq_queue *cfqq)
936 {
937         struct cfq_rb_root *st = &cfqd->grp_service_tree;
938         unsigned int used_sl, charge, unaccounted_sl = 0;
939         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
940                         - cfqg->service_tree_idle.count;
941
942         BUG_ON(nr_sync < 0);
943         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
944
945         if (iops_mode(cfqd))
946                 charge = cfqq->slice_dispatch;
947         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
948                 charge = cfqq->allocated_slice;
949
950         /* Can't update vdisktime while group is on service tree */
951         cfq_rb_erase(&cfqg->rb_node, st);
952         cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
953         __cfq_group_service_tree_add(st, cfqg);
954
955         /* This group is being expired. Save the context */
956         if (time_after(cfqd->workload_expires, jiffies)) {
957                 cfqg->saved_workload_slice = cfqd->workload_expires
958                                                 - jiffies;
959                 cfqg->saved_workload = cfqd->serving_type;
960                 cfqg->saved_serving_prio = cfqd->serving_prio;
961         } else
962                 cfqg->saved_workload_slice = 0;
963
964         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
965                                         st->min_vdisktime);
966         cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
967                         " sect=%u", used_sl, cfqq->slice_dispatch, charge,
968                         iops_mode(cfqd), cfqq->nr_sectors);
969         cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
970                                           unaccounted_sl);
971         cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
972 }
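/*
 * Example (illustrative): the charge depends on the mode. In time mode it is
 * the used slice from above; in iops_mode() it is slice_dispatch, e.g. 8 if
 * the queue dispatched 8 requests, regardless of how long they took; and an
 * async queue in a group with no busy sync queues is charged its full
 * allocated_slice.
 */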
973
974 #ifdef CONFIG_CFQ_GROUP_IOSCHED
975 static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
976 {
977         if (blkg)
978                 return container_of(blkg, struct cfq_group, blkg);
979         return NULL;
980 }
981
982 void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
983                                         unsigned int weight)
984 {
985         cfqg_of_blkg(blkg)->weight = weight;
986 }
987
988 static struct cfq_group *
989 cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
990 {
991         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
992         struct cfq_group *cfqg = NULL;
993         void *key = cfqd;
994         int i, j;
995         struct cfq_rb_root *st;
996         struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
997         unsigned int major, minor;
998
999         cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
1000         if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
1001                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1002                 cfqg->blkg.dev = MKDEV(major, minor);
1003                 goto done;
1004         }
1005         if (cfqg || !create)
1006                 goto done;
1007
1008         cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1009         if (!cfqg)
1010                 goto done;
1011
1012         for_each_cfqg_st(cfqg, i, j, st)
1013                 *st = CFQ_RB_ROOT;
1014         RB_CLEAR_NODE(&cfqg->rb_node);
1015
1016         /*
1017          * Take the initial reference that will be released on destroy.
1018          * This can be thought of as a joint reference by cgroup and
1019          * elevator which will be dropped by either elevator exit
1020          * or cgroup deletion path depending on who is exiting first.
1021          */
1022         cfqg->ref = 1;
1023
1024         /*
1025          * Add group onto cgroup list. It might happen that bdi->dev is
1026          * not initialized yet. Initialize this new group without major
1027          * and minor info and this info will be filled in once a new thread
1028          * comes for IO. See code above.
1029          */
1030         if (bdi->dev) {
1031                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1032                 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1033                                         MKDEV(major, minor));
1034         } else
1035                 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1036                                         0);
1037
1038         cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1039
1040         /* Add group on cfqd list */
1041         hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1042
1043 done:
1044         return cfqg;
1045 }
1046
1047 /*
1048  * Search for the cfq group current task belongs to. If create = 1, then also
1049  * create the cfq group if it does not exist. request_queue lock must be held.
1050  */
1051 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1052 {
1053         struct cgroup *cgroup;
1054         struct cfq_group *cfqg = NULL;
1055
1056         rcu_read_lock();
1057         cgroup = task_cgroup(current, blkio_subsys_id);
1058         cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
1059         if (!cfqg && create)
1060                 cfqg = &cfqd->root_group;
1061         rcu_read_unlock();
1062         return cfqg;
1063 }
1064
1065 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1066 {
1067         cfqg->ref++;
1068         return cfqg;
1069 }
1070
1071 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1072 {
1073         /* Currently, all async queues are mapped to root group */
1074         if (!cfq_cfqq_sync(cfqq))
1075                 cfqg = &cfqq->cfqd->root_group;
1076
1077         cfqq->cfqg = cfqg;
1078         /* cfqq reference on cfqg */
1079         cfqq->cfqg->ref++;
1080 }
1081
1082 static void cfq_put_cfqg(struct cfq_group *cfqg)
1083 {
1084         struct cfq_rb_root *st;
1085         int i, j;
1086
1087         BUG_ON(cfqg->ref <= 0);
1088         cfqg->ref--;
1089         if (cfqg->ref)
1090                 return;
1091         for_each_cfqg_st(cfqg, i, j, st)
1092                 BUG_ON(!RB_EMPTY_ROOT(&st->rb));
1093         kfree(cfqg);
1094 }
1095
1096 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1097 {
1098         /* Something wrong if we are trying to remove same group twice */
1099         BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1100
1101         hlist_del_init(&cfqg->cfqd_node);
1102
1103         /*
1104          * Put the reference taken at the time of creation so that when all
1105          * queues are gone, group can be destroyed.
1106          */
1107         cfq_put_cfqg(cfqg);
1108 }
1109
1110 static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1111 {
1112         struct hlist_node *pos, *n;
1113         struct cfq_group *cfqg;
1114
1115         hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1116                 /*
1117                  * If cgroup removal path got to blk_group first and removed
1118                  * it from cgroup list, then it will take care of destroying
1119                  * cfqg also.
1120                  */
1121                 if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
1122                         cfq_destroy_cfqg(cfqd, cfqg);
1123         }
1124 }
1125
1126 /*
1127  * Blk cgroup controller notification saying that blkio_group object is being
1128  * unlinked as the associated cgroup object is going away. That also means that
1129  * no new IO will come in this group. So get rid of this group as soon as
1130  * any pending IO in the group is finished.
1131  *
1132  * This function is called under rcu_read_lock(). key is the rcu protected
1133  * pointer. That means "key" is a valid cfq_data pointer as long as we hold the rcu
1134  * read lock.
1135  *
1136  * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1137  * it should not be NULL as even if the elevator was exiting, the cgroup deletion
1138  * path got to it first.
1139  */
1140 void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1141 {
1142         unsigned long  flags;
1143         struct cfq_data *cfqd = key;
1144
1145         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1146         cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1147         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1148 }
1149
1150 #else /* GROUP_IOSCHED */
1151 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1152 {
1153         return &cfqd->root_group;
1154 }
1155
1156 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1157 {
1158         return cfqg;
1159 }
1160
1161 static inline void
1162 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1163         cfqq->cfqg = cfqg;
1164 }
1165
1166 static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1167 static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1168
1169 #endif /* GROUP_IOSCHED */
1170
1171 /*
1172  * The cfqd->service_trees holds all pending cfq_queue's that have
1173  * requests waiting to be processed. It is sorted in the order that
1174  * we will service the queues.
1175  */
1176 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1177                                  bool add_front)
1178 {
1179         struct rb_node **p, *parent;
1180         struct cfq_queue *__cfqq;
1181         unsigned long rb_key;
1182         struct cfq_rb_root *service_tree;
1183         int left;
1184         int new_cfqq = 1;
1185         int group_changed = 0;
1186
1187         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1188                                                 cfqq_type(cfqq));
1189         if (cfq_class_idle(cfqq)) {
1190                 rb_key = CFQ_IDLE_DELAY;
1191                 parent = rb_last(&service_tree->rb);
1192                 if (parent && parent != &cfqq->rb_node) {
1193                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1194                         rb_key += __cfqq->rb_key;
1195                 } else
1196                         rb_key += jiffies;
1197         } else if (!add_front) {
1198                 /*
1199                  * Get our rb key offset. Subtract any residual slice
1200                  * value carried from last service. A negative resid
1201                  * count indicates slice overrun, and this should position
1202                  * the next service time further away in the tree.
1203                  */
1204                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1205                 rb_key -= cfqq->slice_resid;
1206                 cfqq->slice_resid = 0;
1207         } else {
1208                 rb_key = -HZ;
1209                 __cfqq = cfq_rb_first(service_tree);
1210                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1211         }
1212
1213         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1214                 new_cfqq = 0;
1215                 /*
1216                  * same position, nothing more to do
1217                  */
1218                 if (rb_key == cfqq->rb_key &&
1219                     cfqq->service_tree == service_tree)
1220                         return;
1221
1222                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1223                 cfqq->service_tree = NULL;
1224         }
1225
1226         left = 1;
1227         parent = NULL;
1228         cfqq->service_tree = service_tree;
1229         p = &service_tree->rb.rb_node;
1230         while (*p) {
1231                 struct rb_node **n;
1232
1233                 parent = *p;
1234                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1235
1236                 /*
1237                  * sort by key, that represents service time.
1238                  */
1239                 if (time_before(rb_key, __cfqq->rb_key))
1240                         n = &(*p)->rb_left;
1241                 else {
1242                         n = &(*p)->rb_right;
1243                         left = 0;
1244                 }
1245
1246                 p = n;
1247         }
1248
1249         if (left)
1250                 service_tree->left = &cfqq->rb_node;
1251
1252         cfqq->rb_key = rb_key;
1253         rb_link_node(&cfqq->rb_node, parent, p);
1254         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1255         service_tree->count++;
1256         if ((add_front || !new_cfqq) && !group_changed)
1257                 return;
1258         cfq_group_service_tree_add(cfqd, cfqq->cfqg);
1259 }
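/*
 * Worked example (assuming HZ=1000 defaults): for a prio-4 sync queue in a
 * group with 4 queues and no residual slice, cfq_slice_offset() is
 * (4 - 1) * (180 - 100) = 240, so rb_key = jiffies + 240. A queue added with
 * add_front instead gets a key HZ below the current leftmost entry, so it is
 * served next.
 */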
1260
1261 static struct cfq_queue *
1262 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1263                      sector_t sector, struct rb_node **ret_parent,
1264                      struct rb_node ***rb_link)
1265 {
1266         struct rb_node **p, *parent;
1267         struct cfq_queue *cfqq = NULL;
1268
1269         parent = NULL;
1270         p = &root->rb_node;
1271         while (*p) {
1272                 struct rb_node **n;
1273
1274                 parent = *p;
1275                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1276
1277                 /*
1278                  * Sort strictly based on sector.  Smallest to the left,
1279                  * largest to the right.
1280                  */
1281                 if (sector > blk_rq_pos(cfqq->next_rq))
1282                         n = &(*p)->rb_right;
1283                 else if (sector < blk_rq_pos(cfqq->next_rq))
1284                         n = &(*p)->rb_left;
1285                 else
1286                         break;
1287                 p = n;
1288                 cfqq = NULL;
1289         }
1290
1291         *ret_parent = parent;
1292         if (rb_link)
1293                 *rb_link = p;
1294         return cfqq;
1295 }
1296
1297 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1298 {
1299         struct rb_node **p, *parent;
1300         struct cfq_queue *__cfqq;
1301
1302         if (cfqq->p_root) {
1303                 rb_erase(&cfqq->p_node, cfqq->p_root);
1304                 cfqq->p_root = NULL;
1305         }
1306
1307         if (cfq_class_idle(cfqq))
1308                 return;
1309         if (!cfqq->next_rq)
1310                 return;
1311
1312         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1313         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1314                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1315         if (!__cfqq) {
1316                 rb_link_node(&cfqq->p_node, parent, p);
1317                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1318         } else
1319                 cfqq->p_root = NULL;
1320 }
1321
1322 /*
1323  * Update cfqq's position in the service tree.
1324  */
1325 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1326 {
1327         /*
1328          * Resorting requires the cfqq to be on the RR list already.
1329          */
1330         if (cfq_cfqq_on_rr(cfqq)) {
1331                 cfq_service_tree_add(cfqd, cfqq, 0);
1332                 cfq_prio_tree_add(cfqd, cfqq);
1333         }
1334 }
1335
1336 /*
1337  * add to busy list of queues for service, trying to be fair in ordering
1338  * the pending list according to last request service
1339  */
1340 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1341 {
1342         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1343         BUG_ON(cfq_cfqq_on_rr(cfqq));
1344         cfq_mark_cfqq_on_rr(cfqq);
1345         cfqd->busy_queues++;
1346         if (cfq_cfqq_sync(cfqq))
1347                 cfqd->busy_sync_queues++;
1348
1349         cfq_resort_rr_list(cfqd, cfqq);
1350 }
1351
1352 /*
1353  * Called when the cfqq no longer has requests pending, remove it from
1354  * the service tree.
1355  */
1356 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1357 {
1358         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1359         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1360         cfq_clear_cfqq_on_rr(cfqq);
1361
1362         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1363                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1364                 cfqq->service_tree = NULL;
1365         }
1366         if (cfqq->p_root) {
1367                 rb_erase(&cfqq->p_node, cfqq->p_root);
1368                 cfqq->p_root = NULL;
1369         }
1370
1371         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1372         BUG_ON(!cfqd->busy_queues);
1373         cfqd->busy_queues--;
1374         if (cfq_cfqq_sync(cfqq))
1375                 cfqd->busy_sync_queues--;
1376 }
1377
1378 /*
1379  * rb tree support functions
1380  */
1381 static void cfq_del_rq_rb(struct request *rq)
1382 {
1383         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1384         const int sync = rq_is_sync(rq);
1385
1386         BUG_ON(!cfqq->queued[sync]);
1387         cfqq->queued[sync]--;
1388
1389         elv_rb_del(&cfqq->sort_list, rq);
1390
1391         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1392                 /*
1393                  * Queue will be deleted from service tree when we actually
1394                  * expire it later. Right now just remove it from prio tree
1395                  * as it is empty.
1396                  */
1397                 if (cfqq->p_root) {
1398                         rb_erase(&cfqq->p_node, cfqq->p_root);
1399                         cfqq->p_root = NULL;
1400                 }
1401         }
1402 }
1403
1404 static void cfq_add_rq_rb(struct request *rq)
1405 {
1406         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1407         struct cfq_data *cfqd = cfqq->cfqd;
1408         struct request *__alias, *prev;
1409
1410         cfqq->queued[rq_is_sync(rq)]++;
1411
1412         /*
1413          * looks a little odd, but the first insert might return an alias.
1414          * if that happens, put the alias on the dispatch list
1415          */
1416         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
1417                 cfq_dispatch_insert(cfqd->queue, __alias);
1418
1419         if (!cfq_cfqq_on_rr(cfqq))
1420                 cfq_add_cfqq_rr(cfqd, cfqq);
1421
1422         /*
1423          * check if this request is a better next-serve candidate
1424          */
1425         prev = cfqq->next_rq;
1426         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1427
1428         /*
1429          * adjust priority tree position, if ->next_rq changes
1430          */
1431         if (prev != cfqq->next_rq)
1432                 cfq_prio_tree_add(cfqd, cfqq);
1433
1434         BUG_ON(!cfqq->next_rq);
1435 }
1436
1437 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1438 {
1439         elv_rb_del(&cfqq->sort_list, rq);
1440         cfqq->queued[rq_is_sync(rq)]--;
1441         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1442                                         rq_data_dir(rq), rq_is_sync(rq));
1443         cfq_add_rq_rb(rq);
1444         cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
1445                         &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1446                         rq_is_sync(rq));
1447 }
1448
1449 static struct request *
1450 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1451 {
1452         struct task_struct *tsk = current;
1453         struct cfq_io_context *cic;
1454         struct cfq_queue *cfqq;
1455
1456         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1457         if (!cic)
1458                 return NULL;
1459
1460         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1461         if (cfqq) {
1462                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1463
1464                 return elv_rb_find(&cfqq->sort_list, sector);
1465         }
1466
1467         return NULL;
1468 }
1469
1470 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1471 {
1472         struct cfq_data *cfqd = q->elevator->elevator_data;
1473
1474         cfqd->rq_in_driver++;
1475         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1476                                                 cfqd->rq_in_driver);
1477
1478         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1479 }
1480
1481 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1482 {
1483         struct cfq_data *cfqd = q->elevator->elevator_data;
1484
1485         WARN_ON(!cfqd->rq_in_driver);
1486         cfqd->rq_in_driver--;
1487         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1488                                                 cfqd->rq_in_driver);
1489 }
1490
1491 static void cfq_remove_request(struct request *rq)
1492 {
1493         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1494
1495         if (cfqq->next_rq == rq)
1496                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1497
1498         list_del_init(&rq->queuelist);
1499         cfq_del_rq_rb(rq);
1500
1501         cfqq->cfqd->rq_queued--;
1502         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1503                                         rq_data_dir(rq), rq_is_sync(rq));
1504         if (rq->cmd_flags & REQ_META) {
1505                 WARN_ON(!cfqq->meta_pending);
1506                 cfqq->meta_pending--;
1507         }
1508 }
1509
1510 static int cfq_merge(struct request_queue *q, struct request **req,
1511                      struct bio *bio)
1512 {
1513         struct cfq_data *cfqd = q->elevator->elevator_data;
1514         struct request *__rq;
1515
1516         __rq = cfq_find_rq_fmerge(cfqd, bio);
1517         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1518                 *req = __rq;
1519                 return ELEVATOR_FRONT_MERGE;
1520         }
1521
1522         return ELEVATOR_NO_MERGE;
1523 }
1524
1525 static void cfq_merged_request(struct request_queue *q, struct request *req,
1526                                int type)
1527 {
1528         if (type == ELEVATOR_FRONT_MERGE) {
1529                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1530
1531                 cfq_reposition_rq_rb(cfqq, req);
1532         }
1533 }
1534
1535 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1536                                 struct bio *bio)
1537 {
1538         cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
1539                                         bio_data_dir(bio), cfq_bio_sync(bio));
1540 }
1541
1542 static void
1543 cfq_merged_requests(struct request_queue *q, struct request *rq,
1544                     struct request *next)
1545 {
1546         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1547         /*
1548          * reposition in fifo if next is older than rq
1549          */
1550         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1551             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1552                 list_move(&rq->queuelist, &next->queuelist);
1553                 rq_set_fifo_time(rq, rq_fifo_time(next));
1554         }
1555
1556         if (cfqq->next_rq == next)
1557                 cfqq->next_rq = rq;
1558         cfq_remove_request(next);
1559         cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
1560                                         rq_data_dir(next), rq_is_sync(next));
1561 }
1562
1563 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1564                            struct bio *bio)
1565 {
1566         struct cfq_data *cfqd = q->elevator->elevator_data;
1567         struct cfq_io_context *cic;
1568         struct cfq_queue *cfqq;
1569
1570         /*
1571          * Disallow merge of a sync bio into an async request.
1572          */
1573         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1574                 return false;
1575
1576         /*
1577          * Lookup the cfqq that this bio will be queued with. Allow
1578          * merge only if rq is queued there.
1579          */
1580         cic = cfq_cic_lookup(cfqd, current->io_context);
1581         if (!cic)
1582                 return false;
1583
1584         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1585         return cfqq == RQ_CFQQ(rq);
1586 }
1587
1588 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1589 {
1590         del_timer(&cfqd->idle_slice_timer);
1591         cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1592 }
1593
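/*
 * Make cfqq the active queue: reset its per-slice accounting, clear the
 * state flags left over from a previous slice, mark the slice as new and
 * cancel any pending idle timer.
 */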
1594 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1595                                    struct cfq_queue *cfqq)
1596 {
1597         if (cfqq) {
1598                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1599                                 cfqd->serving_prio, cfqd->serving_type);
1600                 cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1601                 cfqq->slice_start = 0;
1602                 cfqq->dispatch_start = jiffies;
1603                 cfqq->allocated_slice = 0;
1604                 cfqq->slice_end = 0;
1605                 cfqq->slice_dispatch = 0;
1606                 cfqq->nr_sectors = 0;
1607
1608                 cfq_clear_cfqq_wait_request(cfqq);
1609                 cfq_clear_cfqq_must_dispatch(cfqq);
1610                 cfq_clear_cfqq_must_alloc_slice(cfqq);
1611                 cfq_clear_cfqq_fifo_expire(cfqq);
1612                 cfq_mark_cfqq_slice_new(cfqq);
1613
1614                 cfq_del_timer(cfqd, cfqq);
1615         }
1616
1617         cfqd->active_queue = cfqq;
1618 }
1619
1620 /*
1621  * current cfqq expired its slice (or was too idle), select new one
1622  */
1623 static void
1624 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1625                     bool timed_out)
1626 {
1627         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1628
1629         if (cfq_cfqq_wait_request(cfqq))
1630                 cfq_del_timer(cfqd, cfqq);
1631
1632         cfq_clear_cfqq_wait_request(cfqq);
1633         cfq_clear_cfqq_wait_busy(cfqq);
1634
1635         /*
1636          * If this cfqq is shared between multiple processes, check to
1637          * make sure that those processes are still issuing I/Os within
1638          * the mean seek distance.  If not, it may be time to break the
1639          * queues apart again.
1640          */
1641         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1642                 cfq_mark_cfqq_split_coop(cfqq);
1643
1644         /*
1645          * store what was left of this slice, if the queue idled/timed out
1646          */
1647         if (timed_out) {
1648                 if (cfq_cfqq_slice_new(cfqq))
1649                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
1650                 else
1651                         cfqq->slice_resid = cfqq->slice_end - jiffies;
1652                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1653         }
1654
1655         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1656
1657         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1658                 cfq_del_cfqq_rr(cfqd, cfqq);
1659
1660         cfq_resort_rr_list(cfqd, cfqq);
1661
1662         if (cfqq == cfqd->active_queue)
1663                 cfqd->active_queue = NULL;
1664
1665         if (cfqd->active_cic) {
1666                 put_io_context(cfqd->active_cic->ioc);
1667                 cfqd->active_cic = NULL;
1668         }
1669 }
1670
1671 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1672 {
1673         struct cfq_queue *cfqq = cfqd->active_queue;
1674
1675         if (cfqq)
1676                 __cfq_slice_expired(cfqd, cfqq, timed_out);
1677 }
1678
1679 /*
1680  * Get next queue for service. Unless we have a queue preemption,
1681  * we'll simply select the first cfqq in the service tree.
1682  */
1683 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1684 {
1685         struct cfq_rb_root *service_tree =
1686                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1687                                         cfqd->serving_type);
1688
1689         if (!cfqd->rq_queued)
1690                 return NULL;
1691
1692         /* There is nothing to dispatch */
1693         if (!service_tree)
1694                 return NULL;
1695         if (RB_EMPTY_ROOT(&service_tree->rb))
1696                 return NULL;
1697         return cfq_rb_first(service_tree);
1698 }
1699
1700 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1701 {
1702         struct cfq_group *cfqg;
1703         struct cfq_queue *cfqq;
1704         int i, j;
1705         struct cfq_rb_root *st;
1706
1707         if (!cfqd->rq_queued)
1708                 return NULL;
1709
1710         cfqg = cfq_get_next_cfqg(cfqd);
1711         if (!cfqg)
1712                 return NULL;
1713
1714         for_each_cfqg_st(cfqg, i, j, st)
1715                 if ((cfqq = cfq_rb_first(st)) != NULL)
1716                         return cfqq;
1717         return NULL;
1718 }
1719
1720 /*
1721  * Get and set a new active queue for service.
1722  */
1723 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1724                                               struct cfq_queue *cfqq)
1725 {
1726         if (!cfqq)
1727                 cfqq = cfq_get_next_queue(cfqd);
1728
1729         __cfq_set_active_queue(cfqd, cfqq);
1730         return cfqq;
1731 }
1732
1733 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1734                                           struct request *rq)
1735 {
1736         if (blk_rq_pos(rq) >= cfqd->last_position)
1737                 return blk_rq_pos(rq) - cfqd->last_position;
1738         else
1739                 return cfqd->last_position - blk_rq_pos(rq);
1740 }
1741
1742 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1743                                struct request *rq)
1744 {
1745         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1746 }
1747
1748 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1749                                     struct cfq_queue *cur_cfqq)
1750 {
1751         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1752         struct rb_node *parent, *node;
1753         struct cfq_queue *__cfqq;
1754         sector_t sector = cfqd->last_position;
1755
1756         if (RB_EMPTY_ROOT(root))
1757                 return NULL;
1758
1759         /*
1760          * First, if we find a request starting at the end of the last
1761          * request, choose it.
1762          */
1763         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1764         if (__cfqq)
1765                 return __cfqq;
1766
1767         /*
1768          * If the exact sector wasn't found, the parent of the NULL leaf
1769          * will contain the closest sector.
1770          */
1771         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1772         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1773                 return __cfqq;
1774
1775         if (blk_rq_pos(__cfqq->next_rq) < sector)
1776                 node = rb_next(&__cfqq->p_node);
1777         else
1778                 node = rb_prev(&__cfqq->p_node);
1779         if (!node)
1780                 return NULL;
1781
1782         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1783         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1784                 return __cfqq;
1785
1786         return NULL;
1787 }
1788
1789 /*
1790  * cfqd - obvious
1791  * cur_cfqq - passed in so that we don't decide that the current queue is
1792  *            closely cooperating with itself.
1793  *
1794  * So, basically we're assuming that cur_cfqq has dispatched at least
1795  * one request, and that cfqd->last_position reflects a position on the disk
1796  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1797  * assumption.
1798  */
1799 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1800                                               struct cfq_queue *cur_cfqq)
1801 {
1802         struct cfq_queue *cfqq;
1803
1804         if (cfq_class_idle(cur_cfqq))
1805                 return NULL;
1806         if (!cfq_cfqq_sync(cur_cfqq))
1807                 return NULL;
1808         if (CFQQ_SEEKY(cur_cfqq))
1809                 return NULL;
1810
1811         /*
1812          * Don't search priority tree if it's the only queue in the group.
1813          */
1814         if (cur_cfqq->cfqg->nr_cfqq == 1)
1815                 return NULL;
1816
1817         /*
1818          * We should notice if some of the queues are cooperating, e.g.
1819          * working closely on the same area of the disk. In that case,
1820          * we can group them together and not waste time idling.
1821          */
1822         cfqq = cfqq_close(cfqd, cur_cfqq);
1823         if (!cfqq)
1824                 return NULL;
1825
1826         /* If new queue belongs to different cfq_group, don't choose it */
1827         if (cur_cfqq->cfqg != cfqq->cfqg)
1828                 return NULL;
1829
1830         /*
1831          * It only makes sense to merge sync queues.
1832          */
1833         if (!cfq_cfqq_sync(cfqq))
1834                 return NULL;
1835         if (CFQQ_SEEKY(cfqq))
1836                 return NULL;
1837
1838         /*
1839          * Do not merge queues of different priority classes
1840          */
1841         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1842                 return NULL;
1843
1844         return cfqq;
1845 }
1846
1847 /*
1848  * Determine whether we should enforce idle window for this queue.
1849  */
1850
1851 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1852 {
1853         enum wl_prio_t prio = cfqq_prio(cfqq);
1854         struct cfq_rb_root *service_tree = cfqq->service_tree;
1855
1856         BUG_ON(!service_tree);
1857         BUG_ON(!service_tree->count);
1858
1859         if (!cfqd->cfq_slice_idle)
1860                 return false;
1861
1862         /* We never do for idle class queues. */
1863         if (prio == IDLE_WORKLOAD)
1864                 return false;
1865
1866         /* We do for queues that were marked with idle window flag. */
1867         if (cfq_cfqq_idle_window(cfqq) &&
1868            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1869                 return true;
1870
1871         /*
1872          * Otherwise, we idle only if the queue is the last one
1873          * in its service tree.
1874          */
1875         if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
1876                 return true;
1877         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1878                         service_tree->count);
1879         return false;
1880 }
1881
1882 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1883 {
1884         struct cfq_queue *cfqq = cfqd->active_queue;
1885         struct cfq_io_context *cic;
1886         unsigned long sl, group_idle = 0;
1887
1888         /*
1889          * On an SSD without a seek penalty, disable idling. But only do so
1890          * for devices that support queuing; otherwise we still have a problem
1891          * with sync vs async workloads.
1892          */
1893         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1894                 return;
1895
1896         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1897         WARN_ON(cfq_cfqq_slice_new(cfqq));
1898
1899         /*
1900          * idle is disabled, either manually or by past process history
1901          */
1902         if (!cfq_should_idle(cfqd, cfqq)) {
1903                 /* no queue idling. Check for group idling */
1904                 if (cfqd->cfq_group_idle)
1905                         group_idle = cfqd->cfq_group_idle;
1906                 else
1907                         return;
1908         }
1909
1910         /*
1911          * still active requests from this queue, don't idle
1912          */
1913         if (cfqq->dispatched)
1914                 return;
1915
1916         /*
1917          * task has exited, don't wait
1918          */
1919         cic = cfqd->active_cic;
1920         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1921                 return;
1922
1923         /*
1924          * If our average think time is larger than the remaining time
1925          * slice, then don't idle. This avoids overrunning the allotted
1926          * time slice.
1927          */
1928         if (sample_valid(cic->ttime_samples) &&
1929             (cfqq->slice_end - jiffies < cic->ttime_mean)) {
1930                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
1931                                 cic->ttime_mean);
1932                 return;
1933         }
1934
1935         /* There are other queues in the group, don't do group idle */
1936         if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1937                 return;
1938
1939         cfq_mark_cfqq_wait_request(cfqq);
1940
1941         if (group_idle)
1942                 sl = cfqd->cfq_group_idle;
1943         else
1944                 sl = cfqd->cfq_slice_idle;
1945
1946         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1947         cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
1948         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1949                         group_idle ? 1 : 0);
1950 }
1951
1952 /*
1953  * Move request from internal lists to the request queue dispatch list.
1954  */
1955 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1956 {
1957         struct cfq_data *cfqd = q->elevator->elevator_data;
1958         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1959
1960         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1961
1962         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1963         cfq_remove_request(rq);
1964         cfqq->dispatched++;
1965         (RQ_CFQG(rq))->dispatched++;
1966         elv_dispatch_sort(q, rq);
1967
1968         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
1969         cfqq->nr_sectors += blk_rq_sectors(rq);
1970         cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
1971                                         rq_data_dir(rq), rq_is_sync(rq));
1972 }
1973
1974 /*
1975  * return expired entry, or NULL to just start from scratch in rbtree
1976  */
1977 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1978 {
1979         struct request *rq = NULL;
1980
1981         if (cfq_cfqq_fifo_expire(cfqq))
1982                 return NULL;
1983
1984         cfq_mark_cfqq_fifo_expire(cfqq);
1985
1986         if (list_empty(&cfqq->fifo))
1987                 return NULL;
1988
1989         rq = rq_entry_fifo(cfqq->fifo.next);
1990         if (time_before(jiffies, rq_fifo_time(rq)))
1991                 rq = NULL;
1992
1993         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1994         return rq;
1995 }
1996
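/*
 * Worked example for the formula below (assuming cfq_slice_async_rq = 2 and
 * CFQ_PRIO_LISTS = 8): a prio 0 queue gets 2 * (2 + 2 * 7) = 32 as its
 * dispatch budget, while a prio 7 queue gets only 2 * (2 + 0) = 4.
 */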
1997 static inline int
1998 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1999 {
2000         const int base_rq = cfqd->cfq_slice_async_rq;
2001
2002         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2003
2004         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
2005 }
2006
2007 /*
2008  * Must be called with the queue_lock held.
2009  */
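/*
 * A cfqq's ref count includes one reference per attached process plus one
 * per currently allocated request; subtracting the request references
 * leaves the number of processes still using the queue.
 */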
2010 static int cfqq_process_refs(struct cfq_queue *cfqq)
2011 {
2012         int process_refs, io_refs;
2013
2014         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2015         process_refs = cfqq->ref - io_refs;
2016         BUG_ON(process_refs < 0);
2017         return process_refs;
2018 }
2019
2020 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2021 {
2022         int process_refs, new_process_refs;
2023         struct cfq_queue *__cfqq;
2024
2025         /*
2026          * If there are no process references on the new_cfqq, then it is
2027          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2028          * chain may have dropped their last reference (not just their
2029          * last process reference).
2030          */
2031         if (!cfqq_process_refs(new_cfqq))
2032                 return;
2033
2034         /* Avoid a circular list and skip interim queue merges */
2035         while ((__cfqq = new_cfqq->new_cfqq)) {
2036                 if (__cfqq == cfqq)
2037                         return;
2038                 new_cfqq = __cfqq;
2039         }
2040
2041         process_refs = cfqq_process_refs(cfqq);
2042         new_process_refs = cfqq_process_refs(new_cfqq);
2043         /*
2044          * If the process for the cfqq has gone away, there is no
2045          * sense in merging the queues.
2046          */
2047         if (process_refs == 0 || new_process_refs == 0)
2048                 return;
2049
2050         /*
2051          * Merge in the direction of the lesser amount of work.
2052          */
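        /*
         * For example, if cfqq has one process reference and new_cfqq has
         * three, cfqq is pointed at new_cfqq so that only the single
         * process needs to be switched over to the merged queue.
         */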
2053         if (new_process_refs >= process_refs) {
2054                 cfqq->new_cfqq = new_cfqq;
2055                 new_cfqq->ref += process_refs;
2056         } else {
2057                 new_cfqq->new_cfqq = cfqq;
2058                 cfqq->ref += new_process_refs;
2059         }
2060 }
2061
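/*
 * Pick the workload type (async, sync-noidle, sync) whose service tree
 * holds the queue with the lowest rb_key within this priority class,
 * i.e. the workload that has been waiting longest for service.
 */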
2062 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2063                                 struct cfq_group *cfqg, enum wl_prio_t prio)
2064 {
2065         struct cfq_queue *queue;
2066         int i;
2067         bool key_valid = false;
2068         unsigned long lowest_key = 0;
2069         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2070
2071         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2072                 /* select the one with lowest rb_key */
2073                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2074                 if (queue &&
2075                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
2076                         lowest_key = queue->rb_key;
2077                         cur_best = i;
2078                         key_valid = true;
2079                 }
2080         }
2081
2082         return cur_best;
2083 }
2084
2085 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2086 {
2087         unsigned slice;
2088         unsigned count;
2089         struct cfq_rb_root *st;
2090         unsigned group_slice;
2091         enum wl_prio_t original_prio = cfqd->serving_prio;
2092
2093         /* Choose next priority. RT > BE > IDLE */
2094         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2095                 cfqd->serving_prio = RT_WORKLOAD;
2096         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2097                 cfqd->serving_prio = BE_WORKLOAD;
2098         else {
2099                 cfqd->serving_prio = IDLE_WORKLOAD;
2100                 cfqd->workload_expires = jiffies + 1;
2101                 return;
2102         }
2103
2104         if (original_prio != cfqd->serving_prio)
2105                 goto new_workload;
2106
2107         /*
2108          * For RT and BE, we also have to choose the type
2109          * (SYNC, SYNC_NOIDLE, ASYNC) and to compute a workload
2110          * expiration time
2111          */
2112         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2113         count = st->count;
2114
2115         /*
2116          * check workload expiration, and that we still have other queues ready
2117          */
2118         if (count && !time_after(jiffies, cfqd->workload_expires))
2119                 return;
2120
2121 new_workload:
2122         /* otherwise select new workload type */
2123         cfqd->serving_type =
2124                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2125         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2126         count = st->count;
2127
2128         /*
2129          * the workload slice is computed as a fraction of target latency
2130          * proportional to the number of queues in that workload, over
2131          * all the queues in the same priority class
2132          */
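        /*
         * Illustrative numbers (assumed, not defaults): with a 300ms group
         * slice, 2 queues on the chosen workload tree and 6 busy queues of
         * this priority class in the group, the workload gets
         * 300 * 2 / 6 = 100ms before the next workload switch, prior to the
         * sync/async adjustments below.
         */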
2133         group_slice = cfq_group_slice(cfqd, cfqg);
2134
2135         slice = group_slice * count /
2136                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2137                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2138
2139         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2140                 unsigned int tmp;
2141
2142                 /*
2143                  * Async queues are currently system wide. Just taking the
2144                  * proportion of queues within the same group would lead to a
2145                  * higher async ratio system wide, as the root group generally
2146                  * has a higher weight. A more accurate approach would be to
2147                  * calculate a system wide async/sync ratio.
2148                  */
2149                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2150                 tmp = tmp/cfqd->busy_queues;
2151                 slice = min_t(unsigned, slice, tmp);
2152
2153                 /* async workload slice is scaled down by the
2154                  * async/sync slice ratio. */
2155                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2156         } else
2157                 /* sync workload slice is at least 2 * cfq_slice_idle */
2158                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2159
2160         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2161         cfq_log(cfqd, "workload slice:%d", slice);
2162         cfqd->workload_expires = jiffies + slice;
2163 }
2164
2165 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2166 {
2167         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2168         struct cfq_group *cfqg;
2169
2170         if (RB_EMPTY_ROOT(&st->rb))
2171                 return NULL;
2172         cfqg = cfq_rb_first_group(st);
2173         update_min_vdisktime(st);
2174         return cfqg;
2175 }
2176
2177 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2178 {
2179         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2180
2181         cfqd->serving_group = cfqg;
2182
2183         /* Restore the workload type data */
2184         if (cfqg->saved_workload_slice) {
2185                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2186                 cfqd->serving_type = cfqg->saved_workload;
2187                 cfqd->serving_prio = cfqg->saved_serving_prio;
2188         } else
2189                 cfqd->workload_expires = jiffies - 1;
2190
2191         choose_service_tree(cfqd, cfqg);
2192 }
2193
2194 /*
2195  * Select a queue for service. If we have a current active queue,
2196  * check whether to continue servicing it, or retrieve and set a new one.
2197  */
2198 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2199 {
2200         struct cfq_queue *cfqq, *new_cfqq = NULL;
2201
2202         cfqq = cfqd->active_queue;
2203         if (!cfqq)
2204                 goto new_queue;
2205
2206         if (!cfqd->rq_queued)
2207                 return NULL;
2208
2209         /*
2210          * We were waiting for the group to get backlogged. Expire the queue
2211          */
2212         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2213                 goto expire;
2214
2215         /*
2216          * The active queue has run out of time, expire it and select new.
2217          */
2218         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2219                 /*
2220                  * If slice had not expired at the completion of last request
2221                  * we might not have turned on wait_busy flag. Don't expire
2222                  * the queue yet. Allow the group to get backlogged.
2223                  *
2224                  * The very fact that we have used up the slice means we
2225                  * have been idling all along on this queue and it should be
2226                  * ok to wait for this request to complete.
2227                  */
2228                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2229                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2230                         cfqq = NULL;
2231                         goto keep_queue;
2232                 } else
2233                         goto check_group_idle;
2234         }
2235
2236         /*
2237          * The active queue has requests and isn't expired, allow it to
2238          * dispatch.
2239          */
2240         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2241                 goto keep_queue;
2242
2243         /*
2244          * If another queue has a request waiting within our mean seek
2245          * distance, let it run.  The expire code will check for close
2246          * cooperators and put the close queue at the front of the service
2247          * tree.  If possible, merge the expiring queue with the new cfqq.
2248          */
2249         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2250         if (new_cfqq) {
2251                 if (!cfqq->new_cfqq)
2252                         cfq_setup_merge(cfqq, new_cfqq);
2253                 goto expire;
2254         }
2255
2256         /*
2257          * No requests pending. If the active queue still has requests in
2258          * flight or is idling for a new request, allow either of these
2259          * conditions to happen (or time out) before selecting a new queue.
2260          */
2261         if (timer_pending(&cfqd->idle_slice_timer)) {
2262                 cfqq = NULL;
2263                 goto keep_queue;
2264         }
2265
2266         /*
2267          * This is a deep seek queue, but the device is much faster than
2268          * the queue can deliver; don't idle
2269          */
2270         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2271             (cfq_cfqq_slice_new(cfqq) ||
2272             (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2273                 cfq_clear_cfqq_deep(cfqq);
2274                 cfq_clear_cfqq_idle_window(cfqq);
2275         }
2276
2277         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2278                 cfqq = NULL;
2279                 goto keep_queue;
2280         }
2281
2282         /*
2283          * If group idle is enabled and there are requests dispatched from
2284          * this group, wait for requests to complete.
2285          */
2286 check_group_idle:
2287         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
2288             && cfqq->cfqg->dispatched) {
2289                 cfqq = NULL;
2290                 goto keep_queue;
2291         }
2292
2293 expire:
2294         cfq_slice_expired(cfqd, 0);
2295 new_queue:
2296         /*
2297          * Current queue expired. Check if we have to switch to a new
2298          * service tree
2299          */
2300         if (!new_cfqq)
2301                 cfq_choose_cfqg(cfqd);
2302
2303         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2304 keep_queue:
2305         return cfqq;
2306 }
2307
2308 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2309 {
2310         int dispatched = 0;
2311
2312         while (cfqq->next_rq) {
2313                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2314                 dispatched++;
2315         }
2316
2317         BUG_ON(!list_empty(&cfqq->fifo));
2318
2319         /* By default cfqq is not expired if it is empty. Do it explicitly */
2320         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2321         return dispatched;
2322 }
2323
2324 /*
2325  * Drain our current requests. Used for barriers and when switching
2326  * io schedulers on-the-fly.
2327  */
2328 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2329 {
2330         struct cfq_queue *cfqq;
2331         int dispatched = 0;
2332
2333         /* Expire the timeslice of the current active queue first */
2334         cfq_slice_expired(cfqd, 0);
2335         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2336                 __cfq_set_active_queue(cfqd, cfqq);
2337                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2338         }
2339
2340         BUG_ON(cfqd->busy_queues);
2341
2342         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2343         return dispatched;
2344 }
2345
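/*
 * Estimate whether the slice is about to run out: assume each request
 * already dispatched takes roughly cfq_slice_idle to complete and check
 * whether that would carry us past slice_end.
 */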
2346 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2347         struct cfq_queue *cfqq)
2348 {
2349         /* the queue hasn't finished any request, can't estimate */
2350         if (cfq_cfqq_slice_new(cfqq))
2351                 return true;
2352         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2353                 cfqq->slice_end))
2354                 return true;
2355
2356         return false;
2357 }
2358
2359 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2360 {
2361         unsigned int max_dispatch;
2362
2363         /*
2364          * Drain async requests before we start sync IO
2365          */
2366         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2367                 return false;
2368
2369         /*
2370          * If this is an async queue and we have sync IO in flight, let it wait
2371          */
2372         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2373                 return false;
2374
2375         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2376         if (cfq_class_idle(cfqq))
2377                 max_dispatch = 1;
2378
2379         /*
2380          * Does this cfqq already have too much IO in flight?
2381          */
2382         if (cfqq->dispatched >= max_dispatch) {
2383                 bool promote_sync = false;
2384                 /*
2385                  * idle queue must always only have a single IO in flight
2386                  */
2387                 if (cfq_class_idle(cfqq))
2388                         return false;
2389
2390                 /*
2391                  * If there is only one sync queue and its think time is
2392                  * small, we can ignore the async queue here and give the
2393                  * sync queue no dispatch limit. Since a sync queue can
2394                  * preempt the async queue anyway, limiting the sync queue
2395                  * makes no sense. This is useful for the aiostress test.
2396                  */
2397                 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) {
2398                         struct cfq_io_context *cic = RQ_CIC(cfqq->next_rq);
2399
2400                         if (sample_valid(cic->ttime_samples) &&
2401                                 cic->ttime_mean < cfqd->cfq_slice_idle)
2402                                 promote_sync = true;
2403                 }
2404
2405                 /*
2406                  * We have other queues, don't allow more IO from this one
2407                  */
2408                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2409                                 !promote_sync)
2410                         return false;
2411
2412                 /*
2413                  * Sole queue user, no limit
2414                  */
2415                 if (cfqd->busy_queues == 1 || promote_sync)
2416                         max_dispatch = -1;
2417                 else
2418                         /*
2419                          * Normally we start throttling cfqq when cfq_quantum/2
2420                          * requests have been dispatched. But we can drive
2421                          * deeper queue depths at the beginning of the slice,
2422                          * subject to an upper limit of cfq_quantum.
2423                          */
2424                         max_dispatch = cfqd->cfq_quantum;
2425         }
2426
2427         /*
2428          * Async queues must wait a bit before being allowed dispatch.
2429          * We also ramp up the dispatch depth gradually for async IO,
2430          * based on the last sync IO we serviced
2431          */
2432         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2433                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2434                 unsigned int depth;
2435
2436                 depth = last_sync / cfqd->cfq_slice[1];
2437                 if (!depth && !cfqq->dispatched)
2438                         depth = 1;
2439                 if (depth < max_dispatch)
2440                         max_dispatch = depth;
2441         }
2442
2443         /*
2444          * If we're below the current max, allow a dispatch
2445          */
2446         return cfqq->dispatched < max_dispatch;
2447 }
2448
2449 /*
2450  * Dispatch a request from cfqq, moving it to the request queue
2451  * dispatch list.
2452  */
2453 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2454 {
2455         struct request *rq;
2456
2457         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2458
2459         if (!cfq_may_dispatch(cfqd, cfqq))
2460                 return false;
2461
2462         /*
2463          * follow expired path, else get first next available
2464          */
2465         rq = cfq_check_fifo(cfqq);
2466         if (!rq)
2467                 rq = cfqq->next_rq;
2468
2469         /*
2470          * insert request into driver dispatch list
2471          */
2472         cfq_dispatch_insert(cfqd->queue, rq);
2473
2474         if (!cfqd->active_cic) {
2475                 struct cfq_io_context *cic = RQ_CIC(rq);
2476
2477                 atomic_long_inc(&cic->ioc->refcount);
2478                 cfqd->active_cic = cic;
2479         }
2480
2481         return true;
2482 }
2483
2484 /*
2485  * Find the cfqq that we need to service and move a request from that to the
2486  * dispatch list
2487  */
2488 static int cfq_dispatch_requests(struct request_queue *q, int force)
2489 {
2490         struct cfq_data *cfqd = q->elevator->elevator_data;
2491         struct cfq_queue *cfqq;
2492
2493         if (!cfqd->busy_queues)
2494                 return 0;
2495
2496         if (unlikely(force))
2497                 return cfq_forced_dispatch(cfqd);
2498
2499         cfqq = cfq_select_queue(cfqd);
2500         if (!cfqq)
2501                 return 0;
2502
2503         /*
2504          * Dispatch a request from this cfqq, if it is allowed
2505          */
2506         if (!cfq_dispatch_request(cfqd, cfqq))
2507                 return 0;
2508
2509         cfqq->slice_dispatch++;
2510         cfq_clear_cfqq_must_dispatch(cfqq);
2511
2512         /*
2513          * expire an async queue immediately if it has used up its slice. idle
2514          * queues always expire after 1 dispatch round.
2515          */
2516         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2517             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2518             cfq_class_idle(cfqq))) {
2519                 cfqq->slice_end = jiffies + 1;
2520                 cfq_slice_expired(cfqd, 0);
2521         }
2522
2523         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2524         return 1;
2525 }
2526
2527 /*
2528  * task holds one reference to the queue, dropped when task exits. each rq
2529  * in-flight on this queue also holds a reference, dropped when rq is freed.
2530  *
2531  * Each cfq queue took a reference on the parent group. Drop it now.
2532  * queue lock must be held here.
2533  */
2534 static void cfq_put_queue(struct cfq_queue *cfqq)
2535 {
2536         struct cfq_data *cfqd = cfqq->cfqd;
2537         struct cfq_group *cfqg;
2538
2539         BUG_ON(cfqq->ref <= 0);
2540
2541         cfqq->ref--;
2542         if (cfqq->ref)
2543                 return;
2544
2545         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2546         BUG_ON(rb_first(&cfqq->sort_list));
2547         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2548         cfqg = cfqq->cfqg;
2549
2550         if (unlikely(cfqd->active_queue == cfqq)) {
2551                 __cfq_slice_expired(cfqd, cfqq, 0);
2552                 cfq_schedule_dispatch(cfqd);
2553         }
2554
2555         BUG_ON(cfq_cfqq_on_rr(cfqq));
2556         kmem_cache_free(cfq_pool, cfqq);
2557         cfq_put_cfqg(cfqg);
2558 }
2559
2560 /*
2561  * Must always be called with the rcu_read_lock() held
2562  */
2563 static void
2564 __call_for_each_cic(struct io_context *ioc,
2565                     void (*func)(struct io_context *, struct cfq_io_context *))
2566 {
2567         struct cfq_io_context *cic;
2568         struct hlist_node *n;
2569
2570         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2571                 func(ioc, cic);
2572 }
2573
2574 /*
2575  * Call func for each cic attached to this ioc.
2576  */
2577 static void
2578 call_for_each_cic(struct io_context *ioc,
2579                   void (*func)(struct io_context *, struct cfq_io_context *))
2580 {
2581         rcu_read_lock();
2582         __call_for_each_cic(ioc, func);
2583         rcu_read_unlock();
2584 }
2585
2586 static void cfq_cic_free_rcu(struct rcu_head *head)
2587 {
2588         struct cfq_io_context *cic;
2589
2590         cic = container_of(head, struct cfq_io_context, rcu_head);
2591
2592         kmem_cache_free(cfq_ioc_pool, cic);
2593         elv_ioc_count_dec(cfq_ioc_count);
2594
2595         if (ioc_gone) {
2596                 /*
2597                  * CFQ scheduler is exiting, grab exit lock and check
2598                  * the pending io context count. If it hits zero,
2599                  * complete ioc_gone and set it back to NULL
2600                  */
2601                 spin_lock(&ioc_gone_lock);
2602                 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
2603                         complete(ioc_gone);
2604                         ioc_gone = NULL;
2605                 }
2606                 spin_unlock(&ioc_gone_lock);
2607         }
2608 }
2609
2610 static void cfq_cic_free(struct cfq_io_context *cic)
2611 {
2612         call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
2613 }
2614
2615 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2616 {
2617         unsigned long flags;
2618         unsigned long dead_key = (unsigned long) cic->key;
2619
2620         BUG_ON(!(dead_key & CIC_DEAD_KEY));
2621
2622         spin_lock_irqsave(&ioc->lock, flags);
2623         radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
2624         hlist_del_rcu(&cic->cic_list);
2625         spin_unlock_irqrestore(&ioc->lock, flags);
2626
2627         cfq_cic_free(cic);
2628 }
2629
2630 /*
2631  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
2632  * There are only two callers of this: ->dtor(), which is called with the
2633  * rcu_read_lock() held, and ->trim(), which is called with the task lock held
2634  */
2635 static void cfq_free_io_context(struct io_context *ioc)
2636 {
2637         /*
2638          * ioc->refcount is zero here, or we are called from elv_unregister(),
2639          * so no more cic's are allowed to be linked into this ioc.  So it
2640          * should be ok to iterate over the known list; we will see all cic's
2641          * since no new ones are added.
2642          */
2643         __call_for_each_cic(ioc, cic_free_func);
2644 }
2645
2646 static void cfq_put_cooperator(struct cfq_queue *cfqq)
2647 {
2648         struct cfq_queue *__cfqq, *next;
2649
2650         /*
2651          * If this queue was scheduled to merge with another queue, be
2652          * sure to drop the reference taken on that queue (and others in
2653          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2654          */
2655         __cfqq = cfqq->new_cfqq;
2656         while (__cfqq) {
2657                 if (__cfqq == cfqq) {
2658                         WARN(1, "cfqq->new_cfqq loop detected\n");
2659                         break;
2660                 }
2661                 next = __cfqq->new_cfqq;
2662                 cfq_put_queue(__cfqq);
2663                 __cfqq = next;
2664         }
2665 }
2666
2667 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2668 {
2669         if (unlikely(cfqq == cfqd->active_queue)) {
2670                 __cfq_slice_expired(cfqd, cfqq, 0);
2671                 cfq_schedule_dispatch(cfqd);
2672         }
2673
2674         cfq_put_cooperator(cfqq);
2675
2676         cfq_put_queue(cfqq);
2677 }
2678
2679 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2680                                          struct cfq_io_context *cic)
2681 {
2682         struct io_context *ioc = cic->ioc;
2683
2684         list_del_init(&cic->queue_list);
2685
2686         /*
2687          * Make sure dead mark is seen for dead queues
2688          */
2689         smp_wmb();
2690         cic->key = cfqd_dead_key(cfqd);
2691
2692         if (ioc->ioc_data == cic)
2693                 rcu_assign_pointer(ioc->ioc_data, NULL);
2694
2695         if (cic->cfqq[BLK_RW_ASYNC]) {
2696                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2697                 cic->cfqq[BLK_RW_ASYNC] = NULL;
2698         }
2699
2700         if (cic->cfqq[BLK_RW_SYNC]) {
2701                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2702                 cic->cfqq[BLK_RW_SYNC] = NULL;
2703         }
2704 }
2705
2706 static void cfq_exit_single_io_context(struct io_context *ioc,
2707                                        struct cfq_io_context *cic)
2708 {
2709         struct cfq_data *cfqd = cic_to_cfqd(cic);
2710
2711         if (cfqd) {
2712                 struct request_queue *q = cfqd->queue;
2713                 unsigned long flags;
2714
2715                 spin_lock_irqsave(q->queue_lock, flags);
2716
2717                 /*
2718                  * Ensure we get a fresh copy of the ->key to prevent
2719                  * race between exiting task and queue
2720                  */
2721                 smp_read_barrier_depends();
2722                 if (cic->key == cfqd)
2723                         __cfq_exit_single_io_context(cfqd, cic);
2724
2725                 spin_unlock_irqrestore(q->queue_lock, flags);
2726         }
2727 }
2728
2729 /*
2730  * The process that ioc belongs to has exited; we need to clean up
2731  * and put the internal structures we have that belong to that process.
2732  */
2733 static void cfq_exit_io_context(struct io_context *ioc)
2734 {
2735         call_for_each_cic(ioc, cfq_exit_single_io_context);
2736 }
2737
2738 static struct cfq_io_context *
2739 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2740 {
2741         struct cfq_io_context *cic;
2742
2743         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
2744                                                         cfqd->queue->node);
2745         if (cic) {
2746                 cic->last_end_request = jiffies;
2747                 INIT_LIST_HEAD(&cic->queue_list);
2748                 INIT_HLIST_NODE(&cic->cic_list);
2749                 cic->dtor = cfq_free_io_context;
2750                 cic->exit = cfq_exit_io_context;
2751                 elv_ioc_count_inc(cfq_ioc_count);
2752         }
2753
2754         return cic;
2755 }
2756
2757 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2758 {
2759         struct task_struct *tsk = current;
2760         int ioprio_class;
2761
2762         if (!cfq_cfqq_prio_changed(cfqq))
2763                 return;
2764
2765         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2766         switch (ioprio_class) {
2767         default:
2768                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2769         case IOPRIO_CLASS_NONE:
2770                 /*
2771                  * no prio set, inherit CPU scheduling settings
2772                  */
2773                 cfqq->ioprio = task_nice_ioprio(tsk);
2774                 cfqq->ioprio_class = task_nice_ioclass(tsk);
2775                 break;
2776         case IOPRIO_CLASS_RT:
2777                 cfqq->ioprio = task_ioprio(ioc);
2778                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2779                 break;
2780         case IOPRIO_CLASS_BE:
2781                 cfqq->ioprio = task_ioprio(ioc);
2782                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2783                 break;
2784         case IOPRIO_CLASS_IDLE:
2785                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2786                 cfqq->ioprio = 7;
2787                 cfq_clear_cfqq_idle_window(cfqq);
2788                 break;
2789         }
2790
2791         /*
2792          * keep track of original prio settings in case we have to temporarily
2793          * elevate the priority of this queue
2794          */
2795         cfqq->org_ioprio = cfqq->ioprio;
2796         cfqq->org_ioprio_class = cfqq->ioprio_class;
2797         cfq_clear_cfqq_prio_changed(cfqq);
2798 }
2799
2800 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
2801 {
2802         struct cfq_data *cfqd = cic_to_cfqd(cic);
2803         struct cfq_queue *cfqq;
2804         unsigned long flags;
2805
2806         if (unlikely(!cfqd))
2807                 return;
2808
2809         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2810
2811         cfqq = cic->cfqq[BLK_RW_ASYNC];
2812         if (cfqq) {
2813                 struct cfq_queue *new_cfqq;
2814                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2815                                                 GFP_ATOMIC);
2816                 if (new_cfqq) {
2817                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2818                         cfq_put_queue(cfqq);
2819                 }
2820         }
2821
2822         cfqq = cic->cfqq[BLK_RW_SYNC];
2823         if (cfqq)
2824                 cfq_mark_cfqq_prio_changed(cfqq);
2825
2826         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2827 }
2828
2829 static void cfq_ioc_set_ioprio(struct io_context *ioc)
2830 {
2831         call_for_each_cic(ioc, changed_ioprio);
2832         ioc->ioprio_changed = 0;
2833 }
2834
2835 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2836                           pid_t pid, bool is_sync)
2837 {
2838         RB_CLEAR_NODE(&cfqq->rb_node);
2839         RB_CLEAR_NODE(&cfqq->p_node);
2840         INIT_LIST_HEAD(&cfqq->fifo);
2841
2842         cfqq->ref = 0;
2843         cfqq->cfqd = cfqd;
2844
2845         cfq_mark_cfqq_prio_changed(cfqq);
2846
2847         if (is_sync) {
2848                 if (!cfq_class_idle(cfqq))
2849                         cfq_mark_cfqq_idle_window(cfqq);
2850                 cfq_mark_cfqq_sync(cfqq);
2851         }
2852         cfqq->pid = pid;
2853 }
2854
2855 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2856 static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
2857 {
2858         struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2859         struct cfq_data *cfqd = cic_to_cfqd(cic);
2860         unsigned long flags;
2861         struct request_queue *q;
2862
2863         if (unlikely(!cfqd))
2864                 return;
2865
2866         q = cfqd->queue;
2867
2868         spin_lock_irqsave(q->queue_lock, flags);
2869
2870         if (sync_cfqq) {
2871                 /*
2872                  * Drop the reference to the sync queue. A new sync queue will
2873                  * be assigned in the new group upon arrival of a fresh request.
2874                  */
2875                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2876                 cic_set_cfqq(cic, NULL, 1);
2877                 cfq_put_queue(sync_cfqq);
2878         }
2879
2880         spin_unlock_irqrestore(q->queue_lock, flags);
2881 }
2882
2883 static void cfq_ioc_set_cgroup(struct io_context *ioc)
2884 {
2885         call_for_each_cic(ioc, changed_cgroup);
2886         ioc->cgroup_changed = 0;
2887 }
2888 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2889
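/*
 * Find the cfqq for this cic, or allocate a new one.  If the allocation may
 * sleep (__GFP_WAIT), the queue lock is dropped around it and the lookup is
 * retried, since the cfqq may have been set up while we slept; if allocation
 * fails we fall back to the embedded oom_cfqq.
 */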
2890 static struct cfq_queue *
2891 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2892                      struct io_context *ioc, gfp_t gfp_mask)
2893 {
2894         struct cfq_queue *cfqq, *new_cfqq = NULL;
2895         struct cfq_io_context *cic;
2896         struct cfq_group *cfqg;
2897
2898 retry:
2899         cfqg = cfq_get_cfqg(cfqd, 1);
2900         cic = cfq_cic_lookup(cfqd, ioc);
2901         /* cic always exists here */
2902         cfqq = cic_to_cfqq(cic, is_sync);
2903
2904         /*
2905          * Always try a new alloc if we fell back to the OOM cfqq
2906          * originally, since it should just be a temporary situation.
2907          */
2908         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2909                 cfqq = NULL;
2910                 if (new_cfqq) {
2911                         cfqq = new_cfqq;
2912                         new_cfqq = NULL;
2913                 } else if (gfp_mask & __GFP_WAIT) {
2914                         spin_unlock_irq(cfqd->queue->queue_lock);
2915                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
2916                                         gfp_mask | __GFP_ZERO,
2917                                         cfqd->queue->node);
2918                         spin_lock_irq(cfqd->queue->queue_lock);
2919                         if (new_cfqq)
2920                                 goto retry;
2921                 } else {
2922                         cfqq = kmem_cache_alloc_node(cfq_pool,
2923                                         gfp_mask | __GFP_ZERO,
2924                                         cfqd->queue->node);
2925                 }
2926
2927                 if (cfqq) {
2928                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2929                         cfq_init_prio_data(cfqq, ioc);
2930                         cfq_link_cfqq_cfqg(cfqq, cfqg);
2931                         cfq_log_cfqq(cfqd, cfqq, "alloced");
2932                 } else
2933                         cfqq = &cfqd->oom_cfqq;
2934         }
2935
2936         if (new_cfqq)
2937                 kmem_cache_free(cfq_pool, new_cfqq);
2938
2939         return cfqq;
2940 }
2941
2942 static struct cfq_queue **
2943 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2944 {
2945         switch (ioprio_class) {
2946         case IOPRIO_CLASS_RT:
2947                 return &cfqd->async_cfqq[0][ioprio];
2948         case IOPRIO_CLASS_BE:
2949                 return &cfqd->async_cfqq[1][ioprio];
2950         case IOPRIO_CLASS_IDLE:
2951                 return &cfqd->async_idle_cfqq;
2952         default:
2953                 BUG();
2954         }
2955 }
2956
2957 static struct cfq_queue *
2958 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2959               gfp_t gfp_mask)
2960 {
2961         const int ioprio = task_ioprio(ioc);
2962         const int ioprio_class = task_ioprio_class(ioc);
2963         struct cfq_queue **async_cfqq = NULL;
2964         struct cfq_queue *cfqq = NULL;
2965
2966         if (!is_sync) {
2967                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2968                 cfqq = *async_cfqq;
2969         }
2970
2971         if (!cfqq)
2972                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2973
2974         /*
2975          * pin the queue now that it's allocated, scheduler exit will prune it
2976          */
2977         if (!is_sync && !(*async_cfqq)) {
2978                 cfqq->ref++;
2979                 *async_cfqq = cfqq;
2980         }
2981
2982         cfqq->ref++;
2983         return cfqq;
2984 }
2985
2986 /*
2987  * We drop cfq io contexts lazily, so we may find a dead one.
2988  */
2989 static void
2990 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2991                   struct cfq_io_context *cic)
2992 {
2993         unsigned long flags;
2994
2995         WARN_ON(!list_empty(&cic->queue_list));
2996         BUG_ON(cic->key != cfqd_dead_key(cfqd));
2997
2998         spin_lock_irqsave(&ioc->lock, flags);
2999
3000         BUG_ON(ioc->ioc_data == cic);
3001
3002         radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
3003         hlist_del_rcu(&cic->cic_list);
3004         spin_unlock_irqrestore(&ioc->lock, flags);
3005
3006         cfq_cic_free(cic);
3007 }
3008
3009 static struct cfq_io_context *
3010 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
3011 {
3012         struct cfq_io_context *cic;
3013         unsigned long flags;
3014
3015         if (unlikely(!ioc))
3016                 return NULL;
3017
3018         rcu_read_lock();
3019
3020         /*
3021          * we maintain a last-hit cache, to avoid browsing over the tree
3022          */
3023         cic = rcu_dereference(ioc->ioc_data);
3024         if (cic && cic->key == cfqd) {
3025                 rcu_read_unlock();
3026                 return cic;
3027         }
3028
3029         do {
3030                 cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
3031                 rcu_read_unlock();
3032                 if (!cic)
3033                         break;
3034                 if (unlikely(cic->key != cfqd)) {
3035                         cfq_drop_dead_cic(cfqd, ioc, cic);
3036                         rcu_read_lock();
3037                         continue;
3038                 }
3039
3040                 spin_lock_irqsave(&ioc->lock, flags);
3041                 rcu_assign_pointer(ioc->ioc_data, cic);
3042                 spin_unlock_irqrestore(&ioc->lock, flags);
3043                 break;
3044         } while (1);
3045
3046         return cic;
3047 }
3048
3049 /*
3050  * Add cic into ioc, using cfqd as the search key. This enables us to lookup
3051  * the process specific cfq io context when entered from the block layer.
3052  * Also adds the cic to a per-cfqd list, used when this queue is removed.
3053  */
3054 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
3055                         struct cfq_io_context *cic, gfp_t gfp_mask)
3056 {
3057         unsigned long flags;
3058         int ret;
3059
3060         ret = radix_tree_preload(gfp_mask);
3061         if (!ret) {
3062                 cic->ioc = ioc;
3063                 cic->key = cfqd;
3064
3065                 spin_lock_irqsave(&ioc->lock, flags);
3066                 ret = radix_tree_insert(&ioc->radix_root,
3067                                                 cfqd->cic_index, cic);
3068                 if (!ret)
3069                         hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
3070                 spin_unlock_irqrestore(&ioc->lock, flags);
3071
3072                 radix_tree_preload_end();
3073
3074                 if (!ret) {
3075                         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3076                         list_add(&cic->queue_list, &cfqd->cic_list);
3077                         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3078                 }
3079         }
3080
3081         if (ret)
3082                 printk(KERN_ERR "cfq: cic link failed!\n");
3083
3084         return ret;
3085 }
3086
3087 /*
3088  * Setup general io context and cfq io context. There can be several cfq
3089  * io contexts per general io context, if this process is doing io to more
3090  * than one device managed by cfq.
3091  */
3092 static struct cfq_io_context *
3093 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3094 {
3095         struct io_context *ioc = NULL;
3096         struct cfq_io_context *cic;
3097
3098         might_sleep_if(gfp_mask & __GFP_WAIT);
3099
3100         ioc = get_io_context(gfp_mask, cfqd->queue->node);
3101         if (!ioc)
3102                 return NULL;
3103
3104         cic = cfq_cic_lookup(cfqd, ioc);
3105         if (cic)
3106                 goto out;
3107
3108         cic = cfq_alloc_io_context(cfqd, gfp_mask);
3109         if (cic == NULL)
3110                 goto err;
3111
3112         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
3113                 goto err_free;
3114
3115 out:
3116         smp_read_barrier_depends();
3117         if (unlikely(ioc->ioprio_changed))
3118                 cfq_ioc_set_ioprio(ioc);
3119
3120 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3121         if (unlikely(ioc->cgroup_changed))
3122                 cfq_ioc_set_cgroup(ioc);
3123 #endif
3124         return cic;
3125 err_free:
3126         cfq_cic_free(cic);
3127 err:
3128         put_io_context(ioc);
3129         return NULL;
3130 }
3131
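/*
 * Think time is tracked as a fixed-point exponentially weighted moving
 * average: sample count and total are scaled by 256 and decayed by 7/8
 * each time a request from this process is queued, so recent behaviour
 * dominates; ttime_mean is their rounded quotient.
 */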
3132 static void
3133 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
3134 {
3135         unsigned long elapsed = jiffies - cic->last_end_request;
3136         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
3137
3138         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
3139         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
3140         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
3141 }
3142
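/*
 * seek_history is a 32-bit shift register of recent requests: on rotational
 * storage a bit is set when the request landed more than CFQQ_SEEK_THR
 * sectors from the previous one, on non-rotational storage when it was
 * smaller than CFQQ_SECT_THR_NONROT. CFQQ_SEEKY() then flags the queue as
 * seeky once more than 1/8 of the last 32 requests are marked.
 */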
3143 static void
3144 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3145                        struct request *rq)
3146 {
3147         sector_t sdist = 0;
3148         sector_t n_sec = blk_rq_sectors(rq);
3149         if (cfqq->last_request_pos) {
3150                 if (cfqq->last_request_pos < blk_rq_pos(rq))
3151                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3152                 else
3153                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3154         }
3155
3156         cfqq->seek_history <<= 1;
3157         if (blk_queue_nonrot(cfqd->queue))
3158                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3159         else
3160                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3161 }
3162
3163 /*
3164  * Disable idle window if the process thinks too long or seeks so much that
3165  * it doesn't matter
3166  */
3167 static void
3168 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3169                        struct cfq_io_context *cic)
3170 {
3171         int old_idle, enable_idle;
3172
3173         /*
3174          * Don't idle for async or idle io prio class
3175          */
3176         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3177                 return;
3178
3179         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3180
3181         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3182                 cfq_mark_cfqq_deep(cfqq);
3183
3184         if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3185                 enable_idle = 0;
3186         else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3187             (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3188                 enable_idle = 0;
3189         else if (sample_valid(cic->ttime_samples)) {
3190                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
3191                         enable_idle = 0;
3192                 else
3193                         enable_idle = 1;
3194         }
3195
3196         if (old_idle != enable_idle) {
3197                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3198                 if (enable_idle)
3199                         cfq_mark_cfqq_idle_window(cfqq);
3200                 else
3201                         cfq_clear_cfqq_idle_window(cfqq);
3202         }
3203 }
3204
3205 /*
3206  * Check if new_cfqq should preempt the currently active queue. Return false
3207  * if not (or if we aren't sure); returning true will cause a preempt.
3208  */
3209 static bool
3210 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3211                    struct request *rq)
3212 {
3213         struct cfq_queue *cfqq;
3214
3215         cfqq = cfqd->active_queue;
3216         if (!cfqq)
3217                 return false;
3218
3219         if (cfq_class_idle(new_cfqq))
3220                 return false;
3221
3222         if (cfq_class_idle(cfqq))
3223                 return true;
3224
3225         /*
3226          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3227          */
3228         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3229                 return false;
3230
3231         /*
3232          * if the new request is sync, but the currently running queue is
3233          * not, let the sync request have priority.
3234          */
3235         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3236                 return true;
3237
3238         if (new_cfqq->cfqg != cfqq->cfqg)
3239                 return false;
3240
3241         if (cfq_slice_used(cfqq))
3242                 return true;
3243
3244         /* Allow preemption only if we are idling on sync-noidle tree */
3245         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3246             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3247             new_cfqq->service_tree->count == 2 &&
3248             RB_EMPTY_ROOT(&cfqq->sort_list))
3249                 return true;
3250
3251         /*
3252          * So both queues are sync. Let the new request get disk time if
3253          * it's a metadata request and the current queue is doing regular IO.
3254          */
3255         if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
3256                 return true;
3257
3258         /*
3259          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3260          */
3261         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3262                 return true;
3263
3264         /* The active queue is empty and we shouldn't idle on it: preempt */
3265         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3266                 return true;
3267
3268         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3269                 return false;
3270
3271         /*
3272          * if this request is as-good as one we would expect from the
3273          * current cfqq, let it preempt
3274          */
3275         if (cfq_rq_close(cfqd, cfqq, rq))
3276                 return true;
3277
3278         return false;
3279 }
3280
3281 /*
3282  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3283  * let it have half of its nominal slice.
3284  */
3285 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3286 {
3287         struct cfq_queue *old_cfqq = cfqd->active_queue;
3288
3289         cfq_log_cfqq(cfqd, cfqq, "preempt");
3290         cfq_slice_expired(cfqd, 1);
3291
3292         /*
3293          * workload type is changed, don't save slice, otherwise preempt
3294          * doesn't happen
3295          */
3296         if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
3297                 cfqq->cfqg->saved_workload_slice = 0;
3298
3299         /*
3300          * Put the new queue at the front of the current list,
3301          * so we know that it will be selected next.
3302          */
3303         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3304
3305         cfq_service_tree_add(cfqd, cfqq, 1);
3306         __cfq_set_active_queue(cfqd, cfqq);
3307 }
3308
3309 /*
3310  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3311  * something we should do about it
3312  */
3313 static void
3314 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3315                 struct request *rq)
3316 {
3317         struct cfq_io_context *cic = RQ_CIC(rq);
3318
3319         cfqd->rq_queued++;
3320         if (rq->cmd_flags & REQ_META)
3321                 cfqq->meta_pending++;
3322
3323         cfq_update_io_thinktime(cfqd, cic);
3324         cfq_update_io_seektime(cfqd, cfqq, rq);
3325         cfq_update_idle_window(cfqd, cfqq, cic);
3326
3327         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3328
3329         if (cfqq == cfqd->active_queue) {
3330                 /*
3331                  * Remember that we saw a request from this process, but
3332                  * don't start queuing just yet. Otherwise we risk seeing lots
3333                  * of tiny requests, because we disrupt the normal plugging
3334                  * and merging. If the request is already larger than a single
3335                  * page, let it rip immediately. For that case we assume that
3336                  * merging is already done. Ditto for a busy system that
3337                  * has other work pending: don't risk waiting for the idle
3338                  * timer unplug to continue working.
3339                  */
3340                 if (cfq_cfqq_wait_request(cfqq)) {
3341                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3342                             cfqd->busy_queues > 1) {
3343                                 cfq_del_timer(cfqd, cfqq);
3344                                 cfq_clear_cfqq_wait_request(cfqq);
3345                                 __blk_run_queue(cfqd->queue, false);
3346                         } else {
3347                                 cfq_blkiocg_update_idle_time_stats(
3348                                                 &cfqq->cfqg->blkg);
3349                                 cfq_mark_cfqq_must_dispatch(cfqq);
3350                         }
3351                 }
3352         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3353                 /*
3354                  * not the active queue - expire current slice if it is
3355                  * idle and has exceeded its mean thinktime, or this new queue
3356                  * has some old slice time left and is of higher priority or
3357                  * this new queue is RT and the current one is BE
3358                  */
3359                 cfq_preempt_queue(cfqd, cfqq);
3360                 __blk_run_queue(cfqd->queue, false);
3361         }
3362 }
3363
3364 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3365 {
3366         struct cfq_data *cfqd = q->elevator->elevator_data;
3367         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3368
3369         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3370         cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
3371
3372         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3373         list_add_tail(&rq->queuelist, &cfqq->fifo);
3374         cfq_add_rq_rb(rq);
3375         cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3376                         &cfqd->serving_group->blkg, rq_data_dir(rq),
3377                         rq_is_sync(rq));
3378         cfq_rq_enqueued(cfqd, cfqq, rq);
3379 }
3380
3381 /*
3382  * Update hw_tag based on peak queue depth over 50 samples under
3383  * sufficient load.
3384  */
3385 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3386 {
3387         struct cfq_queue *cfqq = cfqd->active_queue;
3388
3389         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3390                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3391
3392         if (cfqd->hw_tag == 1)
3393                 return;
3394
3395         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3396             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3397                 return;
3398
3399         /*
3400          * If the active queue doesn't have enough requests and can idle,
3401          * cfq might not dispatch sufficient requests to hardware. Don't
3402          * zero hw_tag in this case.
3403          */
3404         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3405             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3406             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3407                 return;
3408
3409         if (cfqd->hw_tag_samples++ < 50)
3410                 return;
3411
3412         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3413                 cfqd->hw_tag = 1;
3414         else
3415                 cfqd->hw_tag = 0;
3416 }
3417
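/*
 * Decide whether an otherwise empty queue should be kept "wait busy" at the
 * end of its slice instead of being expired right away, in the hope that it
 * issues another request shortly.
 */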
3418 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3419 {
3420         struct cfq_io_context *cic = cfqd->active_cic;
3421
3422         /* If the queue already has requests, don't wait */
3423         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3424                 return false;
3425
3426         /* If there are other queues in the group, don't wait */
3427         if (cfqq->cfqg->nr_cfqq > 1)
3428                 return false;
3429
3430         if (cfq_slice_used(cfqq))
3431                 return true;
3432
3433         /* if slice left is less than think time, wait busy */
3434         if (cic && sample_valid(cic->ttime_samples)
3435             && (cfqq->slice_end - jiffies < cic->ttime_mean))
3436                 return true;
3437
3438         /*
3439          * If the think time is less than a jiffy, then ttime_mean=0 and the
3440          * above check will not be true. It might happen that the slice has not
3441          * expired yet but will expire soon (4-5 ns) during select_queue(). To
3442          * cover the case where the think time is less than a jiffy, mark the
3443          * queue wait busy if only 1 jiffy is left in the slice.
3444          */
3445         if (cfqq->slice_end - jiffies == 1)
3446                 return true;
3447
3448         return false;
3449 }
3450
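/*
 * Request completion: drop the driver/dispatch accounting, update blkio
 * stats, and decide whether the active queue should be expired, marked
 * wait-busy, or armed with the idle timer.
 */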
3451 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3452 {
3453         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3454         struct cfq_data *cfqd = cfqq->cfqd;
3455         const int sync = rq_is_sync(rq);
3456         unsigned long now;
3457
3458         now = jiffies;
3459         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3460                      !!(rq->cmd_flags & REQ_NOIDLE));
3461
3462         cfq_update_hw_tag(cfqd);
3463
3464         WARN_ON(!cfqd->rq_in_driver);
3465         WARN_ON(!cfqq->dispatched);
3466         cfqd->rq_in_driver--;
3467         cfqq->dispatched--;
3468         (RQ_CFQG(rq))->dispatched--;
3469         cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3470                         rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3471                         rq_data_dir(rq), rq_is_sync(rq));
3472
3473         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3474
3475         if (sync) {
3476                 RQ_CIC(rq)->last_end_request = now;
3477                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3478                         cfqd->last_delayed_sync = now;
3479         }
3480
3481         /*
3482          * If this is the active queue, check if it needs to be expired,
3483          * or if we want to idle in case it has no pending requests.
3484          */
3485         if (cfqd->active_queue == cfqq) {
3486                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3487
3488                 if (cfq_cfqq_slice_new(cfqq)) {
3489                         cfq_set_prio_slice(cfqd, cfqq);
3490                         cfq_clear_cfqq_slice_new(cfqq);
3491                 }
3492
3493                 /*
3494                  * Should we wait for the next request to come in before we
3495                  * expire the queue?
3496                  */
3497                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3498                         unsigned long extend_sl = cfqd->cfq_slice_idle;
3499                         if (!cfqd->cfq_slice_idle)
3500                                 extend_sl = cfqd->cfq_group_idle;
3501                         cfqq->slice_end = jiffies + extend_sl;
3502                         cfq_mark_cfqq_wait_busy(cfqq);
3503                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3504                 }
3505
3506                 /*
3507                  * Idling is not enabled on:
3508                  * - expired queues
3509                  * - idle-priority queues
3510                  * - async queues
3511                  * - queues with still some requests queued
3512                  * - when there is a close cooperator
3513                  */
3514                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3515                         cfq_slice_expired(cfqd, 1);
3516                 else if (sync && cfqq_empty &&
3517                          !cfq_close_cooperator(cfqd, cfqq)) {
3518                         cfq_arm_slice_timer(cfqd);
3519                 }
3520         }
3521
3522         if (!cfqd->rq_in_driver)
3523                 cfq_schedule_dispatch(cfqd);
3524 }
3525
3526 /*
3527  * we temporarily boost lower priority queues if they are holding fs exclusive
3528  * resources. they are boosted to normal prio (CLASS_BE/4)
3529  */
3530 static void cfq_prio_boost(struct cfq_queue *cfqq)
3531 {
3532         if (has_fs_excl()) {
3533                 /*
3534                  * boost idle prio on transactions that would lock out other
3535                  * users of the filesystem
3536                  */
3537                 if (cfq_class_idle(cfqq))
3538                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
3539                 if (cfqq->ioprio > IOPRIO_NORM)
3540                         cfqq->ioprio = IOPRIO_NORM;
3541         } else {
3542                 /*
3543                  * unboost the queue (if needed)
3544                  */
3545                 cfqq->ioprio_class = cfqq->org_ioprio_class;
3546                 cfqq->ioprio = cfqq->org_ioprio;
3547         }
3548 }
3549
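/*
 * may_queue tells the block layer whether this task may allocate a request.
 * A queue that is idling while waiting for a request is promoted to
 * ELV_MQUEUE_MUST (once, via the must_alloc_slice flag) so it can actually
 * submit one.
 */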
3550 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3551 {
3552         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3553                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3554                 return ELV_MQUEUE_MUST;
3555         }
3556
3557         return ELV_MQUEUE_MAY;
3558 }
3559
3560 static int cfq_may_queue(struct request_queue *q, int rw)
3561 {
3562         struct cfq_data *cfqd = q->elevator->elevator_data;
3563         struct task_struct *tsk = current;
3564         struct cfq_io_context *cic;
3565         struct cfq_queue *cfqq;
3566
3567         /*
3568          * don't force setup of a queue from here, as a call to may_queue
3569          * does not necessarily imply that a request actually will be queued.
3570          * so just lookup a possibly existing queue, or return 'may queue'
3571          * if that fails
3572          */
3573         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3574         if (!cic)
3575                 return ELV_MQUEUE_MAY;
3576
3577         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3578         if (cfqq) {
3579                 cfq_init_prio_data(cfqq, cic->ioc);
3580                 cfq_prio_boost(cfqq);
3581
3582                 return __cfq_may_queue(cfqq);
3583         }
3584
3585         return ELV_MQUEUE_MAY;
3586 }
3587
3588 /*
3589  * queue lock held here
3590  */
3591 static void cfq_put_request(struct request *rq)
3592 {
3593         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3594
3595         if (cfqq) {
3596                 const int rw = rq_data_dir(rq);
3597
3598                 BUG_ON(!cfqq->allocated[rw]);
3599                 cfqq->allocated[rw]--;
3600
3601                 put_io_context(RQ_CIC(rq)->ioc);
3602
3603                 rq->elevator_private[0] = NULL;
3604                 rq->elevator_private[1] = NULL;
3605
3606                 /* Put down rq reference on cfqg */
3607                 cfq_put_cfqg(RQ_CFQG(rq));
3608                 rq->elevator_private[2] = NULL;
3609
3610                 cfq_put_queue(cfqq);
3611         }
3612 }
3613
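/*
 * Switch the cic over to the queue it was scheduled to merge with, mark
 * that queue as a cooperator and drop our reference on the old queue.
 */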
3614 static struct cfq_queue *
3615 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3616                 struct cfq_queue *cfqq)
3617 {
3618         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3619         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3620         cfq_mark_cfqq_coop(cfqq->new_cfqq);
3621         cfq_put_queue(cfqq);
3622         return cic_to_cfqq(cic, 1);
3623 }
3624
3625 /*
3626  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3627  * was the last process referring to said cfqq.
3628  */
3629 static struct cfq_queue *
3630 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3631 {
3632         if (cfqq_process_refs(cfqq) == 1) {
3633                 cfqq->pid = current->pid;
3634                 cfq_clear_cfqq_coop(cfqq);
3635                 cfq_clear_cfqq_split_coop(cfqq);
3636                 return cfqq;
3637         }
3638
3639         cic_set_cfqq(cic, NULL, 1);
3640
3641         cfq_put_cooperator(cfqq);
3642
3643         cfq_put_queue(cfqq);
3644         return NULL;
3645 }
3646 /*
3647  * Allocate cfq data structures associated with this request.
3648  */
3649 static int
3650 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3651 {
3652         struct cfq_data *cfqd = q->elevator->elevator_data;
3653         struct cfq_io_context *cic;
3654         const int rw = rq_data_dir(rq);
3655         const bool is_sync = rq_is_sync(rq);
3656         struct cfq_queue *cfqq;
3657         unsigned long flags;
3658
3659         might_sleep_if(gfp_mask & __GFP_WAIT);
3660
3661         cic = cfq_get_io_context(cfqd, gfp_mask);
3662
3663         spin_lock_irqsave(q->queue_lock, flags);
3664
3665         if (!cic)
3666                 goto queue_fail;
3667
3668 new_queue:
3669         cfqq = cic_to_cfqq(cic, is_sync);
3670         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3671                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3672                 cic_set_cfqq(cic, cfqq, is_sync);
3673         } else {
3674                 /*
3675                  * If the queue was seeky for too long, break it apart.
3676                  */
3677                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3678                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3679                         cfqq = split_cfqq(cic, cfqq);
3680                         if (!cfqq)
3681                                 goto new_queue;
3682                 }
3683
3684                 /*
3685                  * Check to see if this queue is scheduled to merge with
3686                  * another, closely cooperating queue.  The merging of
3687                  * queues happens here as it must be done in process context.
3688                  * The reference on new_cfqq was taken in merge_cfqqs.
3689                  */
3690                 if (cfqq->new_cfqq)
3691                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3692         }
3693
3694         cfqq->allocated[rw]++;
3695
3696         cfqq->ref++;
3697         rq->elevator_private[0] = cic;
3698         rq->elevator_private[1] = cfqq;
3699         rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
3700         spin_unlock_irqrestore(q->queue_lock, flags);
3701         return 0;
3702
3703 queue_fail:
3704         if (cic)
3705                 put_io_context(cic->ioc);
3706
3707         cfq_schedule_dispatch(cfqd);
3708         spin_unlock_irqrestore(q->queue_lock, flags);
3709         cfq_log(cfqd, "set_request fail");
3710         return 1;
3711 }
3712
3713 static void cfq_kick_queue(struct work_struct *work)
3714 {
3715         struct cfq_data *cfqd =
3716                 container_of(work, struct cfq_data, unplug_work);
3717         struct request_queue *q = cfqd->queue;
3718
3719         spin_lock_irq(q->queue_lock);
3720         __blk_run_queue(cfqd->queue, false);
3721         spin_unlock_irq(q->queue_lock);
3722 }
3723
3724 /*
3725  * Timer running if the active_queue is currently idling inside its time slice
3726  */
3727 static void cfq_idle_slice_timer(unsigned long data)
3728 {
3729         struct cfq_data *cfqd = (struct cfq_data *) data;
3730         struct cfq_queue *cfqq;
3731         unsigned long flags;
3732         int timed_out = 1;
3733
3734         cfq_log(cfqd, "idle timer fired");
3735
3736         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3737
3738         cfqq = cfqd->active_queue;
3739         if (cfqq) {
3740                 timed_out = 0;
3741
3742                 /*
3743                  * We saw a request before the queue expired, let it through
3744                  */
3745                 if (cfq_cfqq_must_dispatch(cfqq))
3746                         goto out_kick;
3747
3748                 /*
3749                  * expired
3750                  */
3751                 if (cfq_slice_used(cfqq))
3752                         goto expire;
3753
3754                 /*
3755                  * only expire and reinvoke request handler, if there are
3756                  * other queues with pending requests
3757                  */
3758                 if (!cfqd->busy_queues)
3759                         goto out_cont;
3760
3761                 /*
3762                  * not expired and it has a request pending, let it dispatch
3763                  */
3764                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3765                         goto out_kick;
3766
3767                 /*
3768                  * Queue depth flag is reset only when the idle didn't succeed
3769                  */
3770                 cfq_clear_cfqq_deep(cfqq);
3771         }
3772 expire:
3773         cfq_slice_expired(cfqd, timed_out);
3774 out_kick:
3775         cfq_schedule_dispatch(cfqd);
3776 out_cont:
3777         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3778 }
3779
3780 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3781 {
3782         del_timer_sync(&cfqd->idle_slice_timer);
3783         cancel_work_sync(&cfqd->unplug_work);
3784 }
3785
3786 static void cfq_put_async_queues(struct cfq_data *cfqd)
3787 {
3788         int i;
3789
3790         for (i = 0; i < IOPRIO_BE_NR; i++) {
3791                 if (cfqd->async_cfqq[0][i])
3792                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3793                 if (cfqd->async_cfqq[1][i])
3794                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3795         }
3796
3797         if (cfqd->async_idle_cfqq)
3798                 cfq_put_queue(cfqd->async_idle_cfqq);
3799 }
3800
3801 static void cfq_cfqd_free(struct rcu_head *head)
3802 {
3803         kfree(container_of(head, struct cfq_data, rcu));
3804 }
3805
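/*
 * Elevator teardown: expire the active queue, detach every cic, release the
 * async queues and groups, return the cic index and free cfqd once the RCU
 * grace period has passed.
 */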
3806 static void cfq_exit_queue(struct elevator_queue *e)
3807 {
3808         struct cfq_data *cfqd = e->elevator_data;
3809         struct request_queue *q = cfqd->queue;
3810
3811         cfq_shutdown_timer_wq(cfqd);
3812
3813         spin_lock_irq(q->queue_lock);
3814
3815         if (cfqd->active_queue)
3816                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3817
3818         while (!list_empty(&cfqd->cic_list)) {
3819                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3820                                                         struct cfq_io_context,
3821                                                         queue_list);
3822
3823                 __cfq_exit_single_io_context(cfqd, cic);
3824         }
3825
3826         cfq_put_async_queues(cfqd);
3827         cfq_release_cfq_groups(cfqd);
3828         cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
3829
3830         spin_unlock_irq(q->queue_lock);
3831
3832         cfq_shutdown_timer_wq(cfqd);
3833
3834         spin_lock(&cic_index_lock);
3835         ida_remove(&cic_index_ida, cfqd->cic_index);
3836         spin_unlock(&cic_index_lock);
3837
3838         /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
3839         call_rcu(&cfqd->rcu, cfq_cfqd_free);
3840 }
3841
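/*
 * Allocate a unique index into each io_context's radix tree for this cfqd.
 * ida_get_new() may still return -EAGAIN after ida_pre_get(), in which case
 * we simply retry.
 */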
3842 static int cfq_alloc_cic_index(void)
3843 {
3844         int index, error;
3845
3846         do {
3847                 if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
3848                         return -ENOMEM;
3849
3850                 spin_lock(&cic_index_lock);
3851                 error = ida_get_new(&cic_index_ida, &index);
3852                 spin_unlock(&cic_index_lock);
3853                 if (error && error != -EAGAIN)
3854                         return error;
3855         } while (error);
3856
3857         return index;
3858 }
3859
3860 static void *cfq_init_queue(struct request_queue *q)
3861 {
3862         struct cfq_data *cfqd;
3863         int i, j;
3864         struct cfq_group *cfqg;
3865         struct cfq_rb_root *st;
3866
3867         i = cfq_alloc_cic_index();
3868         if (i < 0)
3869                 return NULL;
3870
3871         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3872         if (!cfqd)
3873                 return NULL;
3874
3875         /*
3876          * No need to take queue_lock in this routine, since we are
3877          * initializing the ioscheduler and nobody is using cfqd yet
3878          */
3879         cfqd->cic_index = i;
3880
3881         /* Init root service tree */
3882         cfqd->grp_service_tree = CFQ_RB_ROOT;
3883
3884         /* Init root group */
3885         cfqg = &cfqd->root_group;
3886         for_each_cfqg_st(cfqg, i, j, st)
3887                 *st = CFQ_RB_ROOT;
3888         RB_CLEAR_NODE(&cfqg->rb_node);
3889
3890         /* Give preference to root group over other groups */
3891         cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3892
3893 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3894         /*
3895          * Take a reference to the root group which we never drop. This is
3896          * just to make sure that cfq_put_cfqg() does not try to kfree it
3897          */
3898         cfqg->ref = 1;
3899         rcu_read_lock();
3900         cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3901                                         (void *)cfqd, 0);
3902         rcu_read_unlock();
3903 #endif
3904         /*
3905          * Not strictly needed (since RB_ROOT just clears the node and we
3906          * zeroed cfqd on alloc), but better to be safe in case someone decides
3907          * to add magic to the rb code
3908          */
3909         for (i = 0; i < CFQ_PRIO_LISTS; i++)
3910                 cfqd->prio_trees[i] = RB_ROOT;
3911
3912         /*
3913          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3914          * Grab a permanent reference to it, so that the normal code flow
3915          * will not attempt to free it.
3916          */
3917         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3918         cfqd->oom_cfqq.ref++;
3919         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3920
3921         INIT_LIST_HEAD(&cfqd->cic_list);
3922
3923         cfqd->queue = q;
3924
3925         init_timer(&cfqd->idle_slice_timer);
3926         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3927         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3928
3929         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3930
3931         cfqd->cfq_quantum = cfq_quantum;
3932         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3933         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3934         cfqd->cfq_back_max = cfq_back_max;
3935         cfqd->cfq_back_penalty = cfq_back_penalty;
3936         cfqd->cfq_slice[0] = cfq_slice_async;
3937         cfqd->cfq_slice[1] = cfq_slice_sync;
3938         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3939         cfqd->cfq_slice_idle = cfq_slice_idle;
3940         cfqd->cfq_group_idle = cfq_group_idle;
3941         cfqd->cfq_latency = 1;
3942         cfqd->hw_tag = -1;
3943         /*
3944          * we optimistically start assuming sync ops weren't delayed in the
3945          * last second, in order to have larger depth for async operations.
3946          */
3947         cfqd->last_delayed_sync = jiffies - HZ;
3948         return cfqd;
3949 }
3950
3951 static void cfq_slab_kill(void)
3952 {
3953         /*
3954          * Caller already ensured that pending RCU callbacks are completed,
3955          * so we should have no busy allocations at this point.
3956          */
3957         if (cfq_pool)
3958                 kmem_cache_destroy(cfq_pool);
3959         if (cfq_ioc_pool)
3960                 kmem_cache_destroy(cfq_ioc_pool);
3961 }
3962
3963 static int __init cfq_slab_setup(void)
3964 {
3965         cfq_pool = KMEM_CACHE(cfq_queue, 0);
3966         if (!cfq_pool)
3967                 goto fail;
3968
3969         cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
3970         if (!cfq_ioc_pool)
3971                 goto fail;
3972
3973         return 0;
3974 fail:
3975         cfq_slab_kill();
3976         return -ENOMEM;
3977 }
3978
3979 /*
3980  * sysfs parts below -->
3981  */
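/*
 * These attributes typically appear under /sys/block/<dev>/queue/iosched/
 * (e.g. slice_idle, low_latency) for a queue using cfq; the exact path
 * depends on how the request queue is registered.
 */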
3982 static ssize_t
3983 cfq_var_show(unsigned int var, char *page)
3984 {
3985         return sprintf(page, "%d\n", var);
3986 }
3987
3988 static ssize_t
3989 cfq_var_store(unsigned int *var, const char *page, size_t count)
3990 {
3991         char *p = (char *) page;
3992
3993         *var = simple_strtoul(p, &p, 10);
3994         return count;
3995 }
3996
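/*
 * In the SHOW/STORE macros below, __CONV marks tunables that are stored in
 * jiffies internally but exposed in milliseconds through sysfs.
 */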
3997 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
3998 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
3999 {                                                                       \
4000         struct cfq_data *cfqd = e->elevator_data;                       \
4001         unsigned int __data = __VAR;                                    \
4002         if (__CONV)                                                     \
4003                 __data = jiffies_to_msecs(__data);                      \
4004         return cfq_var_show(__data, (page));                            \
4005 }
4006 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4007 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4008 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4009 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4010 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4011 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4012 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4013 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4014 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4015 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4016 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4017 #undef SHOW_FUNCTION
4018
4019 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4020 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4021 {                                                                       \
4022         struct cfq_data *cfqd = e->elevator_data;                       \
4023         unsigned int __data;                                            \
4024         int ret = cfq_var_store(&__data, (page), count);                \
4025         if (__data < (MIN))                                             \
4026                 __data = (MIN);                                         \
4027         else if (__data > (MAX))                                        \
4028                 __data = (MAX);                                         \
4029         if (__CONV)                                                     \
4030                 *(__PTR) = msecs_to_jiffies(__data);                    \
4031         else                                                            \
4032                 *(__PTR) = __data;                                      \
4033         return ret;                                                     \
4034 }
4035 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4036 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4037                 UINT_MAX, 1);
4038 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4039                 UINT_MAX, 1);
4040 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4041 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4042                 UINT_MAX, 0);
4043 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4044 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4045 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4046 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4047 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4048                 UINT_MAX, 0);
4049 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4050 #undef STORE_FUNCTION
4051
4052 #define CFQ_ATTR(name) \
4053         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4054
4055 static struct elv_fs_entry cfq_attrs[] = {
4056         CFQ_ATTR(quantum),
4057         CFQ_ATTR(fifo_expire_sync),
4058         CFQ_ATTR(fifo_expire_async),
4059         CFQ_ATTR(back_seek_max),
4060         CFQ_ATTR(back_seek_penalty),
4061         CFQ_ATTR(slice_sync),
4062         CFQ_ATTR(slice_async),
4063         CFQ_ATTR(slice_async_rq),
4064         CFQ_ATTR(slice_idle),
4065         CFQ_ATTR(group_idle),
4066         CFQ_ATTR(low_latency),
4067         __ATTR_NULL
4068 };
4069
4070 static struct elevator_type iosched_cfq = {
4071         .ops = {
4072                 .elevator_merge_fn =            cfq_merge,
4073                 .elevator_merged_fn =           cfq_merged_request,
4074                 .elevator_merge_req_fn =        cfq_merged_requests,
4075                 .elevator_allow_merge_fn =      cfq_allow_merge,
4076                 .elevator_bio_merged_fn =       cfq_bio_merged,
4077                 .elevator_dispatch_fn =         cfq_dispatch_requests,
4078                 .elevator_add_req_fn =          cfq_insert_request,
4079                 .elevator_activate_req_fn =     cfq_activate_request,
4080                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
4081                 .elevator_completed_req_fn =    cfq_completed_request,
4082                 .elevator_former_req_fn =       elv_rb_former_request,
4083                 .elevator_latter_req_fn =       elv_rb_latter_request,
4084                 .elevator_set_req_fn =          cfq_set_request,
4085                 .elevator_put_req_fn =          cfq_put_request,
4086                 .elevator_may_queue_fn =        cfq_may_queue,
4087                 .elevator_init_fn =             cfq_init_queue,
4088                 .elevator_exit_fn =             cfq_exit_queue,
4089                 .trim =                         cfq_free_io_context,
4090         },
4091         .elevator_attrs =       cfq_attrs,
4092         .elevator_name =        "cfq",
4093         .elevator_owner =       THIS_MODULE,
4094 };
4095
4096 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4097 static struct blkio_policy_type blkio_policy_cfq = {
4098         .ops = {
4099                 .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
4100                 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
4101         },
4102         .plid = BLKIO_POLICY_PROP,
4103 };
4104 #else
4105 static struct blkio_policy_type blkio_policy_cfq;
4106 #endif
4107
4108 static int __init cfq_init(void)
4109 {
4110         /*
4111          * could be 0 on HZ < 1000 setups
4112          */
4113         if (!cfq_slice_async)
4114                 cfq_slice_async = 1;
4115         if (!cfq_slice_idle)
4116                 cfq_slice_idle = 1;
4117
4118 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4119         if (!cfq_group_idle)
4120                 cfq_group_idle = 1;
4121 #else
4122         cfq_group_idle = 0;
4123 #endif
4124         if (cfq_slab_setup())
4125                 return -ENOMEM;
4126
4127         elv_register(&iosched_cfq);
4128         blkio_policy_register(&blkio_policy_cfq);
4129
4130         return 0;
4131 }
4132
4133 static void __exit cfq_exit(void)
4134 {
4135         DECLARE_COMPLETION_ONSTACK(all_gone);
4136         blkio_policy_unregister(&blkio_policy_cfq);
4137         elv_unregister(&iosched_cfq);
4138         ioc_gone = &all_gone;
4139         /* ioc_gone's update must be visible before reading ioc_count */
4140         smp_wmb();
4141
4142         /*
4143          * this also protects us from entering cfq_slab_kill() with
4144          * pending RCU callbacks
4145          */
4146         if (elv_ioc_count_read(cfq_ioc_count))
4147                 wait_for_completion(&all_gone);
4148         ida_destroy(&cic_index_ida);
4149         cfq_slab_kill();
4150 }
4151
4152 module_init(cfq_init);
4153 module_exit(cfq_exit);
4154
4155 MODULE_AUTHOR("Jens Axboe");
4156 MODULE_LICENSE("GPL");
4157 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");