cfq-iosched: fix next_rq computation
[linux-2.6.git] / block / cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/rbtree.h>
13 #include <linux/ioprio.h>
14 #include <linux/blktrace_api.h>
15
16 /*
17  * tunables
18  */
19 /* max requests dispatched from one queue in a round of service */
20 static const int cfq_quantum = 4;
21 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
22 /* maximum backwards seek, in KiB */
23 static const int cfq_back_max = 16 * 1024;
24 /* penalty of a backwards seek */
25 static const int cfq_back_penalty = 2;
26 static const int cfq_slice_sync = HZ / 10;
27 static int cfq_slice_async = HZ / 25;
28 static const int cfq_slice_async_rq = 2;
29 static int cfq_slice_idle = HZ / 125;
30 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
31 static const int cfq_hist_divisor = 4;
32
33 /*
34  * offset from end of service tree
35  */
36 #define CFQ_IDLE_DELAY          (HZ / 5)
37
38 /*
39  * below this threshold, we consider thinktime immediate
40  */
41 #define CFQ_MIN_TT              (2)
42
43 /*
44  * Allow merged cfqqs to perform this amount of seeky I/O before
45  * deciding to break the queues up again.
46  */
47 #define CFQQ_COOP_TOUT          (HZ)
48
49 #define CFQ_SLICE_SCALE         (5)
50 #define CFQ_HW_QUEUE_MIN        (5)
51
52 #define RQ_CIC(rq)              \
53         ((struct cfq_io_context *) (rq)->elevator_private)
54 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
55
56 static struct kmem_cache *cfq_pool;
57 static struct kmem_cache *cfq_ioc_pool;
58
59 static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
60 static struct completion *ioc_gone;
61 static DEFINE_SPINLOCK(ioc_gone_lock);
62
63 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
64 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66
67 #define sample_valid(samples)   ((samples) > 80)
68
69 /*
70  * Most of our rbtree usage is for sorting with min extraction, so
71  * if we cache the leftmost node we don't have to walk down the tree
72  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
73  * move this into the elevator for the rq sorting as well.
74  */
75 struct cfq_rb_root {
76         struct rb_root rb;
77         struct rb_node *left;
78         unsigned count;
79 };
80 #define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, 0, }
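/*
 * Illustrative note: the cached leftmost pointer is maintained lazily.
 * cfq_rb_first() below falls back to rb_first() when ->left is NULL,
 * insertion (cfq_service_tree_add()) updates it when the new node lands
 * leftmost, and cfq_rb_erase() invalidates it when the cached node goes.
 */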
81
82 /*
83  * Per process-grouping structure
84  */
85 struct cfq_queue {
86         /* reference count */
87         atomic_t ref;
88         /* various state flags, see below */
89         unsigned int flags;
90         /* parent cfq_data */
91         struct cfq_data *cfqd;
92         /* service_tree member */
93         struct rb_node rb_node;
94         /* service_tree key */
95         unsigned long rb_key;
96         /* prio tree member */
97         struct rb_node p_node;
98         /* prio tree root we belong to, if any */
99         struct rb_root *p_root;
100         /* sorted list of pending requests */
101         struct rb_root sort_list;
102         /* if fifo isn't expired, next request to serve */
103         struct request *next_rq;
104         /* requests queued in sort_list */
105         int queued[2];
106         /* currently allocated requests */
107         int allocated[2];
108         /* fifo list of requests in sort_list */
109         struct list_head fifo;
110
111         unsigned long slice_end;
112         long slice_resid;
113         unsigned int slice_dispatch;
114
115         /* pending metadata requests */
116         int meta_pending;
117         /* number of requests that are on the dispatch list or inside driver */
118         int dispatched;
119
120         /* io prio of this group */
121         unsigned short ioprio, org_ioprio;
122         unsigned short ioprio_class, org_ioprio_class;
123
124         unsigned int seek_samples;
125         u64 seek_total;
126         sector_t seek_mean;
127         sector_t last_request_pos;
128         unsigned long seeky_start;
129
130         pid_t pid;
131
132         struct cfq_rb_root *service_tree;
133         struct cfq_queue *new_cfqq;
134 };
135
136 /*
137  * First index in the service_trees.
138  * IDLE is handled separately, so it has negative index
139  */
140 enum wl_prio_t {
141         IDLE_WORKLOAD = -1,
142         BE_WORKLOAD = 0,
143         RT_WORKLOAD = 1
144 };
145
146 /*
147  * Second index in the service_trees.
148  */
149 enum wl_type_t {
150         ASYNC_WORKLOAD = 0,
151         SYNC_NOIDLE_WORKLOAD = 1,
152         SYNC_WORKLOAD = 2
153 };
154
155
156 /*
157  * Per block device queue structure
158  */
159 struct cfq_data {
160         struct request_queue *queue;
161
162         /*
163          * rr lists of queues with requests, one rr for each priority class.
164          * Counts are embedded in the cfq_rb_root
165          */
166         struct cfq_rb_root service_trees[2][3];
167         struct cfq_rb_root service_tree_idle;
168         /*
169          * The priority currently being served
170          */
171         enum wl_prio_t serving_prio;
172         enum wl_type_t serving_type;
173         unsigned long workload_expires;
174
175         /*
176          * Each priority tree is sorted by next_request position.  These
177          * trees are used when determining if two or more queues are
178          * interleaving requests (see cfq_close_cooperator).
179          */
180         struct rb_root prio_trees[CFQ_PRIO_LISTS];
181
182         unsigned int busy_queues;
183         unsigned int busy_queues_avg[2];
184
185         int rq_in_driver[2];
186         int sync_flight;
187
188         /*
189          * queue-depth detection
190          */
191         int rq_queued;
192         int hw_tag;
193         int hw_tag_samples;
194         int rq_in_driver_peak;
195
196         /*
197          * idle window management
198          */
199         struct timer_list idle_slice_timer;
200         struct work_struct unplug_work;
201
202         struct cfq_queue *active_queue;
203         struct cfq_io_context *active_cic;
204
205         /*
206          * async queue for each priority class
207          */
208         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
209         struct cfq_queue *async_idle_cfqq;
210
211         sector_t last_position;
212
213         /*
214          * tunables, see top of file
215          */
216         unsigned int cfq_quantum;
217         unsigned int cfq_fifo_expire[2];
218         unsigned int cfq_back_penalty;
219         unsigned int cfq_back_max;
220         unsigned int cfq_slice[2];
221         unsigned int cfq_slice_async_rq;
222         unsigned int cfq_slice_idle;
223         unsigned int cfq_latency;
224
225         struct list_head cic_list;
226
227         /*
228          * Fallback dummy cfqq for extreme OOM conditions
229          */
230         struct cfq_queue oom_cfqq;
231
232         unsigned long last_end_sync_rq;
233 };
234
235 static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio,
236                                             enum wl_type_t type,
237                                             struct cfq_data *cfqd)
238 {
239         if (prio == IDLE_WORKLOAD)
240                 return &cfqd->service_tree_idle;
241
242         return &cfqd->service_trees[prio][type];
243 }
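/*
 * Example of the indexing above: a sync queue with idling enabled in the
 * best-effort class maps to service_trees[BE_WORKLOAD][SYNC_WORKLOAD],
 * i.e. service_trees[0][2]; IDLE_WORKLOAD bypasses the matrix entirely.
 */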
244
245 enum cfqq_state_flags {
246         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
247         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
248         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
249         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
250         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
251         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
252         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
253         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
254         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
255         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
256 };
257
258 #define CFQ_CFQQ_FNS(name)                                              \
259 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
260 {                                                                       \
261         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
262 }                                                                       \
263 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
264 {                                                                       \
265         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
266 }                                                                       \
267 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
268 {                                                                       \
269         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
270 }
271
272 CFQ_CFQQ_FNS(on_rr);
273 CFQ_CFQQ_FNS(wait_request);
274 CFQ_CFQQ_FNS(must_dispatch);
275 CFQ_CFQQ_FNS(must_alloc_slice);
276 CFQ_CFQQ_FNS(fifo_expire);
277 CFQ_CFQQ_FNS(idle_window);
278 CFQ_CFQQ_FNS(prio_changed);
279 CFQ_CFQQ_FNS(slice_new);
280 CFQ_CFQQ_FNS(sync);
281 CFQ_CFQQ_FNS(coop);
282 #undef CFQ_CFQQ_FNS
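/*
 * For reference, each CFQ_CFQQ_FNS(name) invocation above expands to three
 * bit helpers, e.g. CFQ_CFQQ_FNS(on_rr) yields:
 *
 *   cfq_mark_cfqq_on_rr(cfqq)  -> cfqq->flags |=  (1 << CFQ_CFQQ_FLAG_on_rr)
 *   cfq_clear_cfqq_on_rr(cfqq) -> cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr)
 *   cfq_cfqq_on_rr(cfqq)       -> tests the same bit, returning 0 or 1
 */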
283
284 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
285         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
286 #define cfq_log(cfqd, fmt, args...)     \
287         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
288
289 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
290 {
291         if (cfq_class_idle(cfqq))
292                 return IDLE_WORKLOAD;
293         if (cfq_class_rt(cfqq))
294                 return RT_WORKLOAD;
295         return BE_WORKLOAD;
296 }
297
298
299 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
300 {
301         if (!cfq_cfqq_sync(cfqq))
302                 return ASYNC_WORKLOAD;
303         if (!cfq_cfqq_idle_window(cfqq))
304                 return SYNC_NOIDLE_WORKLOAD;
305         return SYNC_WORKLOAD;
306 }
307
308 static inline int cfq_busy_queues_wl(enum wl_prio_t wl, struct cfq_data *cfqd)
309 {
310         if (wl == IDLE_WORKLOAD)
311                 return cfqd->service_tree_idle.count;
312
313         return cfqd->service_trees[wl][ASYNC_WORKLOAD].count
314                 + cfqd->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
315                 + cfqd->service_trees[wl][SYNC_WORKLOAD].count;
316 }
317
318 static void cfq_dispatch_insert(struct request_queue *, struct request *);
319 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
320                                        struct io_context *, gfp_t);
321 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
322                                                 struct io_context *);
323
324 static inline int rq_in_driver(struct cfq_data *cfqd)
325 {
326         return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
327 }
328
329 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
330                                             bool is_sync)
331 {
332         return cic->cfqq[is_sync];
333 }
334
335 static inline void cic_set_cfqq(struct cfq_io_context *cic,
336                                 struct cfq_queue *cfqq, bool is_sync)
337 {
338         cic->cfqq[is_sync] = cfqq;
339 }
340
341 /*
342  * We regard a request as SYNC if it's either a read or has the SYNC bit
343  * set (in which case it could also be a direct WRITE).
344  */
345 static inline bool cfq_bio_sync(struct bio *bio)
346 {
347         return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
348 }
349
350 /*
351  * Schedule a run of the queue if there are requests pending and nothing
352  * in the driver that would restart queueing.
353  */
354 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
355 {
356         if (cfqd->busy_queues) {
357                 cfq_log(cfqd, "schedule dispatch");
358                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
359         }
360 }
361
362 static int cfq_queue_empty(struct request_queue *q)
363 {
364         struct cfq_data *cfqd = q->elevator->elevator_data;
365
366         return !cfqd->busy_queues;
367 }
368
369 /*
370  * Scale schedule slice based on io priority. Use the sync time slice only
371  * if a queue is marked sync and has sync io queued. A sync queue with async
372  * io only should not get the full sync slice length.
373  */
374 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
375                                  unsigned short prio)
376 {
377         const int base_slice = cfqd->cfq_slice[sync];
378
379         WARN_ON(prio >= IOPRIO_BE_NR);
380
381         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
382 }
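/*
 * Worked example, assuming HZ=1000 and the default cfq_slice_sync of
 * HZ/10 (100ms): base_slice/CFQ_SLICE_SCALE is 20ms per priority step,
 * so the default prio 4 gets exactly 100ms, prio 0 gets 180ms and
 * prio 7 gets 40ms.
 */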
383
384 static inline int
385 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
386 {
387         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
388 }
389
390 /*
391  * Get the averaged number of queues of RT/BE priority.
392  * The average is updated with a formula that gives more weight to higher
393  * numbers, following sudden increases quickly while decaying slowly.
394  */
395
396 static inline unsigned cfq_get_avg_queues(struct cfq_data *cfqd, bool rt)
397 {
398         unsigned min_q, max_q;
399         unsigned mult  = cfq_hist_divisor - 1;
400         unsigned round = cfq_hist_divisor / 2;
401         unsigned busy = cfq_busy_queues_wl(rt, cfqd);
402
403         min_q = min(cfqd->busy_queues_avg[rt], busy);
404         max_q = max(cfqd->busy_queues_avg[rt], busy);
405         cfqd->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
406                 cfq_hist_divisor;
407         return cfqd->busy_queues_avg[rt];
408 }
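/*
 * With cfq_hist_divisor = 4 the update above is (3 * max + min + 2) / 4.
 * Illustration: an average of 2 jumps to (3 * 10 + 2 + 2) / 4 = 8 as soon
 * as 10 queues are busy, but once they drain it only decays one step per
 * update (8 -> 7 -> 6 -> ...), matching the comment above the function.
 */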
409
410 static inline void
411 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
412 {
413         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
414         if (cfqd->cfq_latency) {
415                 /* interested queues (we consider only the ones with the same
416                  * priority class) */
417                 unsigned iq = cfq_get_avg_queues(cfqd, cfq_class_rt(cfqq));
418                 unsigned sync_slice = cfqd->cfq_slice[1];
419                 unsigned expect_latency = sync_slice * iq;
420                 if (expect_latency > cfq_target_latency) {
421                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
422                         /* scale low_slice according to IO priority
423                          * and sync vs async */
424                         unsigned low_slice =
425                                 min(slice, base_low_slice * slice / sync_slice);
426                         /* the adapted slice value is scaled to fit all iqs
427                          * into the target latency */
428                         slice = max(slice * cfq_target_latency / expect_latency,
429                                     low_slice);
430                 }
431         }
432         cfqq->slice_end = jiffies + slice;
433         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
434 }
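/*
 * Numeric example with the defaults (HZ=1000, 100ms sync slice, 300ms
 * target latency, 8ms slice idle): five interested queues would need
 * expect_latency = 500ms, so a 100ms slice is shrunk to
 * 100 * 300 / 500 = 60ms, bounded below by
 * low_slice = min(100, 16 * 100 / 100) = 16ms.
 */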
435
436 /*
437  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
438  * isn't valid until the first request from the dispatch is activated
439  * and the slice time set.
440  */
441 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
442 {
443         if (cfq_cfqq_slice_new(cfqq))
444                 return 0;
445         if (time_before(jiffies, cfqq->slice_end))
446                 return 0;
447
448         return 1;
449 }
450
451 /*
452  * Lifted from AS - choose which of rq1 and rq2 is best served now.
453  * We choose the request that is closest to the head right now. Distance
454  * behind the head is penalized and only allowed to a certain extent.
455  */
456 static struct request *
457 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
458 {
459         sector_t s1, s2, d1 = 0, d2 = 0;
460         unsigned long back_max;
461 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
462 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
463         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
464
465         if (rq1 == NULL || rq1 == rq2)
466                 return rq2;
467         if (rq2 == NULL)
468                 return rq1;
469
470         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
471                 return rq1;
472         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
473                 return rq2;
474         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
475                 return rq1;
476         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
477                 return rq2;
478
479         s1 = blk_rq_pos(rq1);
480         s2 = blk_rq_pos(rq2);
481
482         /*
483          * by definition, 1KiB is 2 sectors
484          */
485         back_max = cfqd->cfq_back_max * 2;
486
487         /*
488          * Strict one way elevator _except_ in the case where we allow
489          * short backward seeks which are biased as twice the cost of a
490          * similar forward seek.
491          */
492         if (s1 >= last)
493                 d1 = s1 - last;
494         else if (s1 + back_max >= last)
495                 d1 = (last - s1) * cfqd->cfq_back_penalty;
496         else
497                 wrap |= CFQ_RQ1_WRAP;
498
499         if (s2 >= last)
500                 d2 = s2 - last;
501         else if (s2 + back_max >= last)
502                 d2 = (last - s2) * cfqd->cfq_back_penalty;
503         else
504                 wrap |= CFQ_RQ2_WRAP;
505
506         /* Found required data */
507
508         /*
509          * By doing switch() on the bit mask "wrap" we avoid having to
510          * check two variables for all permutations: --> faster!
511          */
512         switch (wrap) {
513         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
514                 if (d1 < d2)
515                         return rq1;
516                 else if (d2 < d1)
517                         return rq2;
518                 else {
519                         if (s1 >= s2)
520                                 return rq1;
521                         else
522                                 return rq2;
523                 }
524
525         case CFQ_RQ2_WRAP:
526                 return rq1;
527         case CFQ_RQ1_WRAP:
528                 return rq2;
529         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
530         default:
531                 /*
532                  * Since both rqs are wrapped,
533                  * start with the one that's further behind head
534                  * (--> only *one* back seek required),
535                  * since back seek takes more time than forward.
536                  */
537                 if (s1 <= s2)
538                         return rq1;
539                 else
540                         return rq2;
541         }
542 }
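/*
 * Illustration of the distance logic: with the head at sector 1000, a
 * request at sector 1100 scores d = 100, while one at sector 900 scores
 * d = (1000 - 900) * cfq_back_penalty = 200, so the forward request wins
 * even though both are 100 sectors away from the head.
 */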
543
544 /*
545  * Below are the leftmost-cache rbtree helpers.
546  */
547 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
548 {
549         if (!root->left)
550                 root->left = rb_first(&root->rb);
551
552         if (root->left)
553                 return rb_entry(root->left, struct cfq_queue, rb_node);
554
555         return NULL;
556 }
557
558 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
559 {
560         rb_erase(n, root);
561         RB_CLEAR_NODE(n);
562 }
563
564 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
565 {
566         if (root->left == n)
567                 root->left = NULL;
568         rb_erase_init(n, &root->rb);
569         --root->count;
570 }
571
572 /*
573  * would be nice to take fifo expire time into account as well
574  */
575 static struct request *
576 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
577                   struct request *last)
578 {
579         struct rb_node *rbnext = rb_next(&last->rb_node);
580         struct rb_node *rbprev = rb_prev(&last->rb_node);
581         struct request *next = NULL, *prev = NULL;
582
583         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
584
585         if (rbprev)
586                 prev = rb_entry_rq(rbprev);
587
588         if (rbnext)
589                 next = rb_entry_rq(rbnext);
590         else {
591                 rbnext = rb_first(&cfqq->sort_list);
592                 if (rbnext && rbnext != &last->rb_node)
593                         next = rb_entry_rq(rbnext);
594         }
595
596         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
597 }
598
599 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
600                                       struct cfq_queue *cfqq)
601 {
602         /*
603          * just an approximation, should be ok.
604          */
605         return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
606                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
607 }
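/*
 * Example with the defaults at HZ=1000: cfq_prio_slice(cfqd, 1, 0) is
 * 180ms, so with 4 busy queues a sync prio 4 queue (100ms slice) is
 * keyed 3 * (180 - 100) = 240ms past jiffies, while a sync prio 0
 * queue gets offset 0 and sorts towards the front of the tree.
 */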
608
609 /*
610  * The cfqd->service_trees hold all pending cfq_queues that have
611  * requests waiting to be processed. It is sorted in the order that
612  * we will service the queues.
613  */
614 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
615                                  bool add_front)
616 {
617         struct rb_node **p, *parent;
618         struct cfq_queue *__cfqq;
619         unsigned long rb_key;
620         struct cfq_rb_root *service_tree;
621         int left;
622
623         service_tree = service_tree_for(cfqq_prio(cfqq), cfqq_type(cfqq), cfqd);
624         if (cfq_class_idle(cfqq)) {
625                 rb_key = CFQ_IDLE_DELAY;
626                 parent = rb_last(&service_tree->rb);
627                 if (parent && parent != &cfqq->rb_node) {
628                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
629                         rb_key += __cfqq->rb_key;
630                 } else
631                         rb_key += jiffies;
632         } else if (!add_front) {
633                 /*
634                  * Get our rb key offset. Subtract any residual slice
635                  * value carried from last service. A negative resid
636                  * count indicates slice overrun, and this should position
637                  * the next service time further away in the tree.
638                  */
639                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
640                 rb_key -= cfqq->slice_resid;
641                 cfqq->slice_resid = 0;
642         } else {
643                 rb_key = -HZ;
644                 __cfqq = cfq_rb_first(service_tree);
645                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
646         }
647
648         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
649                 /*
650                  * same position, nothing more to do
651                  */
652                 if (rb_key == cfqq->rb_key &&
653                     cfqq->service_tree == service_tree)
654                         return;
655
656                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
657                 cfqq->service_tree = NULL;
658         }
659
660         left = 1;
661         parent = NULL;
662         cfqq->service_tree = service_tree;
663         p = &service_tree->rb.rb_node;
664         while (*p) {
665                 struct rb_node **n;
666
667                 parent = *p;
668                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
669
670                 /*
671                  * sort by key, that represents service time.
672                  */
673                 if (time_before(rb_key, __cfqq->rb_key))
674                         n = &(*p)->rb_left;
675                 else {
676                         n = &(*p)->rb_right;
677                         left = 0;
678                 }
679
680                 p = n;
681         }
682
683         if (left)
684                 service_tree->left = &cfqq->rb_node;
685
686         cfqq->rb_key = rb_key;
687         rb_link_node(&cfqq->rb_node, parent, p);
688         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
689         service_tree->count++;
690 }
691
692 static struct cfq_queue *
693 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
694                      sector_t sector, struct rb_node **ret_parent,
695                      struct rb_node ***rb_link)
696 {
697         struct rb_node **p, *parent;
698         struct cfq_queue *cfqq = NULL;
699
700         parent = NULL;
701         p = &root->rb_node;
702         while (*p) {
703                 struct rb_node **n;
704
705                 parent = *p;
706                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
707
708                 /*
709                  * Sort strictly based on sector.  Smallest to the left,
710                  * largest to the right.
711                  */
712                 if (sector > blk_rq_pos(cfqq->next_rq))
713                         n = &(*p)->rb_right;
714                 else if (sector < blk_rq_pos(cfqq->next_rq))
715                         n = &(*p)->rb_left;
716                 else
717                         break;
718                 p = n;
719                 cfqq = NULL;
720         }
721
722         *ret_parent = parent;
723         if (rb_link)
724                 *rb_link = p;
725         return cfqq;
726 }
727
728 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
729 {
730         struct rb_node **p, *parent;
731         struct cfq_queue *__cfqq;
732
733         if (cfqq->p_root) {
734                 rb_erase(&cfqq->p_node, cfqq->p_root);
735                 cfqq->p_root = NULL;
736         }
737
738         if (cfq_class_idle(cfqq))
739                 return;
740         if (!cfqq->next_rq)
741                 return;
742
743         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
744         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
745                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
746         if (!__cfqq) {
747                 rb_link_node(&cfqq->p_node, parent, p);
748                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
749         } else
750                 cfqq->p_root = NULL;
751 }
752
753 /*
754  * Update cfqq's position in the service tree.
755  */
756 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
757 {
758         /*
759          * Resorting requires the cfqq to be on the RR list already.
760          */
761         if (cfq_cfqq_on_rr(cfqq)) {
762                 cfq_service_tree_add(cfqd, cfqq, 0);
763                 cfq_prio_tree_add(cfqd, cfqq);
764         }
765 }
766
767 /*
768  * add to busy list of queues for service, trying to be fair in ordering
769  * the pending list according to last request service
770  */
771 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
772 {
773         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
774         BUG_ON(cfq_cfqq_on_rr(cfqq));
775         cfq_mark_cfqq_on_rr(cfqq);
776         cfqd->busy_queues++;
777
778         cfq_resort_rr_list(cfqd, cfqq);
779 }
780
781 /*
782  * Called when the cfqq no longer has requests pending, remove it from
783  * the service tree.
784  */
785 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
786 {
787         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
788         BUG_ON(!cfq_cfqq_on_rr(cfqq));
789         cfq_clear_cfqq_on_rr(cfqq);
790
791         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
792                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
793                 cfqq->service_tree = NULL;
794         }
795         if (cfqq->p_root) {
796                 rb_erase(&cfqq->p_node, cfqq->p_root);
797                 cfqq->p_root = NULL;
798         }
799
800         BUG_ON(!cfqd->busy_queues);
801         cfqd->busy_queues--;
802 }
803
804 /*
805  * rb tree support functions
806  */
807 static void cfq_del_rq_rb(struct request *rq)
808 {
809         struct cfq_queue *cfqq = RQ_CFQQ(rq);
810         struct cfq_data *cfqd = cfqq->cfqd;
811         const int sync = rq_is_sync(rq);
812
813         BUG_ON(!cfqq->queued[sync]);
814         cfqq->queued[sync]--;
815
816         elv_rb_del(&cfqq->sort_list, rq);
817
818         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
819                 cfq_del_cfqq_rr(cfqd, cfqq);
820 }
821
822 static void cfq_add_rq_rb(struct request *rq)
823 {
824         struct cfq_queue *cfqq = RQ_CFQQ(rq);
825         struct cfq_data *cfqd = cfqq->cfqd;
826         struct request *__alias, *prev;
827
828         cfqq->queued[rq_is_sync(rq)]++;
829
830         /*
831          * looks a little odd, but the first insert might return an alias.
832          * if that happens, put the alias on the dispatch list
833          */
834         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
835                 cfq_dispatch_insert(cfqd->queue, __alias);
836
837         if (!cfq_cfqq_on_rr(cfqq))
838                 cfq_add_cfqq_rr(cfqd, cfqq);
839
840         /*
841          * check if this request is a better next-serve candidate
842          */
843         prev = cfqq->next_rq;
844         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
845
846         /*
847          * adjust priority tree position, if ->next_rq changes
848          */
849         if (prev != cfqq->next_rq)
850                 cfq_prio_tree_add(cfqd, cfqq);
851
852         BUG_ON(!cfqq->next_rq);
853 }
854
855 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
856 {
857         elv_rb_del(&cfqq->sort_list, rq);
858         cfqq->queued[rq_is_sync(rq)]--;
859         cfq_add_rq_rb(rq);
860 }
861
862 static struct request *
863 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
864 {
865         struct task_struct *tsk = current;
866         struct cfq_io_context *cic;
867         struct cfq_queue *cfqq;
868
869         cic = cfq_cic_lookup(cfqd, tsk->io_context);
870         if (!cic)
871                 return NULL;
872
873         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
874         if (cfqq) {
875                 sector_t sector = bio->bi_sector + bio_sectors(bio);
876
877                 return elv_rb_find(&cfqq->sort_list, sector);
878         }
879
880         return NULL;
881 }
882
883 static void cfq_activate_request(struct request_queue *q, struct request *rq)
884 {
885         struct cfq_data *cfqd = q->elevator->elevator_data;
886
887         cfqd->rq_in_driver[rq_is_sync(rq)]++;
888         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
889                                                 rq_in_driver(cfqd));
890
891         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
892 }
893
894 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
895 {
896         struct cfq_data *cfqd = q->elevator->elevator_data;
897         const int sync = rq_is_sync(rq);
898
899         WARN_ON(!cfqd->rq_in_driver[sync]);
900         cfqd->rq_in_driver[sync]--;
901         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
902                                                 rq_in_driver(cfqd));
903 }
904
905 static void cfq_remove_request(struct request *rq)
906 {
907         struct cfq_queue *cfqq = RQ_CFQQ(rq);
908
909         if (cfqq->next_rq == rq)
910                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
911
912         list_del_init(&rq->queuelist);
913         cfq_del_rq_rb(rq);
914
915         cfqq->cfqd->rq_queued--;
916         if (rq_is_meta(rq)) {
917                 WARN_ON(!cfqq->meta_pending);
918                 cfqq->meta_pending--;
919         }
920 }
921
922 static int cfq_merge(struct request_queue *q, struct request **req,
923                      struct bio *bio)
924 {
925         struct cfq_data *cfqd = q->elevator->elevator_data;
926         struct request *__rq;
927
928         __rq = cfq_find_rq_fmerge(cfqd, bio);
929         if (__rq && elv_rq_merge_ok(__rq, bio)) {
930                 *req = __rq;
931                 return ELEVATOR_FRONT_MERGE;
932         }
933
934         return ELEVATOR_NO_MERGE;
935 }
936
937 static void cfq_merged_request(struct request_queue *q, struct request *req,
938                                int type)
939 {
940         if (type == ELEVATOR_FRONT_MERGE) {
941                 struct cfq_queue *cfqq = RQ_CFQQ(req);
942
943                 cfq_reposition_rq_rb(cfqq, req);
944         }
945 }
946
947 static void
948 cfq_merged_requests(struct request_queue *q, struct request *rq,
949                     struct request *next)
950 {
951         struct cfq_queue *cfqq = RQ_CFQQ(rq);
952         /*
953          * reposition in fifo if next is older than rq
954          */
955         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
956             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
957                 list_move(&rq->queuelist, &next->queuelist);
958                 rq_set_fifo_time(rq, rq_fifo_time(next));
959         }
960
961         if (cfqq->next_rq == next)
962                 cfqq->next_rq = rq;
963         cfq_remove_request(next);
964 }
965
966 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
967                            struct bio *bio)
968 {
969         struct cfq_data *cfqd = q->elevator->elevator_data;
970         struct cfq_io_context *cic;
971         struct cfq_queue *cfqq;
972
973         /*
974          * Disallow merge of a sync bio into an async request.
975          */
976         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
977                 return false;
978
979         /*
980          * Lookup the cfqq that this bio will be queued with. Allow
981          * merge only if rq is queued there.
982          */
983         cic = cfq_cic_lookup(cfqd, current->io_context);
984         if (!cic)
985                 return false;
986
987         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
988         return cfqq == RQ_CFQQ(rq);
989 }
990
991 static void __cfq_set_active_queue(struct cfq_data *cfqd,
992                                    struct cfq_queue *cfqq)
993 {
994         if (cfqq) {
995                 cfq_log_cfqq(cfqd, cfqq, "set_active");
996                 cfqq->slice_end = 0;
997                 cfqq->slice_dispatch = 0;
998
999                 cfq_clear_cfqq_wait_request(cfqq);
1000                 cfq_clear_cfqq_must_dispatch(cfqq);
1001                 cfq_clear_cfqq_must_alloc_slice(cfqq);
1002                 cfq_clear_cfqq_fifo_expire(cfqq);
1003                 cfq_mark_cfqq_slice_new(cfqq);
1004
1005                 del_timer(&cfqd->idle_slice_timer);
1006         }
1007
1008         cfqd->active_queue = cfqq;
1009 }
1010
1011 /*
1012  * current cfqq expired its slice (or was too idle), select new one
1013  */
1014 static void
1015 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1016                     bool timed_out)
1017 {
1018         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1019
1020         if (cfq_cfqq_wait_request(cfqq))
1021                 del_timer(&cfqd->idle_slice_timer);
1022
1023         cfq_clear_cfqq_wait_request(cfqq);
1024
1025         /*
1026          * store what was left of this slice, if the queue idled/timed out
1027          */
1028         if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
1029                 cfqq->slice_resid = cfqq->slice_end - jiffies;
1030                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1031         }
1032
1033         cfq_resort_rr_list(cfqd, cfqq);
1034
1035         if (cfqq == cfqd->active_queue)
1036                 cfqd->active_queue = NULL;
1037
1038         if (cfqd->active_cic) {
1039                 put_io_context(cfqd->active_cic->ioc);
1040                 cfqd->active_cic = NULL;
1041         }
1042 }
1043
1044 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1045 {
1046         struct cfq_queue *cfqq = cfqd->active_queue;
1047
1048         if (cfqq)
1049                 __cfq_slice_expired(cfqd, cfqq, timed_out);
1050 }
1051
1052 /*
1053  * Get next queue for service. Unless we have a queue preemption,
1054  * we'll simply select the first cfqq in the service tree.
1055  */
1056 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1057 {
1058         struct cfq_rb_root *service_tree =
1059                 service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd);
1060
1061         if (RB_EMPTY_ROOT(&service_tree->rb))
1062                 return NULL;
1063         return cfq_rb_first(service_tree);
1064 }
1065
1066 /*
1067  * Get and set a new active queue for service.
1068  */
1069 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1070                                               struct cfq_queue *cfqq)
1071 {
1072         if (!cfqq)
1073                 cfqq = cfq_get_next_queue(cfqd);
1074
1075         __cfq_set_active_queue(cfqd, cfqq);
1076         return cfqq;
1077 }
1078
1079 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1080                                           struct request *rq)
1081 {
1082         if (blk_rq_pos(rq) >= cfqd->last_position)
1083                 return blk_rq_pos(rq) - cfqd->last_position;
1084         else
1085                 return cfqd->last_position - blk_rq_pos(rq);
1086 }
1087
1088 #define CFQQ_SEEK_THR           (8 * 1024)
1089 #define CFQQ_SEEKY(cfqq)        ((cfqq)->seek_mean > CFQQ_SEEK_THR)
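/* both are in sectors: 8 * 1024 sectors is 4MiB with 512-byte sectors */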
1090
1091 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1092                                struct request *rq)
1093 {
1094         sector_t sdist = cfqq->seek_mean;
1095
1096         if (!sample_valid(cfqq->seek_samples))
1097                 sdist = CFQQ_SEEK_THR;
1098
1099         return cfq_dist_from_last(cfqd, rq) <= sdist;
1100 }
1101
1102 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1103                                     struct cfq_queue *cur_cfqq)
1104 {
1105         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1106         struct rb_node *parent, *node;
1107         struct cfq_queue *__cfqq;
1108         sector_t sector = cfqd->last_position;
1109
1110         if (RB_EMPTY_ROOT(root))
1111                 return NULL;
1112
1113         /*
1114          * First, if we find a request starting at the end of the last
1115          * request, choose it.
1116          */
1117         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1118         if (__cfqq)
1119                 return __cfqq;
1120
1121         /*
1122          * If the exact sector wasn't found, the parent of the NULL leaf
1123          * will contain the closest sector.
1124          */
1125         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1126         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1127                 return __cfqq;
1128
1129         if (blk_rq_pos(__cfqq->next_rq) < sector)
1130                 node = rb_next(&__cfqq->p_node);
1131         else
1132                 node = rb_prev(&__cfqq->p_node);
1133         if (!node)
1134                 return NULL;
1135
1136         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1137         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1138                 return __cfqq;
1139
1140         return NULL;
1141 }
1142
1143 /*
1144  * cfqd - obvious
1145  * cur_cfqq - passed in so that we don't decide that the current queue is
1146  *            closely cooperating with itself.
1147  *
1148  * So, basically we're assuming that cur_cfqq has dispatched at least
1149  * one request, and that cfqd->last_position reflects a position on the disk
1150  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1151  * assumption.
1152  */
1153 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1154                                               struct cfq_queue *cur_cfqq)
1155 {
1156         struct cfq_queue *cfqq;
1157
1158         if (!cfq_cfqq_sync(cur_cfqq))
1159                 return NULL;
1160         if (CFQQ_SEEKY(cur_cfqq))
1161                 return NULL;
1162
1163         /*
1164          * We should notice if some of the queues are cooperating, eg
1165          * working closely on the same area of the disk. In that case,
1166          * we can group them together and not waste time idling.
1167          */
1168         cfqq = cfqq_close(cfqd, cur_cfqq);
1169         if (!cfqq)
1170                 return NULL;
1171
1172         /*
1173          * It only makes sense to merge sync queues.
1174          */
1175         if (!cfq_cfqq_sync(cfqq))
1176                 return NULL;
1177         if (CFQQ_SEEKY(cfqq))
1178                 return NULL;
1179
1180         /*
1181          * Do not merge queues of different priority classes
1182          */
1183         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1184                 return NULL;
1185
1186         return cfqq;
1187 }
1188
1189 /*
1190  * Determine whether we should enforce idle window for this queue.
1191  */
1192
1193 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1194 {
1195         enum wl_prio_t prio = cfqq_prio(cfqq);
1196         struct cfq_rb_root *service_tree = cfqq->service_tree;
1197
1198         /* We never do for idle class queues. */
1199         if (prio == IDLE_WORKLOAD)
1200                 return false;
1201
1202         /* We do for queues that were marked with idle window flag. */
1203         if (cfq_cfqq_idle_window(cfqq))
1204                 return true;
1205
1206         /*
1207          * Otherwise, we do only if they are the last ones
1208          * in their service tree.
1209          */
1210         if (!service_tree)
1211                 service_tree = service_tree_for(prio, cfqq_type(cfqq), cfqd);
1212
1213         if (service_tree->count == 0)
1214                 return true;
1215
1216         return (service_tree->count == 1 && cfq_rb_first(service_tree) == cfqq);
1217 }
1218
1219 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1220 {
1221         struct cfq_queue *cfqq = cfqd->active_queue;
1222         struct cfq_io_context *cic;
1223         unsigned long sl;
1224
1225         /*
1226          * SSD device without seek penalty, disable idling. But only do so
1227          * for devices that support queuing, otherwise we still have a problem
1228          * with sync vs async workloads.
1229          */
1230         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1231                 return;
1232
1233         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1234         WARN_ON(cfq_cfqq_slice_new(cfqq));
1235
1236         /*
1237          * idle is disabled, either manually or by past process history
1238          */
1239         if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
1240                 return;
1241
1242         /*
1243          * still requests with the driver, don't idle
1244          */
1245         if (rq_in_driver(cfqd))
1246                 return;
1247
1248         /*
1249          * task has exited, don't wait
1250          */
1251         cic = cfqd->active_cic;
1252         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1253                 return;
1254
1255         /*
1256          * If our average think time is larger than the remaining time
1257          * slice, then don't idle. This avoids overrunning the allotted
1258          * time slice.
1259          */
1260         if (sample_valid(cic->ttime_samples) &&
1261             (cfqq->slice_end - jiffies < cic->ttime_mean))
1262                 return;
1263
1264         cfq_mark_cfqq_wait_request(cfqq);
1265
1266         sl = cfqd->cfq_slice_idle;
1267         /* Are we servicing the noidle tree, and are there more queues?
1268          * non-rotational or NCQ: no idle
1269          * non-NCQ rotational: very small idle, to allow
1270          *     fair distribution of slice time for a process doing
1271          *     back-to-back seeks.
1272          */
1273         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
1274             service_tree_for(cfqd->serving_prio, SYNC_NOIDLE_WORKLOAD, cfqd)
1275                 ->count > 0) {
1276                 if (blk_queue_nonrot(cfqd->queue) || cfqd->hw_tag)
1277                         return;
1278                 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
1279         }
1280
1281         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1282         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
1283 }
1284
1285 /*
1286  * Move request from internal lists to the request queue dispatch list.
1287  */
1288 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1289 {
1290         struct cfq_data *cfqd = q->elevator->elevator_data;
1291         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1292
1293         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1294
1295         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1296         cfq_remove_request(rq);
1297         cfqq->dispatched++;
1298         elv_dispatch_sort(q, rq);
1299
1300         if (cfq_cfqq_sync(cfqq))
1301                 cfqd->sync_flight++;
1302 }
1303
1304 /*
1305  * return expired entry, or NULL to just start from scratch in rbtree
1306  */
1307 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1308 {
1309         struct request *rq = NULL;
1310
1311         if (cfq_cfqq_fifo_expire(cfqq))
1312                 return NULL;
1313
1314         cfq_mark_cfqq_fifo_expire(cfqq);
1315
1316         if (list_empty(&cfqq->fifo))
1317                 return NULL;
1318
1319         rq = rq_entry_fifo(cfqq->fifo.next);
1320         if (time_before(jiffies, rq_fifo_time(rq)))
1321                 rq = NULL;
1322
1323         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1324         return rq;
1325 }
1326
1327 static inline int
1328 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1329 {
1330         const int base_rq = cfqd->cfq_slice_async_rq;
1331
1332         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1333
1334         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1335 }
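/*
 * With the default cfq_slice_async_rq = 2 this evaluates to
 * 2 * (2 + 2 * (7 - ioprio)): 32 requests for prio 0, 16 for the
 * default prio 4, and 4 for prio 7.
 */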
1336
1337 /*
1338  * Must be called with the queue_lock held.
1339  */
1340 static int cfqq_process_refs(struct cfq_queue *cfqq)
1341 {
1342         int process_refs, io_refs;
1343
1344         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
1345         process_refs = atomic_read(&cfqq->ref) - io_refs;
1346         BUG_ON(process_refs < 0);
1347         return process_refs;
1348 }
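/*
 * Each allocated request holds its own reference on the cfqq, so
 * subtracting the in-flight allocations from the total refcount leaves
 * just the references held on behalf of processes.
 */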
1349
1350 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1351 {
1352         int process_refs, new_process_refs;
1353         struct cfq_queue *__cfqq;
1354
1355         /* Avoid a circular list and skip interim queue merges */
1356         while ((__cfqq = new_cfqq->new_cfqq)) {
1357                 if (__cfqq == cfqq)
1358                         return;
1359                 new_cfqq = __cfqq;
1360         }
1361
1362         process_refs = cfqq_process_refs(cfqq);
1363         /*
1364          * If the process for the cfqq has gone away, there is no
1365          * sense in merging the queues.
1366          */
1367         if (process_refs == 0)
1368                 return;
1369
1370         /*
1371          * Merge in the direction of the lesser amount of work.
1372          */
1373         new_process_refs = cfqq_process_refs(new_cfqq);
1374         if (new_process_refs >= process_refs) {
1375                 cfqq->new_cfqq = new_cfqq;
1376                 atomic_add(process_refs, &new_cfqq->ref);
1377         } else {
1378                 new_cfqq->new_cfqq = cfqq;
1379                 atomic_add(new_process_refs, &cfqq->ref);
1380         }
1381 }
1382
1383 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd, enum wl_prio_t prio,
1384                                     bool prio_changed)
1385 {
1386         struct cfq_queue *queue;
1387         int i;
1388         bool key_valid = false;
1389         unsigned long lowest_key = 0;
1390         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
1391
1392         if (prio_changed) {
1393                 /*
1394                  * When priorities switched, we prefer starting
1395                  * from SYNC_NOIDLE (first choice), or just SYNC
1396                  * over ASYNC
1397                  */
1398                 if (service_tree_for(prio, cur_best, cfqd)->count)
1399                         return cur_best;
1400                 cur_best = SYNC_WORKLOAD;
1401                 if (service_tree_for(prio, cur_best, cfqd)->count)
1402                         return cur_best;
1403
1404                 return ASYNC_WORKLOAD;
1405         }
1406
1407         for (i = 0; i < 3; ++i) {
1408                 /* otherwise, select the one with lowest rb_key */
1409                 queue = cfq_rb_first(service_tree_for(prio, i, cfqd));
1410                 if (queue &&
1411                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
1412                         lowest_key = queue->rb_key;
1413                         cur_best = i;
1414                         key_valid = true;
1415                 }
1416         }
1417
1418         return cur_best;
1419 }
1420
1421 static void choose_service_tree(struct cfq_data *cfqd)
1422 {
1423         enum wl_prio_t previous_prio = cfqd->serving_prio;
1424         bool prio_changed;
1425         unsigned slice;
1426         unsigned count;
1427
1428         /* Choose next priority. RT > BE > IDLE */
1429         if (cfq_busy_queues_wl(RT_WORKLOAD, cfqd))
1430                 cfqd->serving_prio = RT_WORKLOAD;
1431         else if (cfq_busy_queues_wl(BE_WORKLOAD, cfqd))
1432                 cfqd->serving_prio = BE_WORKLOAD;
1433         else {
1434                 cfqd->serving_prio = IDLE_WORKLOAD;
1435                 cfqd->workload_expires = jiffies + 1;
1436                 return;
1437         }
1438
1439         /*
1440          * For RT and BE, we have to choose also the type
1441          * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
1442          * expiration time
1443          */
1444         prio_changed = (cfqd->serving_prio != previous_prio);
1445         count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd)
1446                 ->count;
1447
1448         /*
1449          * If priority didn't change, check workload expiration,
1450          * and that we still have other queues ready
1451          */
1452         if (!prio_changed && count &&
1453             !time_after(jiffies, cfqd->workload_expires))
1454                 return;
1455
1456         /* otherwise select new workload type */
1457         cfqd->serving_type =
1458                 cfq_choose_wl(cfqd, cfqd->serving_prio, prio_changed);
1459         count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd)
1460                 ->count;
1461
1462         /*
1463          * the workload slice is computed as a fraction of target latency
1464          * proportional to the number of queues in that workload, over
1465          * all the queues in the same priority class
1466          */
1467         slice = cfq_target_latency * count /
1468                 max_t(unsigned, cfqd->busy_queues_avg[cfqd->serving_prio],
1469                       cfq_busy_queues_wl(cfqd->serving_prio, cfqd));
1470
1471         if (cfqd->serving_type == ASYNC_WORKLOAD)
1472                 /* async workload slice is scaled down according to
1473                  * the sync/async slice ratio. */
1474                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
1475         else
1476                 /* sync workload slice is at least 2 * cfq_slice_idle */
1477                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
1478
1479         slice = max_t(unsigned, slice, CFQ_MIN_TT);
1480         cfqd->workload_expires = jiffies + slice;
1481 }
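/*
 * Example of the slice computation above, with HZ=1000 defaults: 2 queues
 * in the chosen workload out of 6 busy BE queues give
 * slice = 300 * 2 / 6 = 100ms. An async workload is then scaled by the
 * 40/100 sync/async slice ratio down to 40ms, while a sync workload is
 * kept at or above 2 * cfq_slice_idle = 16ms.
 */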
1482
1483 /*
1484  * Select a queue for service. If we have a current active queue,
1485  * check whether to continue servicing it, or retrieve and set a new one.
1486  */
1487 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1488 {
1489         struct cfq_queue *cfqq, *new_cfqq = NULL;
1490
1491         cfqq = cfqd->active_queue;
1492         if (!cfqq)
1493                 goto new_queue;
1494
1495         /*
1496          * The active queue has run out of time, expire it and select new.
1497          */
1498         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
1499                 goto expire;
1500
1501         /*
1502          * The active queue has requests and isn't expired, allow it to
1503          * dispatch.
1504          */
1505         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
1506                 goto keep_queue;
1507
1508         /*
1509          * If another queue has a request waiting within our mean seek
1510          * distance, let it run.  The expire code will check for close
1511          * cooperators and put the close queue at the front of the service
1512          * tree.  If possible, merge the expiring queue with the new cfqq.
1513          */
1514         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
1515         if (new_cfqq) {
1516                 if (!cfqq->new_cfqq)
1517                         cfq_setup_merge(cfqq, new_cfqq);
1518                 goto expire;
1519         }
1520
1521         /*
1522          * No requests pending. If the active queue still has requests in
1523          * flight or is idling for a new request, allow either of these
1524          * conditions to happen (or time out) before selecting a new queue.
1525          */
1526         if (timer_pending(&cfqd->idle_slice_timer) ||
1527             (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
1528                 cfqq = NULL;
1529                 goto keep_queue;
1530         }
1531
1532 expire:
1533         cfq_slice_expired(cfqd, 0);
1534 new_queue:
1535         /*
1536          * Current queue expired. Check if we have to switch to a new
1537          * service tree
1538          */
1539         if (!new_cfqq)
1540                 choose_service_tree(cfqd);
1541
1542         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
1543 keep_queue:
1544         return cfqq;
1545 }
1546
1547 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
1548 {
1549         int dispatched = 0;
1550
1551         while (cfqq->next_rq) {
1552                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1553                 dispatched++;
1554         }
1555
1556         BUG_ON(!list_empty(&cfqq->fifo));
1557         return dispatched;
1558 }
1559
1560 /*
1561  * Drain our current requests. Used for barriers and when switching
1562  * io schedulers on-the-fly.
1563  */
1564 static int cfq_forced_dispatch(struct cfq_data *cfqd)
1565 {
1566         struct cfq_queue *cfqq;
1567         int dispatched = 0;
1568         int i, j;
1569         for (i = 0; i < 2; ++i)
1570                 for (j = 0; j < 3; ++j)
1571                         while ((cfqq = cfq_rb_first(&cfqd->service_trees[i][j]))
1572                                 != NULL)
1573                                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1574
1575         while ((cfqq = cfq_rb_first(&cfqd->service_tree_idle)) != NULL)
1576                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1577
1578         cfq_slice_expired(cfqd, 0);
1579
1580         BUG_ON(cfqd->busy_queues);
1581
1582         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1583         return dispatched;
1584 }
1585
1586 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1587 {
1588         unsigned int max_dispatch;
1589
1590         /*
1591          * Drain async requests before we start sync IO
1592          */
1593         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
1594                 return false;
1595
1596         /*
1597          * If this is an async queue and we have sync IO in flight, let it wait
1598          */
1599         if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
1600                 return false;
1601
1602         max_dispatch = cfqd->cfq_quantum;
1603         if (cfq_class_idle(cfqq))
1604                 max_dispatch = 1;
1605
1606         /*
1607          * Does this cfqq already have too much IO in flight?
1608          */
1609         if (cfqq->dispatched >= max_dispatch) {
1610                 /*
1611                  * idle queue must always only have a single IO in flight
1612                  */
1613                 if (cfq_class_idle(cfqq))
1614                         return false;
1615
1616                 /*
1617                  * We have other queues, don't allow more IO from this one
1618                  */
1619                 if (cfqd->busy_queues > 1)
1620                         return false;
1621
1622                 /*
1623                  * Sole queue user, allow bigger slice
1624                  */
1625                 max_dispatch *= 4;
1626         }
1627
1628         /*
1629          * Async queues must wait a bit before being allowed dispatch.
1630          * We also ramp up the dispatch depth gradually for async IO,
1631          * based on the last sync IO we serviced
1632          */
1633         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
1634                 unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
1635                 unsigned int depth;
1636
1637                 depth = last_sync / cfqd->cfq_slice[1];
1638                 if (!depth && !cfqq->dispatched)
1639                         depth = 1;
1640                 if (depth < max_dispatch)
1641                         max_dispatch = depth;
1642         }
1643
1644         /*
1645          * If we're below the current max, allow a dispatch
1646          */
1647         return cfqq->dispatched < max_dispatch;
1648 }
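
/*
 * Worked example of the async ramp-up above, assuming HZ=1000 and the
 * default sync slice cfq_slice[1] = 100ms: if the last sync request
 * completed 250 jiffies ago, depth = 250 / 100 = 2, so this async queue
 * may keep at most two requests in flight. Within the first sync slice
 * depth computes to 0, and a single request is allowed only once the
 * queue has nothing dispatched.
 */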
1649
1650 /*
1651  * Dispatch a request from cfqq, moving it to the request queue
1652  * dispatch list.
1653  */
1654 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1655 {
1656         struct request *rq;
1657
1658         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
1659
1660         if (!cfq_may_dispatch(cfqd, cfqq))
1661                 return false;
1662
1663         /*
1664          * follow the expired FIFO path, else take the next sector-sorted request
1665          */
1666         rq = cfq_check_fifo(cfqq);
1667         if (!rq)
1668                 rq = cfqq->next_rq;
1669
1670         /*
1671          * insert request into driver dispatch list
1672          */
1673         cfq_dispatch_insert(cfqd->queue, rq);
1674
1675         if (!cfqd->active_cic) {
1676                 struct cfq_io_context *cic = RQ_CIC(rq);
1677
1678                 atomic_long_inc(&cic->ioc->refcount);
1679                 cfqd->active_cic = cic;
1680         }
1681
1682         return true;
1683 }
1684
1685 /*
1686  * Find the cfqq that we need to service and move a request from that to the
1687  * dispatch list
1688  */
1689 static int cfq_dispatch_requests(struct request_queue *q, int force)
1690 {
1691         struct cfq_data *cfqd = q->elevator->elevator_data;
1692         struct cfq_queue *cfqq;
1693
1694         if (!cfqd->busy_queues)
1695                 return 0;
1696
1697         if (unlikely(force))
1698                 return cfq_forced_dispatch(cfqd);
1699
1700         cfqq = cfq_select_queue(cfqd);
1701         if (!cfqq)
1702                 return 0;
1703
1704         /*
1705          * Dispatch a request from this cfqq, if it is allowed
1706          */
1707         if (!cfq_dispatch_request(cfqd, cfqq))
1708                 return 0;
1709
1710         cfqq->slice_dispatch++;
1711         cfq_clear_cfqq_must_dispatch(cfqq);
1712
1713         /*
1714          * expire an async queue immediately if it has used up its slice. an
1715          * idle queue always expires after 1 dispatch round.
1716          */
1717         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1718             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1719             cfq_class_idle(cfqq))) {
1720                 cfqq->slice_end = jiffies + 1;
1721                 cfq_slice_expired(cfqd, 0);
1722         }
1723
1724         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
1725         return 1;
1726 }
1727
1728 /*
1729  * task holds one reference to the queue, dropped when task exits. each rq
1730  * in-flight on this queue also holds a reference, dropped when rq is freed.
1731  *
1732  * queue lock must be held here.
1733  */
1734 static void cfq_put_queue(struct cfq_queue *cfqq)
1735 {
1736         struct cfq_data *cfqd = cfqq->cfqd;
1737
1738         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1739
1740         if (!atomic_dec_and_test(&cfqq->ref))
1741                 return;
1742
1743         cfq_log_cfqq(cfqd, cfqq, "put_queue");
1744         BUG_ON(rb_first(&cfqq->sort_list));
1745         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1746         BUG_ON(cfq_cfqq_on_rr(cfqq));
1747
1748         if (unlikely(cfqd->active_queue == cfqq)) {
1749                 __cfq_slice_expired(cfqd, cfqq, 0);
1750                 cfq_schedule_dispatch(cfqd);
1751         }
1752
1753         kmem_cache_free(cfq_pool, cfqq);
1754 }
1755
1756 /*
1757  * Must always be called with the rcu_read_lock() held
1758  */
1759 static void
1760 __call_for_each_cic(struct io_context *ioc,
1761                     void (*func)(struct io_context *, struct cfq_io_context *))
1762 {
1763         struct cfq_io_context *cic;
1764         struct hlist_node *n;
1765
1766         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
1767                 func(ioc, cic);
1768 }
1769
1770 /*
1771  * Call func for each cic attached to this ioc.
1772  */
1773 static void
1774 call_for_each_cic(struct io_context *ioc,
1775                   void (*func)(struct io_context *, struct cfq_io_context *))
1776 {
1777         rcu_read_lock();
1778         __call_for_each_cic(ioc, func);
1779         rcu_read_unlock();
1780 }
1781
1782 static void cfq_cic_free_rcu(struct rcu_head *head)
1783 {
1784         struct cfq_io_context *cic;
1785
1786         cic = container_of(head, struct cfq_io_context, rcu_head);
1787
1788         kmem_cache_free(cfq_ioc_pool, cic);
1789         elv_ioc_count_dec(cfq_ioc_count);
1790
1791         if (ioc_gone) {
1792                 /*
1793                  * CFQ scheduler is exiting, grab exit lock and check
1794                  * the pending io context count. If it hits zero,
1795                  * complete ioc_gone and set it back to NULL
1796                  */
1797                 spin_lock(&ioc_gone_lock);
1798                 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
1799                         complete(ioc_gone);
1800                         ioc_gone = NULL;
1801                 }
1802                 spin_unlock(&ioc_gone_lock);
1803         }
1804 }
1805
1806 static void cfq_cic_free(struct cfq_io_context *cic)
1807 {
1808         call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
1809 }
1810
1811 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
1812 {
1813         unsigned long flags;
1814
1815         BUG_ON(!cic->dead_key);
1816
1817         spin_lock_irqsave(&ioc->lock, flags);
1818         radix_tree_delete(&ioc->radix_root, cic->dead_key);
1819         hlist_del_rcu(&cic->cic_list);
1820         spin_unlock_irqrestore(&ioc->lock, flags);
1821
1822         cfq_cic_free(cic);
1823 }
1824
1825 /*
1826  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
1827  * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
1828  * and ->trim() which is called with the task lock held
1829  */
1830 static void cfq_free_io_context(struct io_context *ioc)
1831 {
1832         /*
1833          * ioc->refcount is zero here, or we are called from elv_unregister(),
1834          * so no more cic's are allowed to be linked into this ioc.  So it
1835          * should be ok to iterate over the known list, we will see all cic's
1836          * since no new ones are added.
1837          */
1838         __call_for_each_cic(ioc, cic_free_func);
1839 }
1840
1841 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1842 {
1843         struct cfq_queue *__cfqq, *next;
1844
1845         if (unlikely(cfqq == cfqd->active_queue)) {
1846                 __cfq_slice_expired(cfqd, cfqq, 0);
1847                 cfq_schedule_dispatch(cfqd);
1848         }
1849
1850         /*
1851          * If this queue was scheduled to merge with another queue, be
1852          * sure to drop the reference taken on that queue (and others in
1853          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
1854          */
1855         __cfqq = cfqq->new_cfqq;
1856         while (__cfqq) {
1857                 if (__cfqq == cfqq) {
1858                         WARN(1, "cfqq->new_cfqq loop detected\n");
1859                         break;
1860                 }
1861                 next = __cfqq->new_cfqq;
1862                 cfq_put_queue(__cfqq);
1863                 __cfqq = next;
1864         }
1865
1866         cfq_put_queue(cfqq);
1867 }
1868
1869 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1870                                          struct cfq_io_context *cic)
1871 {
1872         struct io_context *ioc = cic->ioc;
1873
1874         list_del_init(&cic->queue_list);
1875
1876         /*
1877          * Make sure key == NULL is seen for dead queues
1878          */
1879         smp_wmb();
1880         cic->dead_key = (unsigned long) cic->key;
1881         cic->key = NULL;
1882
1883         if (ioc->ioc_data == cic)
1884                 rcu_assign_pointer(ioc->ioc_data, NULL);
1885
1886         if (cic->cfqq[BLK_RW_ASYNC]) {
1887                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
1888                 cic->cfqq[BLK_RW_ASYNC] = NULL;
1889         }
1890
1891         if (cic->cfqq[BLK_RW_SYNC]) {
1892                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
1893                 cic->cfqq[BLK_RW_SYNC] = NULL;
1894         }
1895 }
1896
1897 static void cfq_exit_single_io_context(struct io_context *ioc,
1898                                        struct cfq_io_context *cic)
1899 {
1900         struct cfq_data *cfqd = cic->key;
1901
1902         if (cfqd) {
1903                 struct request_queue *q = cfqd->queue;
1904                 unsigned long flags;
1905
1906                 spin_lock_irqsave(q->queue_lock, flags);
1907
1908                 /*
1909                  * Ensure we get a fresh copy of the ->key to prevent
1910                  * race between exiting task and queue
1911                  */
1912                 smp_read_barrier_depends();
1913                 if (cic->key)
1914                         __cfq_exit_single_io_context(cfqd, cic);
1915
1916                 spin_unlock_irqrestore(q->queue_lock, flags);
1917         }
1918 }
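
/*
 * The cic->key re-check above (under the queue lock) matters because the
 * queue may be torn down between the lookup and the lock acquisition; in
 * that case __cfq_exit_single_io_context() has already cleared ->key,
 * preserving it in ->dead_key for the final radix tree removal in
 * cic_free_func().
 */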
1919
1920 /*
1921  * The process that ioc belongs to has exited, we need to clean up
1922  * and put the internal structures we have that belong to that process.
1923  */
1924 static void cfq_exit_io_context(struct io_context *ioc)
1925 {
1926         call_for_each_cic(ioc, cfq_exit_single_io_context);
1927 }
1928
1929 static struct cfq_io_context *
1930 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1931 {
1932         struct cfq_io_context *cic;
1933
1934         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
1935                                                         cfqd->queue->node);
1936         if (cic) {
1937                 cic->last_end_request = jiffies;
1938                 INIT_LIST_HEAD(&cic->queue_list);
1939                 INIT_HLIST_NODE(&cic->cic_list);
1940                 cic->dtor = cfq_free_io_context;
1941                 cic->exit = cfq_exit_io_context;
1942                 elv_ioc_count_inc(cfq_ioc_count);
1943         }
1944
1945         return cic;
1946 }
1947
1948 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
1949 {
1950         struct task_struct *tsk = current;
1951         int ioprio_class;
1952
1953         if (!cfq_cfqq_prio_changed(cfqq))
1954                 return;
1955
1956         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
1957         switch (ioprio_class) {
1958         default:
1959                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1960         case IOPRIO_CLASS_NONE:
1961                 /*
1962                  * no prio set, inherit CPU scheduling settings
1963                  */
1964                 cfqq->ioprio = task_nice_ioprio(tsk);
1965                 cfqq->ioprio_class = task_nice_ioclass(tsk);
1966                 break;
1967         case IOPRIO_CLASS_RT:
1968                 cfqq->ioprio = task_ioprio(ioc);
1969                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
1970                 break;
1971         case IOPRIO_CLASS_BE:
1972                 cfqq->ioprio = task_ioprio(ioc);
1973                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
1974                 break;
1975         case IOPRIO_CLASS_IDLE:
1976                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1977                 cfqq->ioprio = 7;
1978                 cfq_clear_cfqq_idle_window(cfqq);
1979                 break;
1980         }
1981
1982         /*
1983          * keep track of original prio settings in case we have to temporarily
1984          * elevate the priority of this queue
1985          */
1986         cfqq->org_ioprio = cfqq->ioprio;
1987         cfqq->org_ioprio_class = cfqq->ioprio_class;
1988         cfq_clear_cfqq_prio_changed(cfqq);
1989 }
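
/*
 * For reference, an io priority value packs the class into the upper
 * bits and the level into the lower ones, e.g. a best-effort level-4
 * task carries (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 4, from which
 * IOPRIO_PRIO_CLASS() above extracts IOPRIO_CLASS_BE.
 */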
1990
1991 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
1992 {
1993         struct cfq_data *cfqd = cic->key;
1994         struct cfq_queue *cfqq;
1995         unsigned long flags;
1996
1997         if (unlikely(!cfqd))
1998                 return;
1999
2000         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2001
2002         cfqq = cic->cfqq[BLK_RW_ASYNC];
2003         if (cfqq) {
2004                 struct cfq_queue *new_cfqq;
2005                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2006                                                 GFP_ATOMIC);
2007                 if (new_cfqq) {
2008                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2009                         cfq_put_queue(cfqq);
2010                 }
2011         }
2012
2013         cfqq = cic->cfqq[BLK_RW_SYNC];
2014         if (cfqq)
2015                 cfq_mark_cfqq_prio_changed(cfqq);
2016
2017         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2018 }
2019
2020 static void cfq_ioc_set_ioprio(struct io_context *ioc)
2021 {
2022         call_for_each_cic(ioc, changed_ioprio);
2023         ioc->ioprio_changed = 0;
2024 }
2025
2026 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2027                           pid_t pid, bool is_sync)
2028 {
2029         RB_CLEAR_NODE(&cfqq->rb_node);
2030         RB_CLEAR_NODE(&cfqq->p_node);
2031         INIT_LIST_HEAD(&cfqq->fifo);
2032
2033         atomic_set(&cfqq->ref, 0);
2034         cfqq->cfqd = cfqd;
2035
2036         cfq_mark_cfqq_prio_changed(cfqq);
2037
2038         if (is_sync) {
2039                 if (!cfq_class_idle(cfqq))
2040                         cfq_mark_cfqq_idle_window(cfqq);
2041                 cfq_mark_cfqq_sync(cfqq);
2042         }
2043         cfqq->pid = pid;
2044 }
2045
2046 static struct cfq_queue *
2047 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2048                      struct io_context *ioc, gfp_t gfp_mask)
2049 {
2050         struct cfq_queue *cfqq, *new_cfqq = NULL;
2051         struct cfq_io_context *cic;
2052
2053 retry:
2054         cic = cfq_cic_lookup(cfqd, ioc);
2055         /* cic always exists here */
2056         cfqq = cic_to_cfqq(cic, is_sync);
2057
2058         /*
2059          * Always try a new alloc if we fell back to the OOM cfqq
2060          * originally, since it should just be a temporary situation.
2061          */
2062         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2063                 cfqq = NULL;
2064                 if (new_cfqq) {
2065                         cfqq = new_cfqq;
2066                         new_cfqq = NULL;
2067                 } else if (gfp_mask & __GFP_WAIT) {
2068                         spin_unlock_irq(cfqd->queue->queue_lock);
2069                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
2070                                         gfp_mask | __GFP_ZERO,
2071                                         cfqd->queue->node);
2072                         spin_lock_irq(cfqd->queue->queue_lock);
2073                         if (new_cfqq)
2074                                 goto retry;
2075                 } else {
2076                         cfqq = kmem_cache_alloc_node(cfq_pool,
2077                                         gfp_mask | __GFP_ZERO,
2078                                         cfqd->queue->node);
2079                 }
2080
2081                 if (cfqq) {
2082                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2083                         cfq_init_prio_data(cfqq, ioc);
2084                         cfq_log_cfqq(cfqd, cfqq, "alloced");
2085                 } else
2086                         cfqq = &cfqd->oom_cfqq;
2087         }
2088
2089         if (new_cfqq)
2090                 kmem_cache_free(cfq_pool, new_cfqq);
2091
2092         return cfqq;
2093 }
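
/*
 * The retry dance above exists because a blocking (__GFP_WAIT)
 * allocation must drop the queue lock; by the time it is reacquired
 * another task may have installed a cfqq for this cic, so we redo the
 * lookup and free the spare allocation at the end if it went unused.
 */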
2094
2095 static struct cfq_queue **
2096 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2097 {
2098         switch (ioprio_class) {
2099         case IOPRIO_CLASS_RT:
2100                 return &cfqd->async_cfqq[0][ioprio];
2101         case IOPRIO_CLASS_BE:
2102                 return &cfqd->async_cfqq[1][ioprio];
2103         case IOPRIO_CLASS_IDLE:
2104                 return &cfqd->async_idle_cfqq;
2105         default:
2106                 BUG();
2107         }
2108 }
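
/*
 * e.g. an async request from a best-effort, priority-4 task maps to
 * &cfqd->async_cfqq[1][4], while all idle-class async IO shares the
 * single async_idle_cfqq.
 */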
2109
2110 static struct cfq_queue *
2111 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2112               gfp_t gfp_mask)
2113 {
2114         const int ioprio = task_ioprio(ioc);
2115         const int ioprio_class = task_ioprio_class(ioc);
2116         struct cfq_queue **async_cfqq = NULL;
2117         struct cfq_queue *cfqq = NULL;
2118
2119         if (!is_sync) {
2120                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2121                 cfqq = *async_cfqq;
2122         }
2123
2124         if (!cfqq)
2125                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2126
2127         /*
2128          * pin the queue now that it's allocated, scheduler exit will prune it
2129          */
2130         if (!is_sync && !(*async_cfqq)) {
2131                 atomic_inc(&cfqq->ref);
2132                 *async_cfqq = cfqq;
2133         }
2134
2135         atomic_inc(&cfqq->ref);
2136         return cfqq;
2137 }
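
/*
 * Reference accounting: a shared async cfqq is pinned once for its
 * async_cfqq[] slot (dropped in cfq_put_async_queues() at scheduler
 * exit) and once for the caller; sync queues only get the caller's
 * reference.
 */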
2138
2139 /*
2140  * We drop cfq io contexts lazily, so we may find a dead one.
2141  */
2142 static void
2143 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2144                   struct cfq_io_context *cic)
2145 {
2146         unsigned long flags;
2147
2148         WARN_ON(!list_empty(&cic->queue_list));
2149
2150         spin_lock_irqsave(&ioc->lock, flags);
2151
2152         BUG_ON(ioc->ioc_data == cic);
2153
2154         radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
2155         hlist_del_rcu(&cic->cic_list);
2156         spin_unlock_irqrestore(&ioc->lock, flags);
2157
2158         cfq_cic_free(cic);
2159 }
2160
2161 static struct cfq_io_context *
2162 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
2163 {
2164         struct cfq_io_context *cic;
2165         unsigned long flags;
2166         void *k;
2167
2168         if (unlikely(!ioc))
2169                 return NULL;
2170
2171         rcu_read_lock();
2172
2173         /*
2174          * we maintain a last-hit cache, to avoid browsing over the tree
2175          */
2176         cic = rcu_dereference(ioc->ioc_data);
2177         if (cic && cic->key == cfqd) {
2178                 rcu_read_unlock();
2179                 return cic;
2180         }
2181
2182         do {
2183                 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
2184                 rcu_read_unlock();
2185                 if (!cic)
2186                         break;
2187                 /* ->key must be copied to avoid race with cfq_exit_queue() */
2188                 k = cic->key;
2189                 if (unlikely(!k)) {
2190                         cfq_drop_dead_cic(cfqd, ioc, cic);
2191                         rcu_read_lock();
2192                         continue;
2193                 }
2194
2195                 spin_lock_irqsave(&ioc->lock, flags);
2196                 rcu_assign_pointer(ioc->ioc_data, cic);
2197                 spin_unlock_irqrestore(&ioc->lock, flags);
2198                 break;
2199         } while (1);
2200
2201         return cic;
2202 }
2203
2204 /*
2205  * Add cic into ioc, using cfqd as the search key. This enables us to look up
2206  * the process-specific cfq io context when entered from the block layer.
2207  * Also adds the cic to a per-cfqd list, used when this queue is removed.
2208  */
2209 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
2210                         struct cfq_io_context *cic, gfp_t gfp_mask)
2211 {
2212         unsigned long flags;
2213         int ret;
2214
2215         ret = radix_tree_preload(gfp_mask);
2216         if (!ret) {
2217                 cic->ioc = ioc;
2218                 cic->key = cfqd;
2219
2220                 spin_lock_irqsave(&ioc->lock, flags);
2221                 ret = radix_tree_insert(&ioc->radix_root,
2222                                                 (unsigned long) cfqd, cic);
2223                 if (!ret)
2224                         hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
2225                 spin_unlock_irqrestore(&ioc->lock, flags);
2226
2227                 radix_tree_preload_end();
2228
2229                 if (!ret) {
2230                         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2231                         list_add(&cic->queue_list, &cfqd->cic_list);
2232                         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2233                 }
2234         }
2235
2236         if (ret)
2237                 printk(KERN_ERR "cfq: cic link failed!\n");
2238
2239         return ret;
2240 }
2241
2242 /*
2243  * Setup general io context and cfq io context. There can be several cfq
2244  * io contexts per general io context, if this process is doing io to more
2245  * than one device managed by cfq.
2246  */
2247 static struct cfq_io_context *
2248 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2249 {
2250         struct io_context *ioc = NULL;
2251         struct cfq_io_context *cic;
2252
2253         might_sleep_if(gfp_mask & __GFP_WAIT);
2254
2255         ioc = get_io_context(gfp_mask, cfqd->queue->node);
2256         if (!ioc)
2257                 return NULL;
2258
2259         cic = cfq_cic_lookup(cfqd, ioc);
2260         if (cic)
2261                 goto out;
2262
2263         cic = cfq_alloc_io_context(cfqd, gfp_mask);
2264         if (cic == NULL)
2265                 goto err;
2266
2267         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
2268                 goto err_free;
2269
2270 out:
2271         smp_read_barrier_depends();
2272         if (unlikely(ioc->ioprio_changed))
2273                 cfq_ioc_set_ioprio(ioc);
2274
2275         return cic;
2276 err_free:
2277         cfq_cic_free(cic);
2278 err:
2279         put_io_context(ioc);
2280         return NULL;
2281 }
2282
2283 static void
2284 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
2285 {
2286         unsigned long elapsed = jiffies - cic->last_end_request;
2287         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
2288
2289         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
2290         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
2291         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
2292 }
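
/*
 * The think time fields are 7/8-weighted exponential averages kept in
 * fixed point: ttime_samples converges to 256 and, for a steady think
 * time of t jiffies, ttime_total converges to 256*t, so ttime_mean
 * converges to t. E.g. a constant 4-jiffy gap gives total -> 1024,
 * samples -> 256, mean = (1024 + 128) / 256 = 4.
 */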
2293
2294 static void
2295 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2296                        struct request *rq)
2297 {
2298         sector_t sdist;
2299         u64 total;
2300
2301         if (!cfqq->last_request_pos)
2302                 sdist = 0;
2303         else if (cfqq->last_request_pos < blk_rq_pos(rq))
2304                 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
2305         else
2306                 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
2307
2308         /*
2309          * Don't allow the seek distance to get too large from the
2310          * odd fragment, pagein, etc
2311          */
2312         if (cfqq->seek_samples <= 60) /* second&third seek */
2313                 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
2314         else
2315                 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
2316
2317         cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
2318         cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
2319         total = cfqq->seek_total + (cfqq->seek_samples/2);
2320         do_div(total, cfqq->seek_samples);
2321         cfqq->seek_mean = (sector_t)total;
2322
2323         /*
2324          * If this cfqq is shared between multiple processes, check to
2325          * make sure that those processes are still issuing I/Os within
2326          * the mean seek distance.  If not, it may be time to break the
2327          * queues apart again.
2328          */
2329         if (cfq_cfqq_coop(cfqq)) {
2330                 if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
2331                         cfqq->seeky_start = jiffies;
2332                 else if (!CFQQ_SEEKY(cfqq))
2333                         cfqq->seeky_start = 0;
2334         }
2335 }
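
/*
 * seek_samples and seek_total follow the same 7/8 fixed-point scheme as
 * the think time above, so a steady seek distance d converges seek_mean
 * to d; adding seek_samples/2 before the do_div() makes the division
 * round to nearest instead of truncating.
 */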
2336
2337 /*
2338  * Disable idle window if the process thinks too long or seeks so much that
2339  * it doesn't matter
2340  */
2341 static void
2342 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2343                        struct cfq_io_context *cic)
2344 {
2345         int old_idle, enable_idle;
2346
2347         /*
2348          * Don't idle for async or idle io prio class
2349          */
2350         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
2351                 return;
2352
2353         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
2354
2355         if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
2356             (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq)))
2357                 enable_idle = 0;
2358         else if (sample_valid(cic->ttime_samples)) {
2359                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
2360                         enable_idle = 0;
2361                 else
2362                         enable_idle = 1;
2363         }
2364
2365         if (old_idle != enable_idle) {
2366                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
2367                 if (enable_idle)
2368                         cfq_mark_cfqq_idle_window(cfqq);
2369                 else
2370                         cfq_clear_cfqq_idle_window(cfqq);
2371         }
2372 }
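
/*
 * In short: idling stays enabled only for a sync, non-idle-class queue
 * whose owner still has running tasks, that is not seeky, and whose
 * mean think time fits within cfq_slice_idle; everything else gets the
 * idle window cleared.
 */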
2373
2374 /*
2375  * Check if new_cfqq should preempt the currently active queue. Returns false
2376  * for no (or if we aren't sure); true will cause a preempt.
2377  */
2378 static bool
2379 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
2380                    struct request *rq)
2381 {
2382         struct cfq_queue *cfqq;
2383
2384         cfqq = cfqd->active_queue;
2385         if (!cfqq)
2386                 return false;
2387
2388         if (cfq_slice_used(cfqq))
2389                 return true;
2390
2391         if (cfq_class_idle(new_cfqq))
2392                 return false;
2393
2394         if (cfq_class_idle(cfqq))
2395                 return true;
2396
2397         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD
2398             && new_cfqq->service_tree == cfqq->service_tree)
2399                 return true;
2400
2401         /*
2402          * if the new request is sync, but the currently running queue is
2403          * not, let the sync request have priority.
2404          */
2405         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
2406                 return true;
2407
2408         /*
2409          * So both queues are sync. Let the new request get disk time if
2410          * it's a metadata request and the current queue is doing regular IO.
2411          */
2412         if (rq_is_meta(rq) && !cfqq->meta_pending)
2413                 return true;
2414
2415         /*
2416          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
2417          */
2418         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
2419                 return true;
2420
2421         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
2422                 return false;
2423
2424         /*
2425          * if this request is as good as one we would expect from the
2426          * current cfqq, let it preempt
2427          */
2428         if (cfq_rq_close(cfqd, cfqq, rq))
2429                 return true;
2430
2431         return false;
2432 }
2433
2434 /*
2435  * cfqq preempts the active queue. if we allowed preempt with no slice left,
2436  * let it have half of its nominal slice.
2437  */
2438 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2439 {
2440         cfq_log_cfqq(cfqd, cfqq, "preempt");
2441         cfq_slice_expired(cfqd, 1);
2442
2443         /*
2444          * Put the new queue at the front of the current list,
2445          * so we know that it will be selected next.
2446          */
2447         BUG_ON(!cfq_cfqq_on_rr(cfqq));
2448
2449         cfq_service_tree_add(cfqd, cfqq, 1);
2450
2451         cfqq->slice_end = 0;
2452         cfq_mark_cfqq_slice_new(cfqq);
2453 }
2454
2455 /*
2456  * Called when a new fs request (rq) is added (to cfqq). Check if there's
2457  * something we should do about it
2458  */
2459 static void
2460 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2461                 struct request *rq)
2462 {
2463         struct cfq_io_context *cic = RQ_CIC(rq);
2464
2465         cfqd->rq_queued++;
2466         if (rq_is_meta(rq))
2467                 cfqq->meta_pending++;
2468
2469         cfq_update_io_thinktime(cfqd, cic);
2470         cfq_update_io_seektime(cfqd, cfqq, rq);
2471         cfq_update_idle_window(cfqd, cfqq, cic);
2472
2473         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
2474
2475         if (cfqq == cfqd->active_queue) {
2476                 /*
2477                  * Remember that we saw a request from this process, but
2478                  * don't start queuing just yet. Otherwise we risk seeing lots
2479                  * of tiny requests, because we disrupt the normal plugging
2480                  * and merging. If the request is already larger than a single
2481                  * page, let it rip immediately. For that case we assume that
2482                  * merging is already done. Ditto for a busy system that
2483                  * has other work pending, don't risk delaying until the
2484                  * idle timer unplug to continue working.
2485                  */
2486                 if (cfq_cfqq_wait_request(cfqq)) {
2487                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
2488                             cfqd->busy_queues > 1) {
2489                                 del_timer(&cfqd->idle_slice_timer);
2490                                 __blk_run_queue(cfqd->queue);
2491                         }
2492                         cfq_mark_cfqq_must_dispatch(cfqq);
2493                 }
2494         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
2495                 /*
2496                  * not the active queue - expire current slice if it is
2497          * idle and has expired its mean thinktime, or this new queue
2498                  * has some old slice time left and is of higher priority or
2499                  * this new queue is RT and the current one is BE
2500                  */
2501                 cfq_preempt_queue(cfqd, cfqq);
2502                 __blk_run_queue(cfqd->queue);
2503         }
2504 }
2505
2506 static void cfq_insert_request(struct request_queue *q, struct request *rq)
2507 {
2508         struct cfq_data *cfqd = q->elevator->elevator_data;
2509         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2510
2511         cfq_log_cfqq(cfqd, cfqq, "insert_request");
2512         cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
2513
2514         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
2515         list_add_tail(&rq->queuelist, &cfqq->fifo);
2516         cfq_add_rq_rb(rq);
2517
2518         cfq_rq_enqueued(cfqd, cfqq, rq);
2519 }
2520
2521 /*
2522  * Update hw_tag based on peak queue depth over 50 samples under
2523  * sufficient load.
2524  */
2525 static void cfq_update_hw_tag(struct cfq_data *cfqd)
2526 {
2527         struct cfq_queue *cfqq = cfqd->active_queue;
2528
2529         if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
2530                 cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
2531
2532         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
2533             rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
2534                 return;
2535
2536         /*
2537          * If the active queue doesn't have enough requests and can idle, cfq might not
2538          * dispatch sufficient requests to hardware. Don't zero hw_tag in this
2539          * case
2540          */
2541         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
2542             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
2543             CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
2544                 return;
2545
2546         if (cfqd->hw_tag_samples++ < 50)
2547                 return;
2548
2549         if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
2550                 cfqd->hw_tag = 1;
2551         else
2552                 cfqd->hw_tag = 0;
2553
2554         cfqd->hw_tag_samples = 0;
2555         cfqd->rq_in_driver_peak = 0;
2556 }
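
/*
 * e.g. an NCQ-capable disk quickly drives rq_in_driver_peak past
 * CFQ_HW_QUEUE_MIN (5), so after 50 samples hw_tag latches to 1; a
 * queue-depth-1 device never reaches the threshold and hw_tag drops
 * back to 0.
 */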
2557
2558 static void cfq_completed_request(struct request_queue *q, struct request *rq)
2559 {
2560         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2561         struct cfq_data *cfqd = cfqq->cfqd;
2562         const int sync = rq_is_sync(rq);
2563         unsigned long now;
2564
2565         now = jiffies;
2566         cfq_log_cfqq(cfqd, cfqq, "complete");
2567
2568         cfq_update_hw_tag(cfqd);
2569
2570         WARN_ON(!cfqd->rq_in_driver[sync]);
2571         WARN_ON(!cfqq->dispatched);
2572         cfqd->rq_in_driver[sync]--;
2573         cfqq->dispatched--;
2574
2575         if (cfq_cfqq_sync(cfqq))
2576                 cfqd->sync_flight--;
2577
2578         if (sync) {
2579                 RQ_CIC(rq)->last_end_request = now;
2580                 cfqd->last_end_sync_rq = now;
2581         }
2582
2583         /*
2584          * If this is the active queue, check if it needs to be expired,
2585          * or if we want to idle in case it has no pending requests.
2586          */
2587         if (cfqd->active_queue == cfqq) {
2588                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
2589
2590                 if (cfq_cfqq_slice_new(cfqq)) {
2591                         cfq_set_prio_slice(cfqd, cfqq);
2592                         cfq_clear_cfqq_slice_new(cfqq);
2593                 }
2594                 /*
2595                  * If there are no requests waiting in this queue, and
2596                  * there are other queues ready to issue requests, AND
2597                  * those other queues are issuing requests within our
2598                  * mean seek distance, give them a chance to run instead
2599                  * of idling.
2600                  */
2601                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
2602                         cfq_slice_expired(cfqd, 1);
2603                 else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq) &&
2604                          sync && !rq_noidle(rq))
2605                         cfq_arm_slice_timer(cfqd);
2606         }
2607
2608         if (!rq_in_driver(cfqd))
2609                 cfq_schedule_dispatch(cfqd);
2610 }
2611
2612 /*
2613  * we temporarily boost lower priority queues if they are holding fs exclusive
2614  * resources. they are boosted to normal prio (CLASS_BE/4)
2615  */
2616 static void cfq_prio_boost(struct cfq_queue *cfqq)
2617 {
2618         if (has_fs_excl()) {
2619                 /*
2620                  * boost idle prio on transactions that would lock out other
2621                  * users of the filesystem
2622                  */
2623                 if (cfq_class_idle(cfqq))
2624                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
2625                 if (cfqq->ioprio > IOPRIO_NORM)
2626                         cfqq->ioprio = IOPRIO_NORM;
2627         } else {
2628                 /*
2629                  * unboost the queue (if needed)
2630                  */
2631                 cfqq->ioprio_class = cfqq->org_ioprio_class;
2632                 cfqq->ioprio = cfqq->org_ioprio;
2633         }
2634 }
2635
2636 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
2637 {
2638         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
2639                 cfq_mark_cfqq_must_alloc_slice(cfqq);
2640                 return ELV_MQUEUE_MUST;
2641         }
2642
2643         return ELV_MQUEUE_MAY;
2644 }
2645
2646 static int cfq_may_queue(struct request_queue *q, int rw)
2647 {
2648         struct cfq_data *cfqd = q->elevator->elevator_data;
2649         struct task_struct *tsk = current;
2650         struct cfq_io_context *cic;
2651         struct cfq_queue *cfqq;
2652
2653         /*
2654          * don't force setup of a queue from here, as a call to may_queue
2655          * does not necessarily imply that a request actually will be queued.
2656          * so just lookup a possibly existing queue, or return 'may queue'
2657          * if that fails
2658          */
2659         cic = cfq_cic_lookup(cfqd, tsk->io_context);
2660         if (!cic)
2661                 return ELV_MQUEUE_MAY;
2662
2663         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
2664         if (cfqq) {
2665                 cfq_init_prio_data(cfqq, cic->ioc);
2666                 cfq_prio_boost(cfqq);
2667
2668                 return __cfq_may_queue(cfqq);
2669         }
2670
2671         return ELV_MQUEUE_MAY;
2672 }
2673
2674 /*
2675  * queue lock held here
2676  */
2677 static void cfq_put_request(struct request *rq)
2678 {
2679         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2680
2681         if (cfqq) {
2682                 const int rw = rq_data_dir(rq);
2683
2684                 BUG_ON(!cfqq->allocated[rw]);
2685                 cfqq->allocated[rw]--;
2686
2687                 put_io_context(RQ_CIC(rq)->ioc);
2688
2689                 rq->elevator_private = NULL;
2690                 rq->elevator_private2 = NULL;
2691
2692                 cfq_put_queue(cfqq);
2693         }
2694 }
2695
2696 static struct cfq_queue *
2697 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
2698                 struct cfq_queue *cfqq)
2699 {
2700         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
2701         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
2702         cfq_mark_cfqq_coop(cfqq->new_cfqq);
2703         cfq_put_queue(cfqq);
2704         return cic_to_cfqq(cic, 1);
2705 }
2706
2707 static int should_split_cfqq(struct cfq_queue *cfqq)
2708 {
2709         if (cfqq->seeky_start &&
2710             time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
2711                 return 1;
2712         return 0;
2713 }
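
/*
 * With CFQQ_COOP_TOUT at one second (HZ), a merged queue that has been
 * seeky for that long is treated as a failed merge and is broken apart
 * on the next cfq_set_request().
 */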
2714
2715 /*
2716  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
2717  * was the last process referring to said cfqq.
2718  */
2719 static struct cfq_queue *
2720 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
2721 {
2722         if (cfqq_process_refs(cfqq) == 1) {
2723                 cfqq->seeky_start = 0;
2724                 cfqq->pid = current->pid;
2725                 cfq_clear_cfqq_coop(cfqq);
2726                 return cfqq;
2727         }
2728
2729         cic_set_cfqq(cic, NULL, 1);
2730         cfq_put_queue(cfqq);
2731         return NULL;
2732 }
2733 /*
2734  * Allocate cfq data structures associated with this request.
2735  */
2736 static int
2737 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
2738 {
2739         struct cfq_data *cfqd = q->elevator->elevator_data;
2740         struct cfq_io_context *cic;
2741         const int rw = rq_data_dir(rq);
2742         const bool is_sync = rq_is_sync(rq);
2743         struct cfq_queue *cfqq;
2744         unsigned long flags;
2745
2746         might_sleep_if(gfp_mask & __GFP_WAIT);
2747
2748         cic = cfq_get_io_context(cfqd, gfp_mask);
2749
2750         spin_lock_irqsave(q->queue_lock, flags);
2751
2752         if (!cic)
2753                 goto queue_fail;
2754
2755 new_queue:
2756         cfqq = cic_to_cfqq(cic, is_sync);
2757         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2758                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
2759                 cic_set_cfqq(cic, cfqq, is_sync);
2760         } else {
2761                 /*
2762                  * If the queue was seeky for too long, break it apart.
2763                  */
2764                 if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
2765                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
2766                         cfqq = split_cfqq(cic, cfqq);
2767                         if (!cfqq)
2768                                 goto new_queue;
2769                 }
2770
2771                 /*
2772                  * Check to see if this queue is scheduled to merge with
2773                  * another, closely cooperating queue.  The merging of
2774                  * queues happens here as it must be done in process context.
2775                  * The reference on new_cfqq was taken in merge_cfqqs.
2776                  */
2777                 if (cfqq->new_cfqq)
2778                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
2779         }
2780
2781         cfqq->allocated[rw]++;
2782         atomic_inc(&cfqq->ref);
2783
2784         spin_unlock_irqrestore(q->queue_lock, flags);
2785
2786         rq->elevator_private = cic;
2787         rq->elevator_private2 = cfqq;
2788         return 0;
2789
2790 queue_fail:
2791         if (cic)
2792                 put_io_context(cic->ioc);
2793
2794         cfq_schedule_dispatch(cfqd);
2795         spin_unlock_irqrestore(q->queue_lock, flags);
2796         cfq_log(cfqd, "set_request fail");
2797         return 1;
2798 }
2799
2800 static void cfq_kick_queue(struct work_struct *work)
2801 {
2802         struct cfq_data *cfqd =
2803                 container_of(work, struct cfq_data, unplug_work);
2804         struct request_queue *q = cfqd->queue;
2805
2806         spin_lock_irq(q->queue_lock);
2807         __blk_run_queue(cfqd->queue);
2808         spin_unlock_irq(q->queue_lock);
2809 }
2810
2811 /*
2812  * Timer running if the active_queue is currently idling inside its time slice
2813  */
2814 static void cfq_idle_slice_timer(unsigned long data)
2815 {
2816         struct cfq_data *cfqd = (struct cfq_data *) data;
2817         struct cfq_queue *cfqq;
2818         unsigned long flags;
2819         int timed_out = 1;
2820
2821         cfq_log(cfqd, "idle timer fired");
2822
2823         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2824
2825         cfqq = cfqd->active_queue;
2826         if (cfqq) {
2827                 timed_out = 0;
2828
2829                 /*
2830                  * We saw a request before the queue expired, let it through
2831                  */
2832                 if (cfq_cfqq_must_dispatch(cfqq))
2833                         goto out_kick;
2834
2835                 /*
2836                  * expired
2837                  */
2838                 if (cfq_slice_used(cfqq))
2839                         goto expire;
2840
2841                 /*
2842                  * only expire and reinvoke the request handler if there are
2843                  * other queues with pending requests
2844                  */
2845                 if (!cfqd->busy_queues)
2846                         goto out_cont;
2847
2848                 /*
2849                  * not expired and it has a request pending, let it dispatch
2850                  */
2851                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2852                         goto out_kick;
2853         }
2854 expire:
2855         cfq_slice_expired(cfqd, timed_out);
2856 out_kick:
2857         cfq_schedule_dispatch(cfqd);
2858 out_cont:
2859         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2860 }
2861
2862 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2863 {
2864         del_timer_sync(&cfqd->idle_slice_timer);
2865         cancel_work_sync(&cfqd->unplug_work);
2866 }
2867
2868 static void cfq_put_async_queues(struct cfq_data *cfqd)
2869 {
2870         int i;
2871
2872         for (i = 0; i < IOPRIO_BE_NR; i++) {
2873                 if (cfqd->async_cfqq[0][i])
2874                         cfq_put_queue(cfqd->async_cfqq[0][i]);
2875                 if (cfqd->async_cfqq[1][i])
2876                         cfq_put_queue(cfqd->async_cfqq[1][i]);
2877         }
2878
2879         if (cfqd->async_idle_cfqq)
2880                 cfq_put_queue(cfqd->async_idle_cfqq);
2881 }
2882
2883 static void cfq_exit_queue(struct elevator_queue *e)
2884 {
2885         struct cfq_data *cfqd = e->elevator_data;
2886         struct request_queue *q = cfqd->queue;
2887
2888         cfq_shutdown_timer_wq(cfqd);
2889
2890         spin_lock_irq(q->queue_lock);
2891
2892         if (cfqd->active_queue)
2893                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2894
2895         while (!list_empty(&cfqd->cic_list)) {
2896                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2897                                                         struct cfq_io_context,
2898                                                         queue_list);
2899
2900                 __cfq_exit_single_io_context(cfqd, cic);
2901         }
2902
2903         cfq_put_async_queues(cfqd);
2904
2905         spin_unlock_irq(q->queue_lock);
2906
2907         cfq_shutdown_timer_wq(cfqd);
2908
2909         kfree(cfqd);
2910 }
2911
2912 static void *cfq_init_queue(struct request_queue *q)
2913 {
2914         struct cfq_data *cfqd;
2915         int i, j;
2916
2917         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
2918         if (!cfqd)
2919                 return NULL;
2920
2921         for (i = 0; i < 2; ++i)
2922                 for (j = 0; j < 3; ++j)
2923                         cfqd->service_trees[i][j] = CFQ_RB_ROOT;
2924         cfqd->service_tree_idle = CFQ_RB_ROOT;
2925
2926         /*
2927          * Not strictly needed (since RB_ROOT just clears the node and we
2928          * zeroed cfqd on alloc), but better be safe in case someone decides
2929          * to add magic to the rb code
2930          */
2931         for (i = 0; i < CFQ_PRIO_LISTS; i++)
2932                 cfqd->prio_trees[i] = RB_ROOT;
2933
2934         /*
2935          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
2936          * Grab a permanent reference to it, so that the normal code flow
2937          * will not attempt to free it.
2938          */
2939         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
2940         atomic_inc(&cfqd->oom_cfqq.ref);
2941
2942         INIT_LIST_HEAD(&cfqd->cic_list);
2943
2944         cfqd->queue = q;
2945
2946         init_timer(&cfqd->idle_slice_timer);
2947         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2948         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2949
2950         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2951
2952         cfqd->cfq_quantum = cfq_quantum;
2953         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2954         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2955         cfqd->cfq_back_max = cfq_back_max;
2956         cfqd->cfq_back_penalty = cfq_back_penalty;
2957         cfqd->cfq_slice[0] = cfq_slice_async;
2958         cfqd->cfq_slice[1] = cfq_slice_sync;
2959         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2960         cfqd->cfq_slice_idle = cfq_slice_idle;
2961         cfqd->cfq_latency = 1;
2962         cfqd->hw_tag = 1;
2963         cfqd->last_end_sync_rq = jiffies;
2964         return cfqd;
2965 }
2966
2967 static void cfq_slab_kill(void)
2968 {
2969         /*
2970          * Caller already ensured that pending RCU callbacks are completed,
2971          * so we should have no busy allocations at this point.
2972          */
2973         if (cfq_pool)
2974                 kmem_cache_destroy(cfq_pool);
2975         if (cfq_ioc_pool)
2976                 kmem_cache_destroy(cfq_ioc_pool);
2977 }
2978
2979 static int __init cfq_slab_setup(void)
2980 {
2981         cfq_pool = KMEM_CACHE(cfq_queue, 0);
2982         if (!cfq_pool)
2983                 goto fail;
2984
2985         cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
2986         if (!cfq_ioc_pool)
2987                 goto fail;
2988
2989         return 0;
2990 fail:
2991         cfq_slab_kill();
2992         return -ENOMEM;
2993 }
2994
2995 /*
2996  * sysfs parts below -->
2997  */
2998 static ssize_t
2999 cfq_var_show(unsigned int var, char *page)
3000 {
3001         return sprintf(page, "%d\n", var);
3002 }
3003
3004 static ssize_t
3005 cfq_var_store(unsigned int *var, const char *page, size_t count)
3006 {
3007         char *p = (char *) page;
3008
3009         *var = simple_strtoul(p, &p, 10);
3010         return count;
3011 }
3012
3013 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
3014 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
3015 {                                                                       \
3016         struct cfq_data *cfqd = e->elevator_data;                       \
3017         unsigned int __data = __VAR;                                    \
3018         if (__CONV)                                                     \
3019                 __data = jiffies_to_msecs(__data);                      \
3020         return cfq_var_show(__data, (page));                            \
3021 }
3022 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3023 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3024 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3025 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3026 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3027 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3028 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3029 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3030 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3031 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3032 #undef SHOW_FUNCTION
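
/*
 * For illustration, SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0)
 * expands to a show routine that copies cfqd->cfq_quantum into the sysfs
 * page via cfq_var_show(); a non-zero __CONV converts jiffies-based
 * tunables to milliseconds for display.
 */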
3033
3034 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
3035 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
3036 {                                                                       \
3037         struct cfq_data *cfqd = e->elevator_data;                       \
3038         unsigned int __data;                                            \
3039         int ret = cfq_var_store(&__data, (page), count);                \
3040         if (__data < (MIN))                                             \
3041                 __data = (MIN);                                         \
3042         else if (__data > (MAX))                                        \
3043                 __data = (MAX);                                         \
3044         if (__CONV)                                                     \
3045                 *(__PTR) = msecs_to_jiffies(__data);                    \
3046         else                                                            \
3047                 *(__PTR) = __data;                                      \
3048         return ret;                                                     \
3049 }
3050 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
3051 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3052                 UINT_MAX, 1);
3053 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3054                 UINT_MAX, 1);
3055 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3056 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3057                 UINT_MAX, 0);
3058 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
3059 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3060 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3061 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3062                 UINT_MAX, 0);
3063 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
3064 #undef STORE_FUNCTION
3065
3066 #define CFQ_ATTR(name) \
3067         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3068
3069 static struct elv_fs_entry cfq_attrs[] = {
3070         CFQ_ATTR(quantum),
3071         CFQ_ATTR(fifo_expire_sync),
3072         CFQ_ATTR(fifo_expire_async),
3073         CFQ_ATTR(back_seek_max),
3074         CFQ_ATTR(back_seek_penalty),
3075         CFQ_ATTR(slice_sync),
3076         CFQ_ATTR(slice_async),
3077         CFQ_ATTR(slice_async_rq),
3078         CFQ_ATTR(slice_idle),
3079         CFQ_ATTR(low_latency),
3080         __ATTR_NULL
3081 };
3082
3083 static struct elevator_type iosched_cfq = {
3084         .ops = {
3085                 .elevator_merge_fn =            cfq_merge,
3086                 .elevator_merged_fn =           cfq_merged_request,
3087                 .elevator_merge_req_fn =        cfq_merged_requests,
3088                 .elevator_allow_merge_fn =      cfq_allow_merge,
3089                 .elevator_dispatch_fn =         cfq_dispatch_requests,
3090                 .elevator_add_req_fn =          cfq_insert_request,
3091                 .elevator_activate_req_fn =     cfq_activate_request,
3092                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
3093                 .elevator_queue_empty_fn =      cfq_queue_empty,
3094                 .elevator_completed_req_fn =    cfq_completed_request,
3095                 .elevator_former_req_fn =       elv_rb_former_request,
3096                 .elevator_latter_req_fn =       elv_rb_latter_request,
3097                 .elevator_set_req_fn =          cfq_set_request,
3098                 .elevator_put_req_fn =          cfq_put_request,
3099                 .elevator_may_queue_fn =        cfq_may_queue,
3100                 .elevator_init_fn =             cfq_init_queue,
3101                 .elevator_exit_fn =             cfq_exit_queue,
3102                 .trim =                         cfq_free_io_context,
3103         },
3104         .elevator_attrs =       cfq_attrs,
3105         .elevator_name =        "cfq",
3106         .elevator_owner =       THIS_MODULE,
3107 };
3108
3109 static int __init cfq_init(void)
3110 {
3111         /*
3112          * could be 0 on HZ < 1000 setups
3113          */
3114         if (!cfq_slice_async)
3115                 cfq_slice_async = 1;
3116         if (!cfq_slice_idle)
3117                 cfq_slice_idle = 1;
3118
3119         if (cfq_slab_setup())
3120                 return -ENOMEM;
3121
3122         elv_register(&iosched_cfq);
3123
3124         return 0;
3125 }
3126
3127 static void __exit cfq_exit(void)
3128 {
3129         DECLARE_COMPLETION_ONSTACK(all_gone);
3130         elv_unregister(&iosched_cfq);
3131         ioc_gone = &all_gone;
3132         /* ioc_gone's update must be visible before reading ioc_count */
3133         smp_wmb();
3134
3135         /*
3136          * this also protects us from entering cfq_slab_kill() with
3137          * pending RCU callbacks
3138          */
3139         if (elv_ioc_count_read(cfq_ioc_count))
3140                 wait_for_completion(&all_gone);
3141         cfq_slab_kill();
3142 }
3143
3144 module_init(cfq_init);
3145 module_exit(cfq_exit);
3146
3147 MODULE_AUTHOR("Jens Axboe");
3148 MODULE_LICENSE("GPL");
3149 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");