/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                              struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                           struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)      (((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)          (((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)            ((val) & 0xffff)
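/*
 * Illustrative example of the encoding above: the "weight_device" file
 * declared later in blkio_files[] uses
 * .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, BLKIO_PROP_weight_device),
 * so BLKIOFILE_POLICY() recovers BLKIO_POLICY_PROP from the high 16 bits
 * and BLKIOFILE_ATTR() recovers BLKIO_PROP_weight_device from the low 16.
 */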
struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
        /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
        .subsys_id = blkio_subsys_id,
        .use_id = 1,
#endif
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
                        struct blkio_policy_node *pn)
{
        list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
                        struct blkio_group *blkg)
{
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

        if (blkg->plid == plid)
                return 1;

        return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
                        struct blkio_policy_node *pn)
{
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int fileid = BLKIOFILE_ATTR(cft->private);

        return (plid == pn->plid && fileid == pn->fileid);
}
/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
        list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
                enum blkio_policy_id plid, int fileid)
{
        struct blkio_policy_node *pn;

        list_for_each_entry(pn, &blkcg->policy_list, node) {
                if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
                        return pn;
        }

        return NULL;
}
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {
                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != blkg->plid)
                        continue;
                if (blkiop->ops.blkio_update_group_weight_fn)
                        blkiop->ops.blkio_update_group_weight_fn(blkg, weight);
        }
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
                                bool sync)
{
        if (direction)
                stat[BLKIO_STAT_WRITE] += add;
        else
                stat[BLKIO_STAT_READ] += add;
        if (sync)
                stat[BLKIO_STAT_SYNC] += add;
        else
                stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
        if (direction) {
                BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
                stat[BLKIO_STAT_WRITE]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_READ] == 0);
                stat[BLKIO_STAT_READ]--;
        }
        if (sync) {
                BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
                stat[BLKIO_STAT_SYNC]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
                stat[BLKIO_STAT_ASYNC]--;
        }
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                                struct blkio_group *curr_blkg)
{
        if (blkio_blkg_waiting(&blkg->stats))
                return;
        if (blkg == curr_blkg)
                return;
        blkg->stats.start_group_wait_time = sched_clock();
        blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                stats->group_wait_time += now - stats->start_group_wait_time;
        blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                stats->empty_time += now - stats->start_empty_time;
        blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        BUG_ON(blkio_blkg_idling(&blkg->stats));
        blkg->stats.start_idle_time = sched_clock();
        blkio_mark_blkg_idling(&blkg->stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
        unsigned long flags;
        unsigned long long now;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (blkio_blkg_idling(stats)) {
                now = sched_clock();
                if (time_after64(now, stats->start_idle_time))
                        stats->idle_time += now - stats->start_idle_time;
                blkio_clear_blkg_idling(stats);
        }
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
        unsigned long flags;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        stats->avg_queue_size_sum +=
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
        stats->avg_queue_size_samples++;
        blkio_update_group_wait_time(stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
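/*
 * Note: only the running sum and the sample count are maintained here; the
 * average itself (sum / samples) is computed lazily at read time in
 * blkio_get_stat() for BLKIO_STAT_AVG_QUEUE_SIZE.
 */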
void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
        unsigned long flags;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;

        if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
                spin_unlock_irqrestore(&blkg->stats_lock, flags);
                return;
        }

        /*
         * group is already marked empty. This can happen if cfqq got new
         * request in parent group and moved to this group while being added
         * to service tree. Just ignore the event and move on.
         */
        if (blkio_blkg_empty(stats)) {
                spin_unlock_irqrestore(&blkg->stats_lock, flags);
                return;
        }

        stats->start_empty_time = sched_clock();
        blkio_mark_blkg_empty(stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                        unsigned long dequeue)
{
        blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                        struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
                        struct blkio_group *curr_blkg, bool direction,
                        bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
                        sync);
        blkio_end_empty_time(&blkg->stats);
        blkio_set_start_group_wait_time(blkg, curr_blkg);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                        bool direction, bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
                        direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkg->stats.time += time;
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                        uint64_t bytes, bool direction, bool sync)
{
        struct blkio_group_stats *stats;
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        stats->sectors += bytes >> 9;
        blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
                        sync);
        blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
                        direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
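/*
 * Illustrative example: dispatching a 4096-byte synchronous write bumps
 * stats->sectors by 8 (4096 >> 9) and adds one request / 4096 bytes to
 * both the Write and Sync buckets of SERVICED and SERVICE_BYTES, so the
 * "blkio.io_serviced" and "blkio.io_service_bytes" files move in lockstep.
 */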
void blkiocg_update_completion_stats(struct blkio_group *blkg,
        uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
        struct blkio_group_stats *stats;
        unsigned long flags;
        unsigned long long now = sched_clock();

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (time_after64(now, io_start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
                                now - io_start_time, direction, sync);
        if (time_after64(io_start_time, start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
                                io_start_time - start_time, direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
                                        bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
                        sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                struct blkio_group *blkg, void *key, dev_t dev,
                enum blkio_policy_id plid)
{
        unsigned long flags;

        spin_lock_irqsave(&blkcg->lock, flags);
        spin_lock_init(&blkg->stats_lock);
        rcu_assign_pointer(blkg->key, key);
        blkg->blkcg_id = css_id(&blkcg->css);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        blkg->plid = plid;
        spin_unlock_irqrestore(&blkcg->lock, flags);
        /* Need to take css reference ? */
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
        blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
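/*
 * The "key" is an opaque per-queue cookie chosen by the IO policy (CFQ,
 * for instance, passes its per-queue private data pointer); it is only
 * ever compared back in blkiocg_lookup_group() under rcu_read_lock().
 */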
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
        blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (css) {
                blkcg = container_of(css, struct blkio_cgroup, css);
                spin_lock_irqsave(&blkcg->lock, flags);
                if (!hlist_unhashed(&blkg->blkcg_node)) {
                        __blkiocg_del_blkio_group(blkg);
                        ret = 0;
                }
                spin_unlock_irqrestore(&blkcg->lock, flags);
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        void *__key;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                __key = blkg->key;
                if (__key == key)
                        return blkg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct blkio_group_stats *stats;
        struct hlist_node *n;
        uint64_t queued[BLKIO_STAT_TOTAL];
        int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        bool idling, waiting, empty;
        unsigned long long now = sched_clock();
#endif

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                spin_lock(&blkg->stats_lock);
                stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
                idling = blkio_blkg_idling(stats);
                waiting = blkio_blkg_waiting(stats);
                empty = blkio_blkg_empty(stats);
#endif
                /* queued stats are preserved across the reset */
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
                memset(stats, 0, sizeof(struct blkio_group_stats));
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
                if (idling) {
                        blkio_mark_blkg_idling(stats);
                        stats->start_idle_time = now;
                }
                if (waiting) {
                        blkio_mark_blkg_waiting(stats);
                        stats->start_group_wait_time = now;
                }
                if (empty) {
                        blkio_mark_blkg_empty(stats);
                        stats->start_empty_time = now;
                }
#endif
                spin_unlock(&blkg->stats_lock);
        }
        spin_unlock_irq(&blkcg->lock);
        return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
                                int chars_left, bool diskname_only)
{
        snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
        chars_left -= strlen(str);
        if (chars_left <= 0) {
                printk(KERN_WARNING
                        "Possibly incorrect cgroup stat display format");
                return;
        }
        if (diskname_only)
                return;
        switch (type) {
        case BLKIO_STAT_READ:
                strlcat(str, " Read", chars_left);
                break;
        case BLKIO_STAT_WRITE:
                strlcat(str, " Write", chars_left);
                break;
        case BLKIO_STAT_SYNC:
                strlcat(str, " Sync", chars_left);
                break;
        case BLKIO_STAT_ASYNC:
                strlcat(str, " Async", chars_left);
                break;
        case BLKIO_STAT_TOTAL:
                strlcat(str, " Total", chars_left);
                break;
        default:
                strlcat(str, " Invalid", chars_left);
        }
}
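/*
 * Example keys produced for dev 8:16 (sdb): "8:16 Read", "8:16 Write",
 * "8:16 Sync", "8:16 Async", "8:16 Total", or bare "8:16" when
 * diskname_only is set (as for the scalar time/sectors stats).
 */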
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
                                struct cgroup_map_cb *cb, dev_t dev)
{
        blkio_get_key_name(0, dev, str, chars_left, true);
        cb->fill(cb, str, val);
        return val;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
                struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
        uint64_t disk_total;
        char key_str[MAX_KEY_LEN];
        enum stat_sub_type sub_type;

        if (type == BLKIO_STAT_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.time, cb, dev);
        if (type == BLKIO_STAT_SECTORS)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
                uint64_t sum = blkg->stats.avg_queue_size_sum;
                uint64_t samples = blkg->stats.avg_queue_size_samples;
                if (samples)
                        do_div(sum, samples);
                else
                        sum = 0;
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
        }
        if (type == BLKIO_STAT_GROUP_WAIT_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.group_wait_time, cb, dev);
        if (type == BLKIO_STAT_IDLE_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.idle_time, cb, dev);
        if (type == BLKIO_STAT_EMPTY_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.empty_time, cb, dev);
        if (type == BLKIO_STAT_DEQUEUE)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.dequeue, cb, dev);
#endif

        for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
                        sub_type++) {
                blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
                cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
        }
        disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
                        blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
        blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}
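/*
 * A per-device stat read thus renders as one key/value pair per bucket,
 * e.g. for "blkio.io_serviced" (illustrative values only):
 *
 *      8:16 Read 120
 *      8:16 Write 30
 *      8:16 Sync 145
 *      8:16 Async 5
 *      8:16 Total 150
 *
 * blkio_read_blkg_stats() below appends a cgroup-wide "Total" line.
 */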
static int blkio_check_dev_num(dev_t dev)
{
        int part = 0;
        struct gendisk *disk;

        disk = get_gendisk(dev, &part);
        if (!disk || part)
                return -ENODEV;

        return 0;
}
static int blkio_policy_parse_and_set(char *buf,
        struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
        char *s[4], *p, *major_s = NULL, *minor_s = NULL;
        int ret, i = 0;
        unsigned long major, minor, temp;
        dev_t dev;

        memset(s, 0, sizeof(s));
        while ((p = strsep(&buf, " ")) != NULL) {
                if (!*p)
                        continue;
                s[i++] = p;
                /* Prevent from inputing too many things */
                if (i == 3)
                        break;
        }
        if (i != 2)
                return -EINVAL;

        p = strsep(&s[0], ":");
        if (!p || !s[0])
                return -EINVAL;
        major_s = p;
        minor_s = s[0];

        ret = strict_strtoul(major_s, 10, &major);
        if (ret)
                return -EINVAL;
        ret = strict_strtoul(minor_s, 10, &minor);
        if (ret)
                return -EINVAL;

        dev = MKDEV(major, minor);
        ret = blkio_check_dev_num(dev);
        if (ret)
                return ret;
        newpn->dev = dev;

        switch (plid) {
        case BLKIO_POLICY_PROP:
                ret = strict_strtoul(s[1], 10, &temp);
                if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
                    temp > BLKIO_WEIGHT_MAX)
                        return -EINVAL;
                newpn->plid = plid;
                newpn->fileid = fileid;
                newpn->weight = temp;
                break;
        default:
                BUG();
        }
        return 0;
}
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
                        dev_t dev)
{
        struct blkio_policy_node *pn;

        pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
                        BLKIO_PROP_weight_device);
        if (pn)
                return pn->weight;
        else
                return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
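/*
 * A per-device rule, when present, overrides the cgroup-wide default:
 * with blkcg->weight == 500 and a "weight_device" rule of 300 for this
 * dev, callers of blkcg_get_weight() see 300, not 500.
 */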
/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
        switch (pn->plid) {
        case BLKIO_POLICY_PROP:
                if (pn->weight == 0)
                        return 1;
                break;
        default:
                BUG();
        }

        return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
                        struct blkio_policy_node *newpn)
{
        switch (oldpn->plid) {
        case BLKIO_POLICY_PROP:
                oldpn->weight = newpn->weight;
                break;
        default:
                BUG();
        }
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
                struct blkio_group *blkg, struct blkio_policy_node *pn)
{
        unsigned int weight;

        switch (blkg->plid) {
        case BLKIO_POLICY_PROP:
                weight = pn->weight ? pn->weight :
                                blkcg->weight;
                blkio_update_group_weight(blkg, weight);
                break;
        default:
                BUG();
        }
}
/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
                        struct blkio_policy_node *pn)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);

        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                if (pn->dev != blkg->dev || pn->plid != blkg->plid)
                        continue;
                blkio_update_blkg_policy(blkcg, blkg, pn);
        }

        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
}
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
                              const char *buffer)
{
        int ret = 0;
        char *buf;
        struct blkio_policy_node *newpn, *pn;
        struct blkio_cgroup *blkcg;
        int keep_newpn = 0;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int fileid = BLKIOFILE_ATTR(cft->private);

        buf = kstrdup(buffer, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
        if (!newpn) {
                ret = -ENOMEM;
                goto free_buf;
        }

        ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
        if (ret)
                goto free_newpn;

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        spin_lock_irq(&blkcg->lock);

        pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
        if (!pn) {
                if (!blkio_delete_rule_command(newpn)) {
                        blkio_policy_insert_node(blkcg, newpn);
                        keep_newpn = 1;
                }
                spin_unlock_irq(&blkcg->lock);
                goto update_io_group;
        }

        if (blkio_delete_rule_command(newpn)) {
                blkio_policy_delete_node(pn);
                spin_unlock_irq(&blkcg->lock);
                goto update_io_group;
        }
        spin_unlock_irq(&blkcg->lock);

        blkio_update_policy_rule(pn, newpn);

update_io_group:
        blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
        if (!keep_newpn)
                kfree(newpn);
free_buf:
        kfree(buf);
        return ret;
}
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
        switch (pn->plid) {
        case BLKIO_POLICY_PROP:
                if (pn->fileid == BLKIO_PROP_weight_device)
                        seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
                                MINOR(pn->dev), pn->weight);
                break;
        default:
                BUG();
        }
}
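/*
 * Example output line for a weight rule of 300 on dev 8:16, as printed
 * by the seq_printf() above:
 *
 *      8:16    300
 */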
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
                        struct blkio_cgroup *blkcg, struct seq_file *m)
{
        struct blkio_policy_node *pn;

        if (!list_empty(&blkcg->policy_list)) {
                spin_lock_irq(&blkcg->lock);
                list_for_each_entry(pn, &blkcg->policy_list, node) {
                        if (!pn_matches_cftype(cft, pn))
                                continue;
                        blkio_print_policy_node(m, pn);
                }
                spin_unlock_irq(&blkcg->lock);
        }
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
                                struct seq_file *m)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight_device:
                        blkio_read_policy_node_files(cft, blkcg, m);
                        return 0;
                default:
                        BUG();
                }
                break;
        default:
                BUG();
        }

        return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
                struct cftype *cft, struct cgroup_map_cb *cb,
                enum stat_type type, bool show_total)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        uint64_t cgroup_total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                if (blkg->dev) {
                        if (!cftype_blkg_same_policy(cft, blkg))
                                continue;
                        spin_lock_irq(&blkg->stats_lock);
                        cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
                                                type);
                        spin_unlock_irq(&blkg->stats_lock);
                }
        }
        if (show_total)
                cb->fill(cb, "Total", cgroup_total);
        rcu_read_unlock();
        return 0;
}
/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
                                struct cgroup_map_cb *cb)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_TIME, 0);
                case BLKIO_PROP_sectors:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_SECTORS, 0);
                case BLKIO_PROP_io_service_bytes:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_SERVICE_BYTES, 1);
                case BLKIO_PROP_io_serviced:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_SERVICED, 1);
                case BLKIO_PROP_io_service_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_SERVICE_TIME, 1);
                case BLKIO_PROP_io_wait_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_WAIT_TIME, 1);
                case BLKIO_PROP_io_merged:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_MERGED, 1);
                case BLKIO_PROP_io_queued:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
                case BLKIO_PROP_dequeue:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_DEQUEUE, 0);
                case BLKIO_PROP_avg_queue_size:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_AVG_QUEUE_SIZE, 0);
                case BLKIO_PROP_group_wait_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_GROUP_WAIT_TIME, 0);
                case BLKIO_PROP_idle_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_IDLE_TIME, 0);
                case BLKIO_PROP_empty_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_EMPTY_TIME, 0);
#endif
                default:
                        BUG();
                }
                break;
        default:
                BUG();
        }

        return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        struct blkio_policy_node *pn;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;

        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                pn = blkio_policy_search_node(blkcg, blkg->dev,
                                BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
                if (pn)
                        continue;

                blkio_update_group_weight(blkg, blkcg->weight);
        }
        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}
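/*
 * Groups that have an explicit "weight_device" rule are deliberately
 * skipped above: writing blkio.weight only re-weights the groups still
 * following the cgroup-wide default.
 */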
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight:
                        return (u64)blkcg->weight;
                }
                break;
        default:
                BUG();
        }

        return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight:
                        return blkio_weight_write(blkcg, val);
                }
                break;
        default:
                BUG();
        }

        return 0;
}
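/*
 * Illustrative usage of the control files declared below (device numbers
 * are examples only):
 *
 *      echo "8:16 300" > blkio.weight_device   (set a per-device rule)
 *      echo "8:16 0"   > blkio.weight_device   (weight 0 deletes the rule)
 *      echo 500        > blkio.weight          (cgroup-wide default)
 *      cat blkio.io_service_bytes               (map-style stats)
 */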
struct cftype blkio_files[] = {
        {
                .name = "weight_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_weight_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "weight",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_weight),
                .read_u64 = blkiocg_file_read_u64,
                .write_u64 = blkiocg_file_write_u64,
        },
        {
                .name = "time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "sectors",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_sectors),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_service_bytes",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_service_bytes),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_serviced",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_serviced),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_service_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_service_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_wait_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_wait_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_merged",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_merged),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_queued",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_queued),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "reset_stats",
                .write_u64 = blkiocg_reset_stats,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "avg_queue_size",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_avg_queue_size),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "group_wait_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_group_wait_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "idle_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_idle_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "empty_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_empty_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "dequeue",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_dequeue),
                .read_map = blkiocg_file_read_map,
        },
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        void *key;
        struct blkio_policy_type *blkiop;
        struct blkio_policy_node *pn, *pntmp;

        rcu_read_lock();
        do {
                spin_lock_irqsave(&blkcg->lock, flags);

                if (hlist_empty(&blkcg->blkg_list)) {
                        spin_unlock_irqrestore(&blkcg->lock, flags);
                        break;
                }

                blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                                        blkcg_node);
                key = rcu_dereference(blkg->key);
                __blkiocg_del_blkio_group(blkg);

                spin_unlock_irqrestore(&blkcg->lock, flags);

                /*
                 * This blkio_group is being unlinked as associated cgroup is
                 * going away. Let all the IO controlling policies know about
                 * this event. Currently this is static call to one io
                 * controlling policy. Once we have more policies in place, we
                 * need some dynamic registration of callback function.
                 */
                spin_lock(&blkio_list_lock);
                list_for_each_entry(blkiop, &blkio_list, list)
                        blkiop->ops.blkio_unlink_group_fn(key, blkg);
                spin_unlock(&blkio_list_lock);
        } while (1);

        list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
                blkio_policy_delete_node(pn);
                kfree(pn);
        }

        free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        /* Currently we do not support hierarchy deeper than two level (0,1) */
        if (parent != cgroup->top_cgroup)
                return ERR_PTR(-EINVAL);

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        INIT_LIST_HEAD(&blkcg->policy_list);
        return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
                                struct cgroup *cgroup, struct task_struct *tsk,
                                bool threadgroup)
{
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                ret = -EINVAL;
        task_unlock(tsk);

        return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                                struct cgroup *prev, struct task_struct *tsk,
                                bool threadgroup)
{
        struct io_context *ioc;

        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc)
                ioc->cgroup_changed = 1;
        task_unlock(tsk);
}
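/*
 * cgroup_changed is only a hint: nothing is migrated here. The flag is
 * left for the IO scheduler to notice on the task's next IO submission,
 * at which point the task's queue associations can be re-evaluated
 * against the new cgroup (a sketch of the contract, not enforced here).
 */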
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_add_tail(&blkiop->list, &blkio_list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_del_init(&blkiop->list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
        return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
        cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");