blkio-throttle: Fix link failure on i386
[linux-3.10.git] / block / blk-throttle.c
index 4b49201..a467002 100644
@@ -59,12 +59,20 @@ struct throtl_grp {
        /* bytes per second rate limits */
        uint64_t bps[2];
 
+       /* IOPS limits */
+       unsigned int iops[2];
+
        /* Number of bytes dispatched in current slice */
        uint64_t bytes_disp[2];
+       /* Number of bio's dispatched in current slice */
+       unsigned int io_disp[2];
 
        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];
+
+       /* Some throttle limits got updated for the group */
+       bool limits_changed;
 };
 
 struct throtl_data
@@ -82,12 +90,14 @@ struct throtl_data
        unsigned int nr_queued[2];
 
        /*
-        * number of total undestroyed groups (excluding root group)
+        * number of total undestroyed groups
         */
        unsigned int nr_undestroyed_grps;
 
        /* Work for dispatching throttled bios */
        struct delayed_work throtl_work;
+
+       atomic_t limits_changed;
 };
 
 enum tg_state_flags {
@@ -194,6 +204,8 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
 
        tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
        tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
+       tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
+       tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
 
        hlist_add_head(&tg->tg_node, &td->tg_list);
        td->nr_undestroyed_grps++;
@@ -335,6 +347,7 @@ static inline void
 throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 {
        tg->bytes_disp[rw] = 0;
+       tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + throtl_slice;
        throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
@@ -365,7 +378,8 @@ throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 static inline void
 throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 {
-       unsigned long nr_slices, bytes_trim, time_elapsed;
+       unsigned long nr_slices, time_elapsed, io_trim;
+       u64 bytes_trim, tmp;
 
        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 
@@ -383,10 +397,13 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 
        if (!nr_slices)
                return;
+       tmp = tg->bps[rw] * throtl_slice * nr_slices;
+       do_div(tmp, HZ);
+       bytes_trim = tmp;
 
-       bytes_trim = (tg->bps[rw] * throtl_slice * nr_slices)/HZ;
+       io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
 
-       if (!bytes_trim)
+       if (!bytes_trim && !io_trim)
                return;
 
        if (tg->bytes_disp[rw] >= bytes_trim)
@@ -394,14 +411,98 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
        else
                tg->bytes_disp[rw] = 0;
 
+       if (tg->io_disp[rw] >= io_trim)
+               tg->io_disp[rw] -= io_trim;
+       else
+               tg->io_disp[rw] = 0;
+
        tg->slice_start[rw] += nr_slices * throtl_slice;
 
-       throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%lu"
+       throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
                        " start=%lu end=%lu jiffies=%lu",
-                       rw == READ ? 'R' : 'W', nr_slices, bytes_trim,
+                       rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
                        tg->slice_start[rw], tg->slice_end[rw], jiffies);
 }
 
+static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
+               struct bio *bio, unsigned long *wait)
+{
+       bool rw = bio_data_dir(bio);
+       unsigned int io_allowed;
+       unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+
+       jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
+
+       /* Slice has just started. Consider one slice interval */
+       if (!jiffy_elapsed)
+               jiffy_elapsed_rnd = throtl_slice;
+
+       jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
+
+       io_allowed = (tg->iops[rw] * jiffies_to_msecs(jiffy_elapsed_rnd))
+                               / MSEC_PER_SEC;
+
+       if (tg->io_disp[rw] + 1 <= io_allowed) {
+               if (wait)
+                       *wait = 0;
+               return 1;
+       }
+
+       /* Calc approx time to dispatch */
+       jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
+
+       if (jiffy_wait > jiffy_elapsed)
+               jiffy_wait = jiffy_wait - jiffy_elapsed;
+       else
+               jiffy_wait = 1;
+
+       if (wait)
+               *wait = jiffy_wait;
+       return 0;
+}
+
+static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
+               struct bio *bio, unsigned long *wait)
+{
+       bool rw = bio_data_dir(bio);
+       u64 bytes_allowed, extra_bytes, tmp;
+       unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+
+       jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
+
+       /* Slice has just started. Consider one slice interval */
+       if (!jiffy_elapsed)
+               jiffy_elapsed_rnd = throtl_slice;
+
+       jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
+
+       tmp = tg->bps[rw] * jiffies_to_msecs(jiffy_elapsed_rnd);
+       do_div(tmp, MSEC_PER_SEC);
+       bytes_allowed = tmp;
+
+       if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+               if (wait)
+                       *wait = 0;
+               return 1;
+       }
+
+       /* Calc approx time to dispatch */
+       extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+       jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
+
+       if (!jiffy_wait)
+               jiffy_wait = 1;
+
+       /*
+        * This wait time is without taking into consideration the rounding
+        * up we did. Add that time also.
+        */
+       jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
+       if (wait)
+               *wait = jiffy_wait;
+       return 0;
+}
+
 /*
  * Returns whether one can dispatch a bio or not. Also returns approx number
  * of jiffies to wait before this bio is within IO rate and can be dispatched
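
The do_div() conversions above, in throtl_trim_slice() and tg_with_in_bps_limit(), are what the subject line refers to: on 32-bit x86 a plain '/' applied to a u64 makes gcc emit a call to the libgcc helper __udivdi3(), which the kernel does not provide, so the build fails at link time with an undefined reference. A minimal sketch of the pattern, not part of the patch (the helper name is invented for illustration):

#include <linux/jiffies.h>
#include <asm/div64.h>

/* Hypothetical helper: bytes a group may dispatch over 'slice' jiffies at rate 'bps'. */
static inline u64 bytes_per_slice(u64 bps, unsigned long slice)
{
	u64 tmp = bps * slice;

	/*
	 * do_div() divides 'tmp' in place by a 32-bit divisor and returns
	 * the remainder; no __udivdi3() call is generated, so this links
	 * on i386 as well as on 64-bit architectures.
	 */
	do_div(tmp, HZ);
	return tmp;
}
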
@@ -410,11 +511,10 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
                                struct bio *bio, unsigned long *wait)
 {
        bool rw = bio_data_dir(bio);
-       u64 bytes_allowed, extra_bytes;
-       unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+       unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 
        /*
-        * Currently whole state machine of group depends on first bio
+        * Currently whole state machine of group depends on first bio
         * queued in the group bio list. So one should not be calling
         * this function with a different bio if there are other bios
         * queued.
@@ -422,7 +522,7 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
        BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
 
        /* If tg->bps = -1, then BW is unlimited */
-       if (tg->bps[rw] == -1) {
+       if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
                if (wait)
                        *wait = 0;
                return 1;
@@ -440,41 +540,20 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
                        throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
        }
 
-       jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
-
-       /* Slice has just started. Consider one slice interval */
-       if (!jiffy_elapsed)
-               jiffy_elapsed_rnd = throtl_slice;
-
-       jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
-
-       bytes_allowed = (tg->bps[rw] * jiffies_to_msecs(jiffy_elapsed_rnd))
-                               / MSEC_PER_SEC;
-
-       if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+       if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
+           && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
                if (wait)
                        *wait = 0;
                return 1;
        }
 
-       /* Calc approx time to dispatch */
-       extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
-       jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
-
-       if (!jiffy_wait)
-               jiffy_wait = 1;
-
-       /*
-        * This wait time is without taking into consideration the rounding
-        * up we did. Add that time also.
-        */
-       jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
+       max_wait = max(bps_wait, iops_wait);
 
        if (wait)
-               *wait = jiffy_wait;
+               *wait = max_wait;
 
-       if (time_before(tg->slice_end[rw], jiffies + jiffy_wait))
-               throtl_extend_slice(td, tg, rw, jiffies + jiffy_wait);
+       if (time_before(tg->slice_end[rw], jiffies + max_wait))
+               throtl_extend_slice(td, tg, rw, jiffies + max_wait);
 
        return 0;
 }
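
As a rough worked example of the IOPS half of this check (all numbers assumed, not taken from the patch): with HZ = 1000, the default 100 ms throtl_slice, an iops limit of 100 and io_disp[rw] already at 10 after one full slice, tg_with_in_iops_limit() computes io_allowed = (100 * 100) / 1000 = 10, so the next bio does not fit. It then estimates jiffy_wait = ((10 + 1) * 1000) / 100 + 1 = 111 jiffies, subtracts the 100 jiffies already elapsed, and tg_may_dispatch() reports a wait of about 11 ms (or more, if the bps limit asks for a longer wait) and extends the slice accordingly.
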
@@ -486,13 +565,13 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 
        /* Charge the bio to the group */
        tg->bytes_disp[rw] += bio->bi_size;
+       tg->io_disp[rw]++;
 
        /*
         * TODO: This will take blkg->stats_lock. Figure out a way
         * to avoid this cost.
         */
        blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
-
 }
 
 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -522,15 +601,6 @@ static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;
 
-       /*
-        * If group is already on active tree, then update dispatch time
-        * only if it is lesser than existing dispatch time. Otherwise
-        * always update the dispatch time
-        */
-
-       if (throtl_tg_on_rr(tg) && time_before(disptime, tg->disptime))
-               return;
-
        /* Update dispatch time */
        throtl_dequeue_tg(td, tg);
        tg->disptime = disptime;
@@ -621,6 +691,46 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
        return nr_disp;
 }
 
+static void throtl_process_limit_change(struct throtl_data *td)
+{
+       struct throtl_grp *tg;
+       struct hlist_node *pos, *n;
+
+       /*
+        * Make sure the atomic_inc() effects from the
+        * throtl_update_blkio_group_read_bps() group of functions are
+        * visible.
+        * Is this barrier required, or is the smp_mb__after_atomic_inc()
+        * after the atomic_inc() sufficient?
+        */
+       smp_rmb();
+       if (!atomic_read(&td->limits_changed))
+               return;
+
+       throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
+
+       hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+               /*
+                * Do I need an smp_rmb() here to make sure the
+                * tg->limits_changed update is visible? For now we rely on
+                * the smp_rmb() at the beginning of the function instead of
+                * adding a new one here.
+                */
+
+               if (throtl_tg_on_rr(tg) && tg->limits_changed) {
+                       throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
+                               " riops=%u wiops=%u", tg->bps[READ],
+                               tg->bps[WRITE], tg->iops[READ],
+                               tg->iops[WRITE]);
+                       tg_update_disptime(td, tg);
+                       tg->limits_changed = false;
+               }
+       }
+
+       smp_mb__before_atomic_dec();
+       atomic_dec(&td->limits_changed);
+       smp_mb__after_atomic_dec();
+}
+
 /* Dispatch throttled bios. Should be called without queue lock held. */
 static int throtl_dispatch(struct request_queue *q)
 {
@@ -631,6 +741,8 @@ static int throtl_dispatch(struct request_queue *q)
 
        spin_lock_irq(q->queue_lock);
 
+       throtl_process_limit_change(td);
+
        if (!total_nr_queued(td))
                goto out;
 
@@ -751,16 +863,74 @@ void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
        spin_unlock_irqrestore(td->queue->queue_lock, flags);
 }
 
-static void throtl_update_blkio_group_read_bps (struct blkio_group *blkg,
-                       u64 read_bps)
+/*
+ * For all update functions, key should be a valid pointer because these
+ * update functions are called under blkcg_lock; that means blkg is
+ * valid and, in turn, key is valid. The queue exit path cannot race
+ * because of blkcg_lock.
+ *
+ * We cannot take the queue lock in the update functions, as taking the
+ * queue lock under blkcg_lock is not allowed; on other paths we take
+ * blkcg_lock under queue_lock.
+ */
+static void throtl_update_blkio_group_read_bps(void *key,
+                               struct blkio_group *blkg, u64 read_bps)
 {
+       struct throtl_data *td = key;
+
        tg_of_blkg(blkg)->bps[READ] = read_bps;
+       /* Make sure read_bps is updated before setting limits_changed */
+       smp_wmb();
+       tg_of_blkg(blkg)->limits_changed = true;
+
+       /* Make sure tg->limits_changed is updated before td->limits_changed */
+       smp_mb__before_atomic_inc();
+       atomic_inc(&td->limits_changed);
+       smp_mb__after_atomic_inc();
+
+       /* Schedule a work now to process the limit change */
+       throtl_schedule_delayed_work(td->queue, 0);
 }
 
-static void throtl_update_blkio_group_write_bps (struct blkio_group *blkg,
-                       u64 write_bps)
+static void throtl_update_blkio_group_write_bps(void *key,
+                               struct blkio_group *blkg, u64 write_bps)
 {
+       struct throtl_data *td = key;
+
        tg_of_blkg(blkg)->bps[WRITE] = write_bps;
+       smp_wmb();
+       tg_of_blkg(blkg)->limits_changed = true;
+       smp_mb__before_atomic_inc();
+       atomic_inc(&td->limits_changed);
+       smp_mb__after_atomic_inc();
+       throtl_schedule_delayed_work(td->queue, 0);
+}
+
+static void throtl_update_blkio_group_read_iops(void *key,
+                       struct blkio_group *blkg, unsigned int read_iops)
+{
+       struct throtl_data *td = key;
+
+       tg_of_blkg(blkg)->iops[READ] = read_iops;
+       smp_wmb();
+       tg_of_blkg(blkg)->limits_changed = true;
+       smp_mb__before_atomic_inc();
+       atomic_inc(&td->limits_changed);
+       smp_mb__after_atomic_inc();
+       throtl_schedule_delayed_work(td->queue, 0);
+}
+
+static void throtl_update_blkio_group_write_iops(void *key,
+                       struct blkio_group *blkg, unsigned int write_iops)
+{
+       struct throtl_data *td = key;
+
+       tg_of_blkg(blkg)->iops[WRITE] = write_iops;
+       smp_wmb();
+       tg_of_blkg(blkg)->limits_changed = true;
+       smp_mb__before_atomic_inc();
+       atomic_inc(&td->limits_changed);
+       smp_mb__after_atomic_inc();
+       throtl_schedule_delayed_work(td->queue, 0);
 }
 
 void throtl_shutdown_timer_wq(struct request_queue *q)
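
The four update callbacks above share the same tail after storing the new limit; a hypothetical refactoring, not part of the patch, could factor that tail into one helper (throtl_update_limit is an invented name), for example:

/* Hypothetical helper: common tail of the blkio_update_group_*_fn callbacks. */
static void throtl_update_limit(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Make sure the new limit is stored before setting limits_changed */
	smp_wmb();
	tg->limits_changed = true;

	/* Make sure tg->limits_changed is updated before td->limits_changed */
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();

	/* Schedule a work item now to process the limit change */
	throtl_schedule_delayed_work(td->queue, 0);
}

static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->bps[READ] = read_bps;
	throtl_update_limit(td, tg_of_blkg(blkg));
}
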
@@ -777,7 +947,12 @@ static struct blkio_policy_type blkio_policy_throtl = {
                                        throtl_update_blkio_group_read_bps,
                .blkio_update_group_write_bps_fn =
                                        throtl_update_blkio_group_write_bps,
+               .blkio_update_group_read_iops_fn =
+                                       throtl_update_blkio_group_read_iops,
+               .blkio_update_group_write_iops_fn =
+                                       throtl_update_blkio_group_write_iops,
        },
+       .plid = BLKIO_POLICY_THROTL,
 };
 
 int blk_throtl_bio(struct request_queue *q, struct bio **biop)
@@ -799,8 +974,14 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
                /*
                 * There is already another bio queued in same dir. No
                 * need to update dispatch time.
+                * Still update the disptime if rate limits on this group
+                * were changed.
                 */
-               update_disptime = false;
+               if (!tg->limits_changed)
+                       update_disptime = false;
+               else
+                       tg->limits_changed = false;
+
                goto queue_bio;
        }
 
@@ -811,9 +992,11 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
        }
 
 queue_bio:
-       throtl_log_tg(td, tg, "[%c] bio. disp=%u sz=%u bps=%llu"
-                       " queued=%d/%d", rw == READ ? 'R' : 'W',
+       throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
+                       " iodisp=%u iops=%u queued=%d/%d",
+                       rw == READ ? 'R' : 'W',
                        tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+                       tg->io_disp[rw], tg->iops[rw],
                        tg->nr_queued[READ], tg->nr_queued[WRITE]);
 
        throtl_add_bio_tg(q->td, tg, bio);
@@ -840,6 +1023,7 @@ int blk_throtl_init(struct request_queue *q)
 
        INIT_HLIST_HEAD(&td->tg_list);
        td->tg_service_tree = THROTL_RB_ROOT;
+       atomic_set(&td->limits_changed, 0);
 
        /* Init root group */
        tg = &td->root_tg;
@@ -850,7 +1034,18 @@ int blk_throtl_init(struct request_queue *q)
 
        /* Practically unlimited BW */
        tg->bps[0] = tg->bps[1] = -1;
-       atomic_set(&tg->ref, 1);
+       tg->iops[0] = tg->iops[1] = -1;
+
+       /*
+        * Set the root group reference count to 2. One reference is dropped
+        * when all groups on tg_list are deleted during queue exit. The
+        * other reference stays because we do not want to free this group:
+        * it is statically allocated and is destroyed only when throtl_data
+        * goes away.
+        */
+       atomic_set(&tg->ref, 2);
+       hlist_add_head(&tg->tg_node, &td->tg_list);
+       td->nr_undestroyed_grps++;
 
        INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
 
@@ -876,10 +1071,9 @@ void blk_throtl_exit(struct request_queue *q)
 
        spin_lock_irq(q->queue_lock);
        throtl_release_tgs(td);
-       blkiocg_del_blkio_group(&td->root_tg.blkg);
 
        /* If there are other groups */
-       if (td->nr_undestroyed_grps >= 1)
+       if (td->nr_undestroyed_grps > 0)
                wait = true;
 
        spin_unlock_irq(q->queue_lock);
@@ -897,6 +1091,13 @@ void blk_throtl_exit(struct request_queue *q)
         */
        if (wait)
                synchronize_rcu();
+
+       /*
+        * Just to be safe: if somebody updated the limits through the cgroup
+        * interface after the previous flush and another work item got
+        * queued, cancel it.
+        */
+       throtl_shutdown_timer_wq(q);
        throtl_td_free(td);
 }