diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e5c0244962b020ab3cd4e4d00a0be04e5cc18af5..a3f638ac3de142c550132b5ecdca879ed8c590e6 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -48,20 +48,19 @@ static inline struct freezer *task_freezer(struct task_struct *task)
                            struct freezer, css);
 }
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
 {
-       struct freezer *freezer;
-       enum freezer_state state;
+       enum freezer_state state = task_freezer(task)->state;
+       return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+}
 
+int cgroup_freezing_or_frozen(struct task_struct *task)
+{
+       int result;
        task_lock(task);
-       freezer = task_freezer(task);
-       if (!freezer->css.cgroup->parent)
-               state = CGROUP_THAWED; /* root cgroup can't be frozen */
-       else
-               state = freezer->state;
+       result = __cgroup_freezing_or_frozen(task);
        task_unlock(task);
-
-       return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+       return result;
 }
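
The split above separates locking from the check itself: the new __cgroup_freezing_or_frozen() only reads task_freezer(task)->state, so any caller that already keeps the task's css_set stable can use it directly. The dropped root-cgroup special case was there because the root cgroup can't be frozen, so its freezer never leaves CGROUP_THAWED and the plain state check gives the same answer. A minimal sketch of the two calling conventions the rest of the patch relies on (fragment only, declarations and kernel context assumed):

	/* Caller holding no relevant lock: take task_lock(), as
	 * cgroup_freezing_or_frozen() does above. */
	task_lock(task);
	result = __cgroup_freezing_or_frozen(task);
	task_unlock(task);

	/* An RCU read-side critical section also keeps the task's
	 * css_set alive, so rcu_read_lock() suffices; this is what
	 * freezer_can_attach_task() below depends on. */
	rcu_read_lock();
	result = __cgroup_freezing_or_frozen(task);
	rcu_read_unlock();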
 
 /*
@@ -89,10 +88,10 @@ struct cgroup_subsys freezer_subsys;
 
 /* Locks taken and their ordering
  * ------------------------------
- * css_set_lock
  * cgroup_mutex (AKA cgroup_lock)
- * task->alloc_lock (AKA task_lock)
  * freezer->lock
+ * css_set_lock
+ * task->alloc_lock (AKA task_lock)
  * task->sighand->siglock
  *
  * cgroup code forces css_set_lock to be taken before task->alloc_lock
@@ -100,33 +99,38 @@ struct cgroup_subsys freezer_subsys;
  * freezer_create(), freezer_destroy():
  * cgroup_mutex [ by cgroup core ]
  *
- * can_attach():
- * cgroup_mutex
+ * freezer_can_attach():
+ * cgroup_mutex (held by caller of can_attach)
  *
- * cgroup_frozen():
+ * cgroup_freezing_or_frozen():
  * task->alloc_lock (to get task's cgroup)
  *
  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
- * task->alloc_lock (to get task's cgroup)
  * freezer->lock
  *  sighand->siglock (if the cgroup is freezing)
  *
  * freezer_read():
  * cgroup_mutex
  *  freezer->lock
+ *   write_lock css_set_lock (cgroup iterator start)
+ *    task->alloc_lock
  *   read_lock css_set_lock (cgroup iterator start)
  *
  * freezer_write() (freeze):
  * cgroup_mutex
  *  freezer->lock
+ *   write_lock css_set_lock (cgroup iterator start)
+ *    task->alloc_lock
  *   read_lock css_set_lock (cgroup iterator start)
- *    sighand->siglock
+ *    sighand->siglock (fake signal delivery inside freeze_task())
  *
  * freezer_write() (unfreeze):
  * cgroup_mutex
  *  freezer->lock
+ *   write_lock css_set_lock (cgroup iterator start)
+ *    task->alloc_lock
  *   read_lock css_set_lock (cgroup iterator start)
- *    task->alloc_lock (to prevent races with freeze_task())
+ *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
  *     sighand->siglock
  */
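
Read top to bottom, the updated hierarchy means each lock may only be taken while holding the ones listed before it; the patch moves css_set_lock from the top of the list to below freezer->lock to match how the iterator is actually used. A simplified sketch of how the freezer_write() (freeze) path nests these locks (fragment; cgroup_mutex is already held by the cgroup core, declarations omitted):

	spin_lock_irq(&freezer->lock);
	cgroup_iter_start(cgroup, &it);		/* read_lock css_set_lock */
	while ((task = cgroup_iter_next(cgroup, &it))) {
		/* fake signal delivery inside freeze_task() takes
		 * task->sighand->siglock */
		if (!freeze_task(task, true))
			continue;
	}
	cgroup_iter_end(cgroup, &it);		/* read_unlock css_set_lock */
	spin_unlock_irq(&freezer->lock);
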
 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -149,7 +153,7 @@ static void freezer_destroy(struct cgroup_subsys *ss,
        kfree(cgroup_freezer(cgroup));
 }
 
-/* Task is frozen or will freeze immediately when next it gets woken */
+/* task is frozen or will freeze immediately when next it gets woken */
 static bool is_task_frozen_enough(struct task_struct *task)
 {
        return frozen(task) ||
@@ -163,37 +167,29 @@ static bool is_task_frozen_enough(struct task_struct *task)
  */
 static int freezer_can_attach(struct cgroup_subsys *ss,
                              struct cgroup *new_cgroup,
-                             struct task_struct *task, bool threadgroup)
+                             struct task_struct *task)
 {
        struct freezer *freezer;
 
        /*
         * Anything frozen can't move or be moved to/from.
-        *
-        * Since orig_freezer->state == FROZEN means that @task has been
-        * frozen, so it's sufficient to check the latter condition.
         */
 
-       if (is_task_frozen_enough(task))
-               return -EBUSY;
-
        freezer = cgroup_freezer(new_cgroup);
-       if (freezer->state == CGROUP_FROZEN)
+       if (freezer->state != CGROUP_THAWED)
                return -EBUSY;
 
-       if (threadgroup) {
-               struct task_struct *c;
+       return 0;
+}
 
-               rcu_read_lock();
-               list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
-                       if (is_task_frozen_enough(c)) {
-                               rcu_read_unlock();
-                               return -EBUSY;
-                       }
-               }
+static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+{
+       rcu_read_lock();
+       if (__cgroup_freezing_or_frozen(tsk)) {
                rcu_read_unlock();
+               return -EBUSY;
        }
-
+       rcu_read_unlock();
        return 0;
 }
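
Dropping the threadgroup parameter moves the whole-group scan out of the subsystem: the cgroup core is now expected to call ->can_attach() once per subsystem and ->can_attach_task() once per thread, using the same RCU-protected thread_group walk that the removed loop above performed locally. A sketch of that assumed caller shape (the loop lives in the cgroup core, not in this file; everything beyond the two callbacks is illustrative):

	retval = ss->can_attach(ss, cgrp, leader);	/* once per subsystem */
	if (retval)
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(tsk, &leader->thread_group, thread_group) {
		retval = ss->can_attach_task(cgrp, tsk);	/* per thread */
		if (retval) {
			rcu_read_unlock();
			goto out;
		}
	}
	rcu_read_unlock();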
 
@@ -231,12 +227,13 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 /*
  * caller must hold freezer->lock
  */
-static void update_freezer_state(struct cgroup *cgroup,
+static void update_if_frozen(struct cgroup *cgroup,
                                 struct freezer *freezer)
 {
        struct cgroup_iter it;
        struct task_struct *task;
        unsigned int nfrozen = 0, ntotal = 0;
+       enum freezer_state old_state = freezer->state;
 
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
@@ -245,17 +242,15 @@ static void update_freezer_state(struct cgroup *cgroup,
                        nfrozen++;
        }
 
-       /*
-        * Transition to FROZEN when no new tasks can be added ensures
-        * that we never exist in the FROZEN state while there are unfrozen
-        * tasks.
-        */
-       if (nfrozen == ntotal)
-               freezer->state = CGROUP_FROZEN;
-       else if (nfrozen > 0)
-               freezer->state = CGROUP_FREEZING;
-       else
-               freezer->state = CGROUP_THAWED;
+       if (old_state == CGROUP_THAWED) {
+               BUG_ON(nfrozen > 0);
+       } else if (old_state == CGROUP_FREEZING) {
+               if (nfrozen == ntotal)
+                       freezer->state = CGROUP_FROZEN;
+       } else { /* old_state == CGROUP_FROZEN */
+               BUG_ON(nfrozen != ntotal);
+       }
+
        cgroup_iter_end(cgroup, &it);
 }
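
update_if_frozen() is now purely a promotion check: the only transition it performs is FREEZING to FROZEN, while THAWED and FROZEN are asserted as invariants rather than recomputed from the task counts. A self-contained model of that rule (ordinary userspace C, illustrative only; the enum mirrors the kernel's states):

#include <assert.h>

enum freezer_state { CGROUP_THAWED, CGROUP_FREEZING, CGROUP_FROZEN };

/* Return the state update_if_frozen() would leave behind, given the
 * old state and the frozen/total task counts it just tallied. */
static enum freezer_state update_if_frozen_model(enum freezer_state old,
		unsigned int nfrozen, unsigned int ntotal)
{
	if (old == CGROUP_THAWED)
		assert(nfrozen == 0);		/* BUG_ON() in the kernel */
	else if (old == CGROUP_FREEZING && nfrozen == ntotal)
		return CGROUP_FROZEN;		/* the only transition made */
	else if (old == CGROUP_FROZEN)
		assert(nfrozen == ntotal);	/* BUG_ON() in the kernel */
	return old;
}

int main(void)
{
	assert(update_if_frozen_model(CGROUP_FREEZING, 3, 3) == CGROUP_FROZEN);
	assert(update_if_frozen_model(CGROUP_FREEZING, 1, 3) == CGROUP_FREEZING);
	assert(update_if_frozen_model(CGROUP_THAWED, 0, 3) == CGROUP_THAWED);
	return 0;
}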
 
@@ -274,7 +269,7 @@ static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
        if (state == CGROUP_FREEZING) {
                /* We change from FREEZING to FROZEN lazily if the cgroup was
                 * only partially frozen when we exited write. */
-               update_freezer_state(cgroup, freezer);
+               update_if_frozen(cgroup, freezer);
                state = freezer->state;
        }
        spin_unlock_irq(&freezer->lock);
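
Because the FREEZING to FROZEN promotion is lazy, userspace only sees FROZEN once freezer.state is read again after the last task froze. A hedged userspace sketch that drives this path, assuming a freezer hierarchy mounted at /sys/fs/cgroup/freezer with an existing child group named "demo" that already contains tasks (both the mount point and the group name are assumptions):

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/freezer/demo/freezer.state";
	char buf[16] = "";
	FILE *f;

	f = fopen(path, "w");			/* freezer_write(): FROZEN */
	if (!f) { perror(path); return 1; }
	fputs("FROZEN\n", f);
	fclose(f);

	do {					/* freezer_read() promotes */
		usleep(10 * 1000);		/* FREEZING -> FROZEN lazily */
		f = fopen(path, "r");
		if (!f) { perror(path); return 1; }
		if (!fgets(buf, sizeof(buf), f))
			buf[0] = '\0';
		fclose(f);
	} while (strncmp(buf, "FROZEN", 6) != 0);

	return 0;
}
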
@@ -330,7 +325,7 @@ static int freezer_change_state(struct cgroup *cgroup,
 
        spin_lock_irq(&freezer->lock);
 
-       update_freezer_state(cgroup, freezer);
+       update_if_frozen(cgroup, freezer);
        if (goal_state == freezer->state)
                goto out;
 
@@ -393,6 +388,9 @@ struct cgroup_subsys freezer_subsys = {
        .populate       = freezer_populate,
        .subsys_id      = freezer_subsys_id,
        .can_attach     = freezer_can_attach,
+       .can_attach_task = freezer_can_attach_task,
+       .pre_attach     = NULL,
+       .attach_task    = NULL,
        .attach         = NULL,
        .fork           = freezer_fork,
        .exit           = NULL,