diff --git a/kernel/async.c b/kernel/async.c
index e23399d88bac44aef003fb12937ac3a407af2e32..d5fe7af0de2ee2d5181569775565dc8a767bdaba 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -49,38 +49,34 @@ asynchronous and synchronous parts of the kernel.
 */
 
 #include <linux/async.h>
+#include <linux/atomic.h>
+#include <linux/ktime.h>
 #include <linux/module.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/kthread.h>
-#include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
 
 static async_cookie_t next_cookie = 1;
 
-#define MAX_THREADS    256
 #define MAX_WORK       32768
 
 static LIST_HEAD(async_pending);
 static LIST_HEAD(async_running);
 static DEFINE_SPINLOCK(async_lock);
 
-static int async_enabled = 0;
-
 struct async_entry {
-       struct list_head list;
-       async_cookie_t   cookie;
-       async_func_ptr   *func;
-       void             *data;
-       struct list_head *running;
+       struct list_head        list;
+       struct work_struct      work;
+       async_cookie_t          cookie;
+       async_func_ptr          *func;
+       void                    *data;
+       struct list_head        *running;
 };
 
 static DECLARE_WAIT_QUEUE_HEAD(async_done);
-static DECLARE_WAIT_QUEUE_HEAD(async_new);
 
 static atomic_t entry_count;
-static atomic_t thread_count;
 
 extern int initcall_debug;
 
@@ -91,19 +87,18 @@ extern int initcall_debug;
 static async_cookie_t  __lowest_in_progress(struct list_head *running)
 {
        struct async_entry *entry;
+
        if (!list_empty(running)) {
                entry = list_first_entry(running,
                        struct async_entry, list);
                return entry->cookie;
-       } else if (!list_empty(&async_pending)) {
-               entry = list_first_entry(&async_pending,
-                       struct async_entry, list);
-               return entry->cookie;
-       } else {
-               /* nothing in progress... next_cookie is "infinity" */
-               return next_cookie;
        }
 
+       list_for_each_entry(entry, &async_pending, list)
+               if (entry->running == running)
+                       return entry->cookie;
+
+       return next_cookie;     /* "infinity" value */
 }
 
 static async_cookie_t  lowest_in_progress(struct list_head *running)
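[Annotation: async_pending is shared by every synchronization domain while each domain has its own running list, so the old fallback could return the cookie of a foreign domain's pending entry and make waiters block longer than necessary. The rewrite walks the pending list for the first entry targeting this domain's running list, and only returns next_cookie ("infinity") when the domain has nothing pending or running. For context, a hedged sketch of how a caller drives its own domain with the existing async API; my_domain, my_probe_fn and my_init are illustrative names.]

#include <linux/async.h>

static LIST_HEAD(my_domain);

static void my_probe_fn(void *data, async_cookie_t cookie)
{
        /* slow, parallelizable work runs here */
}

static void my_init(void)
{
        async_schedule_domain(my_probe_fn, NULL, &my_domain);
        /* blocks until lowest_in_progress(&my_domain) has passed
         * every cookie issued above */
        async_synchronize_full_domain(&my_domain);
}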
@@ -116,30 +111,26 @@ static async_cookie_t  lowest_in_progress(struct list_head *running)
        spin_unlock_irqrestore(&async_lock, flags);
        return ret;
 }
+
 /*
  * pick the first pending entry and run it
  */
-static void run_one_entry(void)
+static void async_run_entry_fn(struct work_struct *work)
 {
+       struct async_entry *entry =
+               container_of(work, struct async_entry, work);
        unsigned long flags;
-       struct async_entry *entry;
        ktime_t calltime, delta, rettime;
 
-       /* 1) pick one task from the pending queue */
-
+       /* 1) move self to the running queue */
        spin_lock_irqsave(&async_lock, flags);
-       if (list_empty(&async_pending))
-               goto out;
-       entry = list_first_entry(&async_pending, struct async_entry, list);
-
-       /* 2) move it to the running queue */
-       list_del(&entry->list);
-       list_add_tail(&entry->list, entry->running);
+       list_move_tail(&entry->list, entry->running);
        spin_unlock_irqrestore(&async_lock, flags);
 
-       /* 3) run it (and print duration)*/
+       /* 2) run (and print duration) */
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
-               printk("calling  %lli_%pF @ %i\n", (long long)entry->cookie,
+               printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
+                       (long long)entry->cookie,
                        entry->func, task_pid_nr(current));
                calltime = ktime_get();
        }
@@ -147,37 +138,31 @@ static void run_one_entry(void)
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
-               printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+               printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
                        (long long)entry->cookie,
                        entry->func,
                        (long long)ktime_to_ns(delta) >> 10);
        }
 
-       /* 4) remove it from the running queue */
+       /* 3) remove self from the running queue */
        spin_lock_irqsave(&async_lock, flags);
        list_del(&entry->list);
 
-       /* 5) free the entry  */
+       /* 4) free the entry */
        kfree(entry);
        atomic_dec(&entry_count);
 
        spin_unlock_irqrestore(&async_lock, flags);
 
-       /* 6) wake up any waiters. */
+       /* 5) wake up any waiters */
        wake_up(&async_done);
-       return;
-
-out:
-       spin_unlock_irqrestore(&async_lock, flags);
 }
 
-
 static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
 {
        struct async_entry *entry;
        unsigned long flags;
        async_cookie_t newcookie;
-       
 
        /* allow irq-off callers */
        entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
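[Annotation: the initcall_debug timing in the two hunks above follows the usual boot-debug pattern; note that the printed "usecs" are really nanoseconds shifted right by 10, i.e. divided by 1024 as a cheap approximation of dividing by 1000. A condensed sketch of the pattern; timed_call is a hypothetical helper, not part of the patch.]

#include <linux/async.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

/* Hypothetical helper illustrating the timing pattern above. */
static void timed_call(async_func_ptr *func, void *data,
                       async_cookie_t cookie)
{
        ktime_t calltime = ktime_get();

        func(data, cookie);
        printk(KERN_DEBUG "%pF took %lld usecs\n", func,
               (long long)ktime_to_ns(ktime_sub(ktime_get(), calltime)) >> 10);
}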
@@ -186,7 +171,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
         * If we're out of memory or if there's too much work
         * pending already, we execute synchronously.
         */
-       if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
+       if (!entry || atomic_read(&entry_count) > MAX_WORK) {
                kfree(entry);
                spin_lock_irqsave(&async_lock, flags);
                newcookie = next_cookie++;
@@ -196,6 +181,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
                ptr(data, newcookie);
                return newcookie;
        }
+       INIT_WORK(&entry->work, async_run_entry_fn);
        entry->func = ptr;
        entry->data = data;
        entry->running = running;
@@ -205,7 +191,10 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
        list_add_tail(&entry->list, &async_pending);
        atomic_inc(&entry_count);
        spin_unlock_irqrestore(&async_lock, flags);
-       wake_up(&async_new);
+
+       /* schedule for execution */
+       queue_work(system_unbound_wq, &entry->work);
+
        return newcookie;
 }
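[Annotation: queue_work() on system_unbound_wq replaces wake_up(&async_new). Each entry is now its own work item on the unbound system workqueue, which is not tied to the submitting CPU and spawns workers on demand, the job the deleted async_manager_thread used to approximate; the synchronous fallback above still runs the callback inline when allocation fails or more than MAX_WORK items are pending. For reference, a hedged sketch of how an unbound workqueue like system_unbound_wq is set up (cf. alloc_workqueue() in kernel/workqueue.c of this era); my_unbound_wq and my_wq_init are hypothetical.]

#include <linux/workqueue.h>

static struct workqueue_struct *my_unbound_wq;

static int __init my_wq_init(void)
{
        /* unbound: workers are not pinned to the submitting CPU */
        my_unbound_wq = alloc_workqueue("my_unbound", WQ_UNBOUND,
                                        WQ_UNBOUND_MAX_ACTIVE);
        return my_unbound_wq ? 0 : -ENOMEM;
}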
 
@@ -283,7 +272,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
        ktime_t starttime, delta, endtime;
 
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
-               printk("async_waiting @ %i\n", task_pid_nr(current));
+               printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
                starttime = ktime_get();
        }
 
@@ -293,7 +282,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
                endtime = ktime_get();
                delta = ktime_sub(endtime, starttime);
 
-               printk("async_continuing @ %i after %lli usec\n",
+               printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
                        task_pid_nr(current),
                        (long long)ktime_to_ns(delta) >> 10);
        }
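[Annotation: the printk() hunks here and above only add an explicit loglevel. An untagged printk() is emitted at the default message loglevel (historically 4, i.e. KERN_WARNING), so these boot-timing lines previously reached most consoles; tagging them KERN_DEBUG keeps them quiet unless the console loglevel includes debug output. The contrast, using the line from this hunk:]

/* before: logged at the default loglevel (typically KERN_WARNING) */
printk("async_waiting @ %i\n", task_pid_nr(current));

/* after: visible only when console_loglevel includes debug messages */
printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));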
@@ -312,96 +301,3 @@ void async_synchronize_cookie(async_cookie_t cookie)
        async_synchronize_cookie_domain(cookie, &async_running);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie);
-
-
-static int async_thread(void *unused)
-{
-       DECLARE_WAITQUEUE(wq, current);
-       add_wait_queue(&async_new, &wq);
-
-       while (!kthread_should_stop()) {
-               int ret = HZ;
-               set_current_state(TASK_INTERRUPTIBLE);
-               /*
-                * check the list head without lock.. false positives
-                * are dealt with inside run_one_entry() while holding
-                * the lock.
-                */
-               rmb();
-               if (!list_empty(&async_pending))
-                       run_one_entry();
-               else
-                       ret = schedule_timeout(HZ);
-
-               if (ret == 0) {
-                       /*
-                        * we timed out, this means we as thread are redundant.
-                        * we sign off and die, but to avoid any races there
-                        * is a last-straw check to see if work snuck in.
-                        */
-                       atomic_dec(&thread_count);
-                       wmb(); /* manager must see our departure first */
-                       if (list_empty(&async_pending))
-                               break;
-                       /*
-                        * woops work came in between us timing out and us
-                        * signing off; we need to stay alive and keep working.
-                        */
-                       atomic_inc(&thread_count);
-               }
-       }
-       remove_wait_queue(&async_new, &wq);
-
-       return 0;
-}
-
-static int async_manager_thread(void *unused)
-{
-       DECLARE_WAITQUEUE(wq, current);
-       add_wait_queue(&async_new, &wq);
-
-       while (!kthread_should_stop()) {
-               int tc, ec;
-
-               set_current_state(TASK_INTERRUPTIBLE);
-
-               tc = atomic_read(&thread_count);
-               rmb();
-               ec = atomic_read(&entry_count);
-
-               while (tc < ec && tc < MAX_THREADS) {
-                       if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
-                                              tc))) {
-                               msleep(100);
-                               continue;
-                       }
-                       atomic_inc(&thread_count);
-                       tc++;
-               }
-
-               schedule();
-       }
-       remove_wait_queue(&async_new, &wq);
-
-       return 0;
-}
-
-static int __init async_init(void)
-{
-       if (async_enabled)
-               if (IS_ERR(kthread_run(async_manager_thread, NULL,
-                                      "async/mgr")))
-                       async_enabled = 0;
-       return 0;
-}
-
-static int __init setup_async(char *str)
-{
-       async_enabled = 1;
-       return 1;
-}
-
-__setup("fastboot", setup_async);
-
-
-core_initcall(async_init);
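[Annotation: with per-work-item queueing in place, the private pool above is deleted wholesale: the worker and manager threads, the thread_count accounting, and the "fastboot" __setup that gated async_enabled all go away, so async execution is now available unconditionally. The waiter side is untouched by this patch; for reference, the core of the upstream async_synchronize_cookie_domain() wait reduces to a single wait_event() on the async_done queue (paraphrased from the unchanged upstream code, not introduced here):]

/* sleep until every entry of this domain with a lower cookie is done */
wait_event(async_done, lowest_in_progress(running) >= cookie);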