devres: fix possible use after free
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index eb47f8b..0a7840a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
 
 #include "internals.h"
 
+#ifdef CONFIG_IRQ_FORCED_THREADING
+__read_mostly bool force_irqthreads;
+
+static int __init setup_forced_irqthreads(char *arg)
+{
+       force_irqthreads = true;
+       return 0;
+}
+early_param("threadirqs", setup_forced_irqthreads);
+#endif
+
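The block above adds the "threadirqs" early boot parameter: when it is present on the kernel command line, force_irqthreads is set and irq_setup_forced_threading() further down in this diff converts ordinary request_irq() handlers into threaded ones. A hypothetical boot entry enabling it (kernel path and root device are illustrative):

    linux /boot/vmlinuz root=/dev/sda1 ro threadirqs
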
 /**
  *     synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *     @irq: interrupt number to wait for
@@ -30,7 +41,7 @@
 void synchronize_irq(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
-       unsigned int status;
+       bool inprogress;
 
        if (!desc)
                return;
@@ -42,16 +53,16 @@ void synchronize_irq(unsigned int irq)
                 * Wait until we're out of the critical section.  This might
                 * give the wrong answer due to the lack of memory barriers.
                 */
-               while (desc->status & IRQ_INPROGRESS)
+               while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();
 
                /* Ok, that indicated we're done: double-check carefully. */
-               spin_lock_irqsave(&desc->lock, flags);
-               status = desc->status;
-               spin_unlock_irqrestore(&desc->lock, flags);
+               raw_spin_lock_irqsave(&desc->lock, flags);
+               inprogress = irqd_irq_inprogress(&desc->irq_data);
+               raw_spin_unlock_irqrestore(&desc->lock, flags);
 
                /* Oops, that failed? */
-       } while (status & IRQ_INPROGRESS);
+       } while (inprogress);
 
        /*
         * We made sure that no hardirq handler is running. Now verify
@@ -73,64 +84,207 @@ int irq_can_set_affinity(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
 
-       if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
-           !desc->chip->set_affinity)
+       if (!desc || !irqd_can_balance(&desc->irq_data) ||
+           !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
                return 0;
 
        return 1;
 }
 
-static void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ *     irq_set_thread_affinity - Notify irq threads to adjust affinity
+ *     @desc:          irq descriptor whose affinity changed
+ *
+ *     We just set IRQTF_AFFINITY and delegate the affinity setting
+ *     to the interrupt thread itself. We can not call
+ *     set_cpus_allowed_ptr() here as we hold desc->lock and this
+ *     code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
 {
        struct irqaction *action = desc->action;
 
        while (action) {
                if (action->thread)
-                       set_cpus_allowed_ptr(action->thread, cpumask);
+                       set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
 }
 
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool irq_can_move_pcntxt(struct irq_data *data)
+{
+       return irqd_can_move_in_process_context(data);
+}
+static inline bool irq_move_pending(struct irq_data *data)
+{
+       return irqd_is_setaffinity_pending(data);
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+       cpumask_copy(desc->pending_mask, mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+       cpumask_copy(mask, desc->pending_mask);
+}
+#else
+static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
+static inline bool irq_move_pending(struct irq_data *data) { return false; }
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
+#endif
+
+int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+{
+       struct irq_chip *chip = irq_data_get_irq_chip(data);
+       struct irq_desc *desc = irq_data_to_desc(data);
+       int ret = 0;
+
+       if (!chip || !chip->irq_set_affinity)
+               return -EINVAL;
+
+       if (irq_can_move_pcntxt(data)) {
+               ret = chip->irq_set_affinity(data, mask, false);
+               switch (ret) {
+               case IRQ_SET_MASK_OK:
+                       cpumask_copy(data->affinity, mask);
+               case IRQ_SET_MASK_OK_NOCOPY:
+                       irq_set_thread_affinity(desc);
+                       ret = 0;
+               }
+       } else {
+               irqd_set_move_pending(data);
+               irq_copy_pending(desc, mask);
+       }
+
+       if (desc->affinity_notify) {
+               kref_get(&desc->affinity_notify->kref);
+               schedule_work(&desc->affinity_notify->work);
+       }
+       irqd_set(data, IRQD_AFFINITY_SET);
+
+       return ret;
+}
+
 /**
  *     irq_set_affinity - Set the irq affinity of a given irq
  *     @irq:           Interrupt to set affinity
- *     @cpumask:       cpumask
+ *     @mask:          cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       int ret;
 
-       if (!desc->chip->set_affinity)
+       if (!desc)
                return -EINVAL;
 
-       spin_lock_irqsave(&desc->lock, flags);
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       return ret;
+}
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-       if (desc->status & IRQ_MOVE_PCNTXT)
-               desc->chip->set_affinity(irq, cpumask);
-       else {
-               desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(desc->pending_mask, cpumask);
+int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+       unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+
+       if (!desc)
+               return -EINVAL;
+       desc->affinity_hint = m;
+       irq_put_desc_unlock(desc, flags);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
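irq_set_affinity_hint() lets a driver publish a preferred CPU mask that userspace balancers can read from /proc/irq/<n>/affinity_hint. A minimal driver-side sketch; the example_* names are illustrative and not part of this patch:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Hypothetical per-queue setup: hint that queue i prefers CPU
 * (i % num_online_cpus()); irqbalance can read the hint from
 * /proc/irq/<n>/affinity_hint. */
static int example_setup_queue_irq(unsigned int irq, int i)
{
	return irq_set_affinity_hint(irq, cpumask_of(i % num_online_cpus()));
}

static void example_teardown_queue_irq(unsigned int irq)
{
	/* clear the hint before free_irq(): see the WARN_ON_ONCE() added
	 * to __free_irq() further down in this diff */
	irq_set_affinity_hint(irq, NULL);
}
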
+
+static void irq_affinity_notify(struct work_struct *work)
+{
+       struct irq_affinity_notify *notify =
+               container_of(work, struct irq_affinity_notify, work);
+       struct irq_desc *desc = irq_to_desc(notify->irq);
+       cpumask_var_t cpumask;
+       unsigned long flags;
+
+       if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
+               goto out;
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       if (irq_move_pending(&desc->irq_data))
+               irq_get_pending(cpumask, desc);
+       else
+               cpumask_copy(cpumask, desc->irq_data.affinity);
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+       notify->notify(notify, cpumask);
+
+       free_cpumask_var(cpumask);
+out:
+       kref_put(&notify->kref, notify->release);
+}
+
+/**
+ *     irq_set_affinity_notifier - control notification of IRQ affinity changes
+ *     @irq:           Interrupt for which to enable/disable notification
+ *     @notify:        Context for notification, or %NULL to disable
+ *                     notification.  Function pointers must be initialised;
+ *                     the other fields will be initialised by this function.
+ *
+ *     Must be called in process context.  Notification may only be enabled
+ *     after the IRQ is allocated and must be disabled before the IRQ is
+ *     freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_affinity_notify *old_notify;
+       unsigned long flags;
+
+       /* The release function is promised process context */
+       might_sleep();
+
+       if (!desc)
+               return -EINVAL;
+
+       /* Complete initialisation of *notify */
+       if (notify) {
+               notify->irq = irq;
+               kref_init(&notify->kref);
+               INIT_WORK(&notify->work, irq_affinity_notify);
        }
-#else
-       cpumask_copy(desc->affinity, cpumask);
-       desc->chip->set_affinity(irq, cpumask);
-#endif
-       irq_set_thread_affinity(desc, cpumask);
-       desc->status |= IRQ_AFFINITY_SET;
-       spin_unlock_irqrestore(&desc->lock, flags);
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       old_notify = desc->affinity_notify;
+       desc->affinity_notify = notify;
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+       if (old_notify)
+               kref_put(&old_notify->kref, old_notify->release);
+
        return 0;
 }
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
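The notifier is reference counted via kref and notify() is invoked from a workqueue, so it may sleep. A hedged sketch of how a driver might register one (the example_* names are illustrative); per the kernel-doc above, it must be unregistered with irq_set_affinity_notifier(irq, NULL) before free_irq():

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_ctx {
	struct irq_affinity_notify notify;
	/* ... driver state ... */
};

static void example_affinity_changed(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	/* re-target queues, DMA buffers, etc. to the new mask */
}

static void example_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);

	kfree(container_of(notify, struct example_ctx, notify));
}

static int example_register_notifier(unsigned int irq, struct example_ctx *ctx)
{
	ctx->notify.notify = example_affinity_changed;
	ctx->notify.release = example_affinity_release;
	/* irq, kref and work are filled in by irq_set_affinity_notifier() */
	return irq_set_affinity_notifier(irq, &ctx->notify);
}
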
 
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       struct cpumask *set = irq_default_affinity;
+       int ret;
+
+       /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
                return 0;
 
@@ -138,22 +292,27 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
-       if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpumask_any_and(desc->affinity, cpu_online_mask)
-                   < nr_cpu_ids)
-                       goto set_affinity;
+       if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
+               if (cpumask_intersects(desc->irq_data.affinity,
+                                      cpu_online_mask))
+                       set = desc->irq_data.affinity;
                else
-                       desc->status &= ~IRQ_AFFINITY_SET;
+                       irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
        }
 
-       cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
-set_affinity:
-       desc->chip->set_affinity(irq, desc->affinity);
-
+       cpumask_and(mask, cpu_online_mask, set);
+       ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+       switch (ret) {
+       case IRQ_SET_MASK_OK:
+               cpumask_copy(desc->irq_data.affinity, mask);
+       case IRQ_SET_MASK_OK_NOCOPY:
+               irq_set_thread_affinity(desc);
+       }
        return 0;
 }
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 {
        return irq_select_affinity(irq);
 }
@@ -162,23 +321,21 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 /*
  * Called when affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq)
+int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&desc->lock, flags);
-       ret = setup_affinity(irq, desc);
-       if (!ret)
-               irq_set_thread_affinity(desc, desc->affinity);
-       spin_unlock_irqrestore(&desc->lock, flags);
-
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       ret = setup_affinity(irq, desc, mask);
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
 }
 
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
        return 0;
 }
@@ -187,15 +344,25 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 {
        if (suspend) {
-               if (!desc->action || (desc->action->flags & IRQF_TIMER))
+               if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
                        return;
-               desc->status |= IRQ_SUSPENDED;
+               desc->istate |= IRQS_SUSPENDED;
        }
 
-       if (!desc->depth++) {
-               desc->status |= IRQ_DISABLED;
-               desc->chip->disable(irq);
-       }
+       if (!desc->depth++)
+               irq_disable(desc);
+}
+
+static int __disable_irq_nosync(unsigned int irq)
+{
+       unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+
+       if (!desc)
+               return -EINVAL;
+       __disable_irq(desc, irq, false);
+       irq_put_desc_busunlock(desc, flags);
+       return 0;
 }
 
 /**
@@ -211,15 +378,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
  */
 void disable_irq_nosync(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-       unsigned long flags;
-
-       if (!desc)
-               return;
-
-       spin_lock_irqsave(&desc->lock, flags);
-       __disable_irq(desc, irq, false);
-       spin_unlock_irqrestore(&desc->lock, flags);
+       __disable_irq_nosync(irq);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
 
@@ -237,21 +396,24 @@ EXPORT_SYMBOL(disable_irq_nosync);
  */
 void disable_irq(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       if (!desc)
-               return;
-
-       disable_irq_nosync(irq);
-       if (desc->action)
+       if (!__disable_irq_nosync(irq))
                synchronize_irq(irq);
 }
 EXPORT_SYMBOL(disable_irq);
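disable_irq() now only calls synchronize_irq() when the descriptor lookup in __disable_irq_nosync() succeeded. Driver-side use is unchanged; a small sketch assuming a hypothetical example_priv with an irq field:

#include <linux/device.h>
#include <linux/interrupt.h>

struct example_priv {
	unsigned int irq;
	/* ... */
};

static int example_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	disable_irq(priv->irq);		/* disables and waits for handlers */
	/* ... power the block down ... */
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	/* ... power the block back up ... */
	enable_irq(priv->irq);		/* re-enables once depth reaches 0 */
	return 0;
}
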
 
 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
-       if (resume)
-               desc->status &= ~IRQ_SUSPENDED;
+       if (resume) {
+               if (!(desc->istate & IRQS_SUSPENDED)) {
+                       if (!desc->action)
+                               return;
+                       if (!(desc->action->flags & IRQF_FORCE_RESUME))
+                               return;
+                       /* Pretend that it got disabled ! */
+                       desc->depth++;
+               }
+               desc->istate &= ~IRQS_SUSPENDED;
+       }
 
        switch (desc->depth) {
        case 0:
@@ -259,12 +421,11 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
-               unsigned int status = desc->status & ~IRQ_DISABLED;
-
-               if (desc->status & IRQ_SUSPENDED)
+               if (desc->istate & IRQS_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
-               desc->status = status | IRQ_NOPROBE;
+               irq_settings_set_noprobe(desc);
+               irq_enable(desc);
                check_irq_resend(desc, irq);
                /* fall-through */
        }
@@ -281,19 +442,23 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
  *     matches the last disable, processing of interrupts on this
  *     IRQ line is re-enabled.
  *
- *     This function may be called from IRQ context.
+ *     This function may be called from IRQ context only when
+ *     desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL!
  */
 void enable_irq(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 
        if (!desc)
                return;
+       if (WARN(!desc->irq_data.chip,
+                KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
+               goto out;
 
-       spin_lock_irqsave(&desc->lock, flags);
        __enable_irq(desc, irq, false);
-       spin_unlock_irqrestore(&desc->lock, flags);
+out:
+       irq_put_desc_busunlock(desc, flags);
 }
 EXPORT_SYMBOL(enable_irq);
 
@@ -302,14 +467,14 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;
 
-       if (desc->chip->set_wake)
-               ret = desc->chip->set_wake(irq, on);
+       if (desc->irq_data.chip->irq_set_wake)
+               ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 
        return ret;
 }
 
 /**
- *     set_irq_wake - control irq power management wakeup
+ *     irq_set_irq_wake - control irq power management wakeup
  *     @irq:   interrupt to control
  *     @on:    enable/disable power management wakeup
  *
@@ -320,23 +485,25 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
  *     Wakeup mode lets this IRQ wake the system from sleep
  *     states like "suspend to RAM".
  */
-int set_irq_wake(unsigned int irq, unsigned int on)
+int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
        int ret = 0;
 
+       if (!desc)
+               return -EINVAL;
+
        /* wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
-       spin_lock_irqsave(&desc->lock, flags);
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
-                               desc->status |= IRQ_WAKEUP;
+                               irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        } else {
                if (desc->wake_depth == 0) {
@@ -346,14 +513,13 @@ int set_irq_wake(unsigned int irq, unsigned int on)
                        if (ret)
                                desc->wake_depth = 1;
                        else
-                               desc->status &= ~IRQ_WAKEUP;
+                               irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        }
-
-       spin_unlock_irqrestore(&desc->lock, flags);
+       irq_put_desc_busunlock(desc, flags);
        return ret;
 }
-EXPORT_SYMBOL(set_irq_wake);
+EXPORT_SYMBOL(irq_set_irq_wake);
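Along with the rename to irq_set_irq_wake(), drivers normally reach this through the enable_irq_wake()/disable_irq_wake() wrappers. An illustrative sketch of a wakeup-capable device arming its interrupt across suspend (example_* names hypothetical):

#include <linux/device.h>
#include <linux/interrupt.h>

static int example_wake_suspend(struct device *dev, unsigned int irq)
{
	if (device_may_wakeup(dev))
		enable_irq_wake(irq);		/* irq_set_irq_wake(irq, 1) */
	return 0;
}

static int example_wake_resume(struct device *dev, unsigned int irq)
{
	if (device_may_wakeup(dev))
		disable_irq_wake(irq);		/* irq_set_irq_wake(irq, 0) */
	return 0;
}
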
 
 /*
  * Internal function that tells the architecture code whether a
@@ -362,41 +528,29 @@ EXPORT_SYMBOL(set_irq_wake);
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-       struct irqaction *action;
+       unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+       int canrequest = 0;
 
        if (!desc)
                return 0;
 
-       if (desc->status & IRQ_NOREQUEST)
-               return 0;
-
-       action = desc->action;
-       if (action)
-               if (irqflags & action->flags & IRQF_SHARED)
-                       action = NULL;
-
-       return !action;
-}
-
-void compat_irq_chip_set_default_handler(struct irq_desc *desc)
-{
-       /*
-        * If the architecture still has not overriden
-        * the flow handler then zap the default. This
-        * should catch incorrect flow-type setting.
-        */
-       if (desc->handle_irq == &handle_bad_irq)
-               desc->handle_irq = NULL;
+       if (irq_settings_can_request(desc)) {
+               if (desc->action)
+                       if (irqflags & desc->action->flags & IRQF_SHARED)
+                               canrequest = 1;
+       }
+       irq_put_desc_unlock(desc, flags);
+       return canrequest;
 }
 
 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
-               unsigned long flags)
+                     unsigned long flags)
 {
-       int ret;
-       struct irq_chip *chip = desc->chip;
+       struct irq_chip *chip = desc->irq_data.chip;
+       int ret, unmask = 0;
 
-       if (!chip || !chip->set_type) {
+       if (!chip || !chip->irq_set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
@@ -406,23 +560,64 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                return 0;
        }
 
-       /* caller masked out all except trigger mode flags */
-       ret = chip->set_type(irq, flags);
+       flags &= IRQ_TYPE_SENSE_MASK;
 
-       if (ret)
-               pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
-                               (int)flags, irq, chip->set_type);
-       else {
-               if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-                       flags |= IRQ_LEVEL;
-               /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-               desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
-               desc->status |= flags;
+       if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
+               if (!irqd_irq_masked(&desc->irq_data))
+                       mask_irq(desc);
+               if (!irqd_irq_disabled(&desc->irq_data))
+                       unmask = 1;
        }
 
+       /* caller masked out all except trigger mode flags */
+       ret = chip->irq_set_type(&desc->irq_data, flags);
+
+       switch (ret) {
+       case IRQ_SET_MASK_OK:
+               irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
+               irqd_set(&desc->irq_data, flags);
+
+       case IRQ_SET_MASK_OK_NOCOPY:
+               flags = irqd_get_trigger_type(&desc->irq_data);
+               irq_settings_set_trigger_mask(desc, flags);
+               irqd_clear(&desc->irq_data, IRQD_LEVEL);
+               irq_settings_clr_level(desc);
+               if (flags & IRQ_TYPE_LEVEL_MASK) {
+                       irq_settings_set_level(desc);
+                       irqd_set(&desc->irq_data, IRQD_LEVEL);
+               }
+
+               ret = 0;
+               break;
+       default:
+               pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
+                      flags, irq, chip->irq_set_type);
+       }
+       if (unmask)
+               unmask_irq(desc);
        return ret;
 }
 
+/*
+ * Default primary interrupt handler for threaded interrupts. Is
+ * assigned as primary handler when request_threaded_irq is called
+ * with handler == NULL. Useful for oneshot interrupts.
+ */
+static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
+{
+       return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Primary handler for nested threaded interrupts. Should never be
+ * called.
+ */
+static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
+{
+       WARN(1, "Primary handler called for nested irq %d\n", irq);
+       return IRQ_NONE;
+}
+
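The nested primary handler above is installed by __setup_irq() for interrupts that are demultiplexed inside another interrupt thread (typical for I2C/SPI MFD chips). A hedged sketch of how such a child interrupt might be requested; irq_set_nested_thread() and the example_* names are assumptions for illustration:

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Hypothetical MFD child interrupt, demultiplexed from an I2C parent
 * irq thread. It may sleep because it runs in that thread's context. */
static irqreturn_t example_child_thread(int irq, void *dev_id)
{
	/* e.g. read and clear the sub-device status over I2C */
	return IRQ_HANDLED;
}

static int example_add_child_irq(unsigned int child_irq, void *dev)
{
	irq_set_nested_thread(child_irq, 1);
	/* handler == NULL: __setup_irq() substitutes the warning stub
	 * irq_nested_primary_handler() as the primary handler */
	return request_threaded_irq(child_irq, NULL, example_child_thread,
				    0, "example-child", dev);
}
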
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
        while (!kthread_should_stop()) {
@@ -439,37 +634,172 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 }
 
 /*
+ * Oneshot interrupts keep the irq line masked until the threaded
+ * handler finished. unmask if the interrupt has not been disabled and
+ * is marked MASKED.
+ */
+static void irq_finalize_oneshot(struct irq_desc *desc,
+                                struct irqaction *action, bool force)
+{
+       if (!(desc->istate & IRQS_ONESHOT))
+               return;
+again:
+       chip_bus_lock(desc);
+       raw_spin_lock_irq(&desc->lock);
+
+       /*
+        * Implausible though it may be, we need to protect ourselves
+        * against the following scenario:
+        *
+        * The thread finishes before the hard interrupt handler on
+        * the other CPU. If we unmask the irq line now, the interrupt
+        * can come in again and mask the line, then leave due to
+        * IRQS_INPROGRESS, and the irq line stays masked forever.
+        *
+        * This also serializes the state of shared oneshot handlers
+        * versus "desc->threads_onehsot |= action->thread_mask;" in
+        * versus "desc->threads_oneshot |= action->thread_mask;" in
+        * serialization.
+        */
+       if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
+               raw_spin_unlock_irq(&desc->lock);
+               chip_bus_sync_unlock(desc);
+               cpu_relax();
+               goto again;
+       }
+
+       /*
+        * Now check again, whether the thread should run. Otherwise
+        * we would clear the threads_oneshot bit of this thread which
+        * was just set.
+        */
+       if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+               goto out_unlock;
+
+       desc->threads_oneshot &= ~action->thread_mask;
+
+       if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
+           irqd_irq_masked(&desc->irq_data))
+               unmask_irq(desc);
+
+out_unlock:
+       raw_spin_unlock_irq(&desc->lock);
+       chip_bus_sync_unlock(desc);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+       cpumask_var_t mask;
+
+       if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+               return;
+
+       /*
+        * In case we are out of memory, set IRQTF_AFFINITY again so
+        * we retry the next time the thread runs.
+        */
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+               set_bit(IRQTF_AFFINITY, &action->thread_flags);
+               return;
+       }
+
+       raw_spin_lock_irq(&desc->lock);
+       cpumask_copy(mask, desc->irq_data.affinity);
+       raw_spin_unlock_irq(&desc->lock);
+
+       set_cpus_allowed_ptr(current, mask);
+       free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
+/*
+ * Interrupts which are not explicitly requested as threaded
+ * interrupts rely on the implicit bh/preempt disable of the hard irq
+ * context. So we need to disable bh here to avoid deadlocks and other
+ * side effects.
+ */
+static irqreturn_t
+irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+       irqreturn_t ret;
+
+       local_bh_disable();
+       ret = action->thread_fn(action->irq, action->dev_id);
+       irq_finalize_oneshot(desc, action, false);
+       local_bh_enable();
+       return ret;
+}
+
+/*
+ * Interrupts explicitly requested as threaded interrupts want to be
+ * preemptible - many of them need to sleep and wait for slow buses to
+ * complete.
+ */
+static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+               struct irqaction *action)
+{
+       irqreturn_t ret;
+
+       ret = action->thread_fn(action->irq, action->dev_id);
+       irq_finalize_oneshot(desc, action, false);
+       return ret;
+}
+
+/*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
-       struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+       static const struct sched_param param = {
+               .sched_priority = MAX_USER_RT_PRIO/2,
+       };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
+       irqreturn_t (*handler_fn)(struct irq_desc *desc,
+                       struct irqaction *action);
        int wake;
 
+       if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
+                                       &action->thread_flags))
+               handler_fn = irq_forced_thread_fn;
+       else
+               handler_fn = irq_thread_fn;
+
        sched_setscheduler(current, SCHED_FIFO, &param);
        current->irqaction = action;
 
        while (!irq_wait_for_interrupt(action)) {
 
+               irq_thread_check_affinity(desc, action);
+
                atomic_inc(&desc->threads_active);
 
-               spin_lock_irq(&desc->lock);
-               if (unlikely(desc->status & IRQ_DISABLED)) {
+               raw_spin_lock_irq(&desc->lock);
+               if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
                        /*
                         * CHECKME: We might need a dedicated
                         * IRQ_THREAD_PENDING flag here, which
                         * retriggers the thread in check_irq_resend()
-                        * but AFAICT IRQ_PENDING should be fine as it
+                        * but AFAICT IRQS_PENDING should be fine as it
                         * retriggers the interrupt itself --- tglx
                         */
-                       desc->status |= IRQ_PENDING;
-                       spin_unlock_irq(&desc->lock);
+                       desc->istate |= IRQS_PENDING;
+                       raw_spin_unlock_irq(&desc->lock);
                } else {
-                       spin_unlock_irq(&desc->lock);
+                       irqreturn_t action_ret;
 
-                       action->thread_fn(action->irq, action->dev_id);
+                       raw_spin_unlock_irq(&desc->lock);
+                       action_ret = handler_fn(desc, action);
+                       if (!noirqdebug)
+                               note_interrupt(action->irq, desc, action_ret);
                }
 
                wake = atomic_dec_and_test(&desc->threads_active);
@@ -478,6 +808,9 @@ static int irq_thread(void *data)
                        wake_up(&desc->wait_for_threads);
        }
 
+       /* Prevent a stale desc->threads_oneshot */
+       irq_finalize_oneshot(desc, action, true);
+
        /*
         * Clear irqaction. Otherwise exit_irq_thread() would make
         * fuzz about an active irq thread going into nirvana.
@@ -492,6 +825,7 @@ static int irq_thread(void *data)
 void exit_irq_thread(void)
 {
        struct task_struct *tsk = current;
+       struct irq_desc *desc;
 
        if (!tsk->irqaction)
                return;
@@ -500,6 +834,14 @@ void exit_irq_thread(void)
               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
               tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
 
+       desc = irq_to_desc(tsk->irqaction->irq);
+
+       /*
+        * Prevent a stale desc->threads_oneshot. Must be called
+        * before setting the IRQTF_DIED flag.
+        */
+       irq_finalize_oneshot(desc, tsk->irqaction, true);
+
        /*
         * Set the THREAD DIED flag to prevent further wakeups of the
         * soon to be gone threaded handler.
@@ -507,6 +849,22 @@ void exit_irq_thread(void)
        set_bit(IRQTF_DIED, &tsk->irqaction->flags);
 }
 
+static void irq_setup_forced_threading(struct irqaction *new)
+{
+       if (!force_irqthreads)
+               return;
+       if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+               return;
+
+       new->flags |= IRQF_ONESHOT;
+
+       if (!new->thread_fn) {
+               set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+               new->thread_fn = new->handler;
+               new->handler = irq_default_primary_handler;
+       }
+}
+
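With forced threading active, any handler not flagged IRQF_NO_THREAD, IRQF_PERCPU or IRQF_ONESHOT is converted: the original handler becomes the thread function and irq_default_primary_handler() takes its place. A handler that genuinely must stay in hard interrupt context can opt out; an illustrative sketch (example_* names hypothetical):

#include <linux/interrupt.h>

/* Hypothetical low-level tick handler that must not be threaded even
 * when the kernel was booted with "threadirqs". */
static irqreturn_t example_tick_handler(int irq, void *dev_id)
{
	/* always runs in hard interrupt context */
	return IRQ_HANDLED;
}

static int example_register_tick_irq(unsigned int irq, void *dev)
{
	return request_irq(irq, example_tick_handler, IRQF_NO_THREAD,
			   "example-tick", dev);
}
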
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -516,14 +874,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
        struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
-       unsigned long flags;
-       int shared = 0;
-       int ret;
+       unsigned long flags, thread_mask = 0;
+       int ret, nested, shared = 0;
+       cpumask_var_t mask;
 
        if (!desc)
                return -EINVAL;
 
-       if (desc->chip == &no_irq_chip)
+       if (desc->irq_data.chip == &no_irq_chip)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
@@ -543,9 +901,30 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        }
 
        /*
-        * Threaded handler ?
+        * Check whether the interrupt nests into another interrupt
+        * thread.
+        */
+       nested = irq_settings_is_nested_thread(desc);
+       if (nested) {
+               if (!new->thread_fn)
+                       return -EINVAL;
+               /*
+                * Replace the primary handler which was provided from
+                * the driver for non nested interrupt handling by the
+                * dummy function which warns when called.
+                */
+               new->handler = irq_nested_primary_handler;
+       } else {
+               if (irq_settings_can_thread(desc))
+                       irq_setup_forced_threading(new);
+       }
+
+       /*
+        * Create a handler thread when a thread function is supplied
+        * and the interrupt does not nest into another interrupt
+        * thread.
         */
-       if (new->thread_fn) {
+       if (new->thread_fn && !nested) {
                struct task_struct *t;
 
                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
@@ -559,13 +938,17 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 */
                get_task_struct(t);
                new->thread = t;
-               wake_up_process(t);
+       }
+
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+               ret = -ENOMEM;
+               goto out_thread;
        }
 
        /*
         * The following block of code has to be executed atomically
         */
-       spin_lock_irqsave(&desc->lock, flags);
+       raw_spin_lock_irqsave(&desc->lock, flags);
        old_ptr = &desc->action;
        old = *old_ptr;
        if (old) {
@@ -573,32 +956,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity). So both flag
                 * fields must have IRQF_SHARED set and the bits which
-                * set the trigger type must match.
+                * set the trigger type must match. Also all must
+                * agree on ONESHOT.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
-                   ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+                   ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
+                   ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
                        old_name = old->name;
                        goto mismatch;
                }
 
-#if defined(CONFIG_IRQ_PER_CPU)
                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;
-#endif
 
                /* add new interrupt at end of irq queue */
                do {
+                       thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
                } while (old);
                shared = 1;
        }
 
-       if (!shared) {
-               irq_chip_set_defaults(desc->chip);
+       /*
+        * Setup the thread mask for this irqaction. Unlikely to have
+        * 32 (or 64 on 64-bit) irqs sharing one line, but who knows.
+        */
+       if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+               ret = -EBUSY;
+               goto out_mask;
+       }
+       new->thread_mask = 1 << ffz(thread_mask);
 
+       if (!shared) {
                init_waitqueue_head(&desc->wait_for_threads);
 
                /* Setup the type (level, edge polarity) if configured: */
@@ -607,41 +999,47 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                                        new->flags & IRQF_TRIGGER_MASK);
 
                        if (ret)
-                               goto out_thread;
-               } else
-                       compat_irq_chip_set_default_handler(desc);
-#if defined(CONFIG_IRQ_PER_CPU)
-               if (new->flags & IRQF_PERCPU)
-                       desc->status |= IRQ_PER_CPU;
-#endif
+                               goto out_mask;
+               }
+
+               desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
+                                 IRQS_ONESHOT | IRQS_WAITING);
+               irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+
+               if (new->flags & IRQF_PERCPU) {
+                       irqd_set(&desc->irq_data, IRQD_PER_CPU);
+                       irq_settings_set_per_cpu(desc);
+               }
 
-               desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
-                                 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+               if (new->flags & IRQF_ONESHOT)
+                       desc->istate |= IRQS_ONESHOT;
 
-               if (!(desc->status & IRQ_NOAUTOEN)) {
-                       desc->depth = 0;
-                       desc->status &= ~IRQ_DISABLED;
-                       desc->chip->startup(irq);
-               } else
+               if (irq_settings_can_autoenable(desc))
+                       irq_startup(desc);
+               else
                        /* Undo nested disables: */
                        desc->depth = 1;
 
                /* Exclude IRQ from balancing if requested */
-               if (new->flags & IRQF_NOBALANCING)
-                       desc->status |= IRQ_NO_BALANCING;
+               if (new->flags & IRQF_NOBALANCING) {
+                       irq_settings_set_no_balancing(desc);
+                       irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+               }
 
                /* Set default affinity mask once everything is setup */
-               setup_affinity(irq, desc);
+               setup_affinity(irq, desc, mask);
 
-       } else if ((new->flags & IRQF_TRIGGER_MASK)
-                       && (new->flags & IRQF_TRIGGER_MASK)
-                               != (desc->status & IRQ_TYPE_SENSE_MASK)) {
-               /* hope the handler works with the actual trigger mode... */
-               pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
-                               irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
-                               (int)(new->flags & IRQF_TRIGGER_MASK));
+       } else if (new->flags & IRQF_TRIGGER_MASK) {
+               unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+               unsigned int omsk = irq_settings_get_trigger_mask(desc);
+
+               if (nmsk != omsk)
+                       /* hope the handler works with the current trigger mode */
+                       pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
+                                  irq, omsk, nmsk);
        }
 
+       new->irq = irq;
        *old_ptr = new;
 
        /* Reset broken irq detection when installing new handler */
@@ -652,17 +1050,24 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         * Check whether we disabled the irq via the spurious handler
         * before. Reenable it and give it another chance.
         */
-       if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
-               desc->status &= ~IRQ_SPURIOUS_DISABLED;
+       if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
+               desc->istate &= ~IRQS_SPURIOUS_DISABLED;
                __enable_irq(desc, irq, false);
        }
 
-       spin_unlock_irqrestore(&desc->lock, flags);
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+       /*
+        * Strictly no need to wake it up, but hung_task complains
+        * when no hard interrupt wakes the thread up.
+        */
+       if (new->thread)
+               wake_up_process(new->thread);
 
-       new->irq = irq;
        register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);
+       free_cpumask_var(mask);
 
        return 0;
 
@@ -677,8 +1082,11 @@ mismatch:
 #endif
        ret = -EBUSY;
 
+out_mask:
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       free_cpumask_var(mask);
+
 out_thread:
-       spin_unlock_irqrestore(&desc->lock, flags);
        if (new->thread) {
                struct task_struct *t = new->thread;
 
@@ -699,9 +1107,14 @@ out_thread:
  */
 int setup_irq(unsigned int irq, struct irqaction *act)
 {
+       int retval;
        struct irq_desc *desc = irq_to_desc(irq);
 
-       return __setup_irq(irq, desc, act);
+       chip_bus_lock(desc);
+       retval = __setup_irq(irq, desc, act);
+       chip_bus_sync_unlock(desc);
+
+       return retval;
 }
 EXPORT_SYMBOL_GPL(setup_irq);
 
@@ -713,7 +1126,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action, **action_ptr;
-       struct task_struct *irqthread;
        unsigned long flags;
 
        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -721,7 +1133,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
        if (!desc)
                return NULL;
 
-       spin_lock_irqsave(&desc->lock, flags);
+       raw_spin_lock_irqsave(&desc->lock, flags);
 
        /*
         * There can be multiple actions per IRQ descriptor, find the right
@@ -733,7 +1145,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 
                if (!action) {
                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
-                       spin_unlock_irqrestore(&desc->lock, flags);
+                       raw_spin_unlock_irqrestore(&desc->lock, flags);
 
                        return NULL;
                }
@@ -748,35 +1160,27 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 
        /* Currently used only by UML, might disappear one day: */
 #ifdef CONFIG_IRQ_RELEASE_METHOD
-       if (desc->chip->release)
-               desc->chip->release(irq, dev_id);
+       if (desc->irq_data.chip->release)
+               desc->irq_data.chip->release(irq, dev_id);
 #endif
 
        /* If this was the last handler, shut down the IRQ line: */
-       if (!desc->action) {
-               desc->status |= IRQ_DISABLED;
-               if (desc->chip->shutdown)
-                       desc->chip->shutdown(irq);
-               else
-                       desc->chip->disable(irq);
-       }
+       if (!desc->action)
+               irq_shutdown(desc);
 
-       irqthread = action->thread;
-       action->thread = NULL;
+#ifdef CONFIG_SMP
+       /* make sure affinity_hint is cleaned up */
+       if (WARN_ON_ONCE(desc->affinity_hint))
+               desc->affinity_hint = NULL;
+#endif
 
-       spin_unlock_irqrestore(&desc->lock, flags);
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
 
        unregister_handler_proc(irq, action);
 
        /* Make sure it's not being used on another CPU: */
        synchronize_irq(irq);
 
-       if (irqthread) {
-               if (!test_bit(IRQTF_DIED, &action->thread_flags))
-                       kthread_stop(irqthread);
-               put_task_struct(irqthread);
-       }
-
 #ifdef CONFIG_DEBUG_SHIRQ
        /*
         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -792,6 +1196,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
                local_irq_restore(flags);
        }
 #endif
+
+       if (action->thread) {
+               if (!test_bit(IRQTF_DIED, &action->thread_flags))
+                       kthread_stop(action->thread);
+               put_task_struct(action->thread);
+       }
+
        return action;
 }
 
@@ -824,7 +1235,19 @@ EXPORT_SYMBOL_GPL(remove_irq);
  */
 void free_irq(unsigned int irq, void *dev_id)
 {
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (!desc)
+               return;
+
+#ifdef CONFIG_SMP
+       if (WARN_ON(desc->affinity_notify))
+               desc->affinity_notify = NULL;
+#endif
+
+       chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
+       chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL(free_irq);
 
@@ -833,6 +1256,8 @@ EXPORT_SYMBOL(free_irq);
  *     @irq: Interrupt line to allocate
  *     @handler: Function to be called when the IRQ occurs.
  *               Primary handler for threaded interrupts
+ *               If NULL and thread_fn != NULL the default
+ *               primary handler is installed
  *     @thread_fn: Function called from the irq handler thread
  *                 If NULL, no irq thread is created
  *     @irqflags: Interrupt type flags
@@ -865,7 +1290,6 @@ EXPORT_SYMBOL(free_irq);
  *     Flags:
  *
  *     IRQF_SHARED             Interrupt is shared
- *     IRQF_DISABLED   Disable local interrupts while processing
  *     IRQF_SAMPLE_RANDOM      The interrupt can be used for entropy
  *     IRQF_TRIGGER_*          Specify active edge(s) or level
  *
@@ -879,25 +1303,6 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        int retval;
 
        /*
-        * handle_IRQ_event() always ignores IRQF_DISABLED except for
-        * the _first_ irqaction (sigh).  That can cause oopsing, but
-        * the behavior is classified as "will not fix" so we need to
-        * start nudging drivers away from using that idiom.
-        */
-       if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
-                                       (IRQF_SHARED|IRQF_DISABLED)) {
-               pr_warning(
-                 "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
-                       irq, devname);
-       }
-
-#ifdef CONFIG_LOCKDEP
-       /*
-        * Lockdep wants atomic interrupt handlers:
-        */
-       irqflags |= IRQF_DISABLED;
-#endif
-       /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
@@ -910,11 +1315,15 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        if (!desc)
                return -EINVAL;
 
-       if (desc->status & IRQ_NOREQUEST)
-               return -EINVAL;
-       if (!handler)
+       if (!irq_settings_can_request(desc))
                return -EINVAL;
 
+       if (!handler) {
+               if (!thread_fn)
+                       return -EINVAL;
+               handler = irq_default_primary_handler;
+       }
+
        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;
@@ -925,12 +1334,15 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        action->name = devname;
        action->dev_id = dev_id;
 
+       chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
+       chip_bus_sync_unlock(desc);
+
        if (retval)
                kfree(action);
 
-#ifdef CONFIG_DEBUG_SHIRQ
-       if (irqflags & IRQF_SHARED) {
+#ifdef CONFIG_DEBUG_SHIRQ_FIXME
+       if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
                 * to happen immediately, so let's make sure....
@@ -951,3 +1363,40 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        return retval;
 }
 EXPORT_SYMBOL(request_threaded_irq);
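With the hunk above, request_threaded_irq() accepts handler == NULL as long as thread_fn is set; irq_default_primary_handler() is installed and just returns IRQ_WAKE_THREAD. For a level-triggered line such purely threaded handlers are normally combined with IRQF_ONESHOT so the line stays masked until the thread is done. An illustrative sketch (example_* names and the trigger type are assumptions):

#include <linux/interrupt.h>

struct example_priv {
	unsigned int irq;
	/* ... */
};

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* may sleep: e.g. read and acknowledge status over I2C */
	return IRQ_HANDLED;
}

static int example_request_irq(struct example_priv *priv)
{
	/* NULL primary handler: irq_default_primary_handler() just wakes
	 * the thread; IRQF_ONESHOT keeps the level-triggered line masked
	 * until example_thread_fn() returns. */
	return request_threaded_irq(priv->irq, NULL, example_thread_fn,
				    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				    "example", priv);
}
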
+
+/**
+ *     request_any_context_irq - allocate an interrupt line
+ *     @irq: Interrupt line to allocate
+ *     @handler: Function to be called when the IRQ occurs.
+ *               Threaded handler for threaded interrupts.
+ *     @flags: Interrupt type flags
+ *     @name: An ascii name for the claiming device
+ *     @dev_id: A cookie passed back to the handler function
+ *
+ *     This call allocates interrupt resources and enables the
+ *     interrupt line and IRQ handling. It selects either a
+ *     hardirq or threaded handling method depending on the
+ *     context.
+ *
+ *     On failure, it returns a negative value. On success,
+ *     it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
+ */
+int request_any_context_irq(unsigned int irq, irq_handler_t handler,
+                           unsigned long flags, const char *name, void *dev_id)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       int ret;
+
+       if (!desc)
+               return -EINVAL;
+
+       if (irq_settings_is_nested_thread(desc)) {
+               ret = request_threaded_irq(irq, NULL, handler,
+                                          flags, name, dev_id);
+               return !ret ? IRQC_IS_NESTED : ret;
+       }
+
+       ret = request_irq(irq, handler, flags, name, dev_id);
+       return !ret ? IRQC_IS_HARDIRQ : ret;
+}
+EXPORT_SYMBOL_GPL(request_any_context_irq);
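request_any_context_irq() is useful when the same driver can sit behind either a real interrupt controller or a slow chip (e.g. an I2C/SPI GPIO expander) that only offers nested threaded interrupts. An illustrative sketch of the calling convention (example_* names hypothetical):

#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	/* hardirq context on a normal controller; parent irq thread
	 * context (may sleep) behind an I2C/SPI expander */
	return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *dev)
{
	int ret;

	ret = request_any_context_irq(irq, example_handler,
				      IRQF_TRIGGER_FALLING, "example", dev);
	if (ret < 0)
		return ret;

	/* on success ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED */
	return 0;
}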