Merge branch 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Linus Torvalds [Tue, 2 Oct 2012 16:54:49 +0000 (09:54 -0700)]
Pull workqueue changes from Tejun Heo:
 "This is workqueue updates for v3.7-rc1.  A lot of activities this
  round including considerable API and behavior cleanups.

   * delayed_work combines a timer and a work item.  The handling of the
     timer part has always been a bit clunky, leading to a confusing
     cancellation API with weird corner-case behaviors.  delayed_work is
     updated to use the new IRQ-safe timer, and cancellation now works as
     expected.

   * Another deficiency of delayed_work was the lack of a counterpart to
     mod_timer(), which led to cancel+queue combinations or open-coded
     timer+work usages.  mod_delayed_work[_on]() are added to fill that
     gap (a before/after sketch follows the conflict notes below).

     Together, these two changes make delayed_work provide the interface
     of, and behave like, a timer that executes in process context.

   * A work item could previously be executed concurrently on multiple
     CPUs, which is rather unintuitive and made flush_work() behavior
     confusing and half-broken under certain circumstances.  This
     problem doesn't exist for non-reentrant workqueues.  While the
     non-reentrancy check isn't free, the overhead is incurred only when
     a work item bounces across different CPUs, and even in a simulated
     pathological scenario it isn't too high.

     All workqueues are made non-reentrant.  This removes the
     distinction between flush_[delayed_]work() and
     flush_[delayed_]work_sync(): the former is now as strong as the
     latter, and on return the specified work item is guaranteed to have
     finished executing any previous queueing (see the flush sketch
     below).

   * In addition to the various bug fixes, Lai redid and simplified CPU
     hotplug handling significantly.

   * Joonsoo introduced system_highpri_wq and used it during CPU
     hotplug.

  There are two merge commits - one to pull in the IRQ-safe timer work
  from tip/timers/core, and the other to pull in CPU hotplug fixes from
  wq/for-3.6-fixes, as Lai's hotplug restructuring depended on them."

Fixed a number of trivial conflicts, but the more interesting conflicts
were silent ones where the deprecated interfaces had been used by new
code in the merge window, and thus didn't cause any real data conflicts.

Tejun pointed out a few of them, I fixed a couple more.
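
To make the conversions in the diffs below easier to follow, here is a
minimal before/after sketch of the pattern involved.  It is
illustrative only: the work item, callback, and delays are hypothetical
rather than taken from this merge, but the cancel+queue sequence in
my_reschedule_old() is exactly what mod_delayed_work() replaces
throughout the series.

  #include <linux/workqueue.h>

  static void my_poll_fn(struct work_struct *work)
  {
          /* runs in process context, like a sleepable timer callback */
  }

  static DECLARE_DELAYED_WORK(my_dwork, my_poll_fn);

  /*
   * Old pattern: cancel whatever is pending, then queue again with the
   * new delay.  Two steps, with awkward corner cases when the timer
   * had already fired.
   */
  static void my_reschedule_old(unsigned long delay)
  {
          cancel_delayed_work(&my_dwork);
          schedule_delayed_work(&my_dwork, delay);
  }

  /*
   * New pattern: mod_delayed_work() is to delayed_work what mod_timer()
   * is to timers - it queues the work if idle, otherwise it simply
   * adjusts the expiry.  The new IRQ-safe timer makes this callable
   * from any context, including IRQ handlers.
   */
  static void my_reschedule_new(unsigned long delay)
  {
          mod_delayed_work(system_wq, &my_dwork, delay);
  }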
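
Similarly, with every workqueue now non-reentrant, the _sync flush
variants become redundant.  A hypothetical teardown path (struct my_dev
and its members are made up for illustration) now only needs the plain
flush calls:

  struct my_dev {
          struct work_struct      irq_work;
          struct delayed_work     poll_dwork;
  };

  static void my_dev_teardown(struct my_dev *dev)
  {
          /*
           * flush_work() now guarantees that any queueing of the work
           * item prior to this call has finished executing - this used
           * to require flush_work_sync() / flush_delayed_work_sync().
           */
          flush_work(&dev->irq_work);
          flush_delayed_work(&dev->poll_dwork);
  }

The same sweep renames INIT_DELAYED_WORK_DEFERRABLE() to
INIT_DEFERRABLE_WORK() and replaces the deprecated system_nrt_wq /
system_nrt_freezable_wq with plain system_wq / system_freezable_wq, as
the per-file diffs show.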

* 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (46 commits)
  workqueue: remove spurious WARN_ON_ONCE(in_irq()) from try_to_grab_pending()
  workqueue: use cwq_set_max_active() helper for workqueue_set_max_active()
  workqueue: introduce cwq_set_max_active() helper for thaw_workqueues()
  workqueue: remove @delayed from cwq_dec_nr_in_flight()
  workqueue: fix possible stall on try_to_grab_pending() of a delayed work item
  workqueue: use hotcpu_notifier() for workqueue_cpu_down_callback()
  workqueue: use __cpuinit instead of __devinit for cpu callbacks
  workqueue: rename manager_mutex to assoc_mutex
  workqueue: WORKER_REBIND is no longer necessary for idle rebinding
  workqueue: WORKER_REBIND is no longer necessary for busy rebinding
  workqueue: reimplement idle worker rebinding
  workqueue: deprecate __cancel_delayed_work()
  workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
  workqueue: use mod_delayed_work() instead of __cancel + queue
  workqueue: use irqsafe timer for delayed_work
  workqueue: clean up delayed_work initializers and add missing one
  workqueue: make deferrable delayed_work initializer names consistent
  workqueue: cosmetic whitespace updates for macro definitions
  workqueue: deprecate system_nrt[_freezable]_wq
  workqueue: deprecate flush[_delayed]_work_sync()
  ...

134 files changed:
arch/arm/mach-pxa/sharpsl_pm.c
arch/arm/plat-omap/mailbox.c
arch/powerpc/platforms/cell/cpufreq_spudemand.c
arch/sh/drivers/push-switch.c
block/blk-core.c
block/blk-throttle.c
block/genhd.c
drivers/block/floppy.c
drivers/block/xen-blkfront.c
drivers/cdrom/gdrom.c
drivers/char/sonypi.c
drivers/char/tpm/tpm.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/devfreq/devfreq.c
drivers/edac/edac_mc.c
drivers/extcon/extcon-adc-jack.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/nouveau/nouveau_gpio.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/hid/hid-picolcd_fb.c
drivers/hid/hid-wiimote-ext.c
drivers/infiniband/core/addr.c
drivers/infiniband/core/mad.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/input/keyboard/qt2160.c
drivers/input/mouse/synaptics_i2c.c
drivers/input/touchscreen/wm831x-ts.c
drivers/isdn/mISDN/hwchannel.c
drivers/leds/leds-lm3533.c
drivers/leds/leds-lp8788.c
drivers/leds/leds-wm8350.c
drivers/macintosh/ams/ams-core.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-stripe.c
drivers/media/dvb/dvb-core/dvb_net.c
drivers/media/dvb/mantis/mantis_evm.c
drivers/media/dvb/mantis/mantis_uart.c
drivers/media/video/bt8xx/bttv-driver.c
drivers/media/video/cx18/cx18-driver.c
drivers/media/video/cx231xx/cx231xx-cards.c
drivers/media/video/cx23885/cx23885-input.c
drivers/media/video/cx88/cx88-mpeg.c
drivers/media/video/em28xx/em28xx-cards.c
drivers/media/video/omap24xxcam.c
drivers/media/video/saa7134/saa7134-core.c
drivers/media/video/saa7134/saa7134-empress.c
drivers/media/video/tm6000/tm6000-cards.c
drivers/mfd/menelaus.c
drivers/misc/ioc4.c
drivers/mmc/core/host.c
drivers/mtd/mtdoops.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/mellanox/mlx4/sense.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/niu.c
drivers/net/virtio_net.c
drivers/net/wireless/hostap/hostap_ap.c
drivers/net/wireless/hostap/hostap_hw.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/ab8500_btemp.c
drivers/power/ab8500_charger.c
drivers/power/ab8500_fg.c
drivers/power/abx500_chargalg.c
drivers/power/charger-manager.c
drivers/power/collie_battery.c
drivers/power/ds2760_battery.c
drivers/power/jz4740-battery.c
drivers/power/max17040_battery.c
drivers/power/tosa_battery.c
drivers/power/wm97xx_battery.c
drivers/power/z2_battery.c
drivers/regulator/core.c
drivers/scsi/arcmsr/arcmsr_hba.c
drivers/scsi/ipr.c
drivers/scsi/pmcraid.c
drivers/scsi/qla2xxx/qla_target.c
drivers/staging/ccg/u_ether.c
drivers/staging/nvec/nvec.c
drivers/thermal/thermal_sys.c
drivers/tty/hvc/hvsi.c
drivers/tty/ipwireless/hardware.c
drivers/tty/ipwireless/network.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/omap-serial.c
drivers/tty/tty_ldisc.c
drivers/usb/atm/speedtch.c
drivers/usb/atm/ueagle-atm.c
drivers/usb/gadget/u_ether.c
drivers/usb/host/ohci-hcd.c
drivers/usb/otg/isp1301_omap.c
drivers/video/omap2/displays/panel-taal.c
drivers/video/omap2/dss/dsi.c
fs/affs/super.c
fs/afs/callback.c
fs/afs/server.c
fs/afs/vlocation.c
fs/gfs2/lock_dlm.c
fs/gfs2/super.c
fs/hfs/inode.c
fs/ncpfs/inode.c
fs/nfs/nfs4renewd.c
fs/ocfs2/cluster/quorum.c
fs/xfs/xfs_super.c
fs/xfs/xfs_sync.c
include/linux/workqueue.h
kernel/srcu.c
kernel/workqueue.c
mm/slab.c
mm/vmstat.c
net/9p/trans_fd.c
net/core/dst.c
net/core/link_watch.c
net/core/neighbour.c
net/dsa/dsa.c
net/ipv4/inetpeer.c
net/rfkill/input.c
net/sunrpc/cache.c
security/keys/gc.c
security/keys/key.c
sound/i2c/other/ak4113.c
sound/i2c/other/ak4114.c
sound/pci/oxygen/oxygen_lib.c
sound/soc/codecs/wm8350.c
sound/soc/codecs/wm8753.c
sound/soc/soc-core.c
virt/kvm/eventfd.c

diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 9a154ba..5a406f7 100644
@@ -579,8 +579,8 @@ static int sharpsl_ac_check(void)
 static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
 {
        sharpsl_pm.flags |= SHARPSL_SUSPENDED;
-       flush_delayed_work_sync(&toggle_charger);
-       flush_delayed_work_sync(&sharpsl_bat);
+       flush_delayed_work(&toggle_charger);
+       flush_delayed_work(&sharpsl_bat);
 
        if (sharpsl_pm.charge_mode == CHRG_ON)
                sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 5e13c38..42377ef 100644
@@ -310,7 +310,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
                omap_mbox_disable_irq(mbox, IRQ_RX);
                free_irq(mbox->irq, mbox);
                tasklet_kill(&mbox->txq->tasklet);
-               flush_work_sync(&mbox->rxq->work);
+               flush_work(&mbox->rxq->work);
                mbox_queue_free(mbox->txq);
                mbox_queue_free(mbox->rxq);
        }
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 23bc9db..82607d6 100644
@@ -76,7 +76,7 @@ static void spu_gov_work(struct work_struct *work)
 static void spu_gov_init_work(struct spu_gov_info_struct *info)
 {
        int delay = usecs_to_jiffies(info->poll_int);
-       INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
+       INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
        schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
index 637b79b..5bfb341 100644
@@ -107,7 +107,7 @@ static int switch_drv_remove(struct platform_device *pdev)
                device_remove_file(&pdev->dev, &dev_attr_switch);
 
        platform_set_drvdata(pdev, NULL);
-       flush_work_sync(&psw->work);
+       flush_work(&psw->work);
        del_timer_sync(&psw->debounce);
        free_irq(irq, pdev);
 
diff --git a/block/blk-core.c b/block/blk-core.c
index ee3cb3a..d2da641 100644
@@ -262,7 +262,7 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-       __cancel_delayed_work(&q->delay_work);
+       cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-       if (likely(!blk_queue_stopped(q))) {
-               __cancel_delayed_work(&q->delay_work);
-               queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-       }
+       if (likely(!blk_queue_stopped(q)))
+               mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e287c19..a9664fa 100644
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ alloc_stats:
                stats_cpu = alloc_percpu(struct tg_stats_cpu);
                if (!stats_cpu) {
                        /* allocation failed, try again after some time */
-                       queue_delayed_work(system_nrt_wq, dwork,
-                                          msecs_to_jiffies(10));
+                       schedule_delayed_work(dwork, msecs_to_jiffies(10));
                        return;
                }
        }
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
         */
        spin_lock_irqsave(&tg_stats_alloc_lock, flags);
        list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-       queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+       schedule_delayed_work(&tg_stats_alloc_work, 0);
        spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
@@ -930,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
        /* schedule work if limits changed even if no bio is queued */
        if (total_nr_queued(td) || td->limits_changed) {
-               /*
-                * We might have a work scheduled to be executed in future.
-                * Cancel that and schedule a new one.
-                */
-               __cancel_delayed_work(dwork);
-               queue_delayed_work(kthrotld_workqueue, dwork, delay);
+               mod_delayed_work(kthrotld_workqueue, dwork, delay);
                throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                                delay, jiffies);
        }
diff --git a/block/genhd.c b/block/genhd.c
index d839723..6cace66 100644
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
        intv = disk_events_poll_jiffies(disk);
        set_timer_slack(&ev->dwork.timer, intv / 4);
        if (check_now)
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+               queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
        else if (intv)
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+               queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
        spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1534,10 +1534,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 
        spin_lock_irq(&ev->lock);
        ev->clearing |= mask;
-       if (!ev->block) {
-               cancel_delayed_work(&ev->dwork);
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
-       }
+       if (!ev->block)
+               mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
        spin_unlock_irq(&ev->lock);
 }
 
@@ -1573,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
        /* uncondtionally schedule event check and wait for it to finish */
        disk_block_events(disk);
-       queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+       queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
        flush_delayed_work(&ev->dwork);
        __disk_unblock_events(disk, false);
 
@@ -1610,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
 
        intv = disk_events_poll_jiffies(disk);
        if (!ev->block && intv)
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+               queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 
        spin_unlock_irq(&ev->lock);
 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a7d6347..17c675c 100644
@@ -672,7 +672,6 @@ static void __reschedule_timeout(int drive, const char *message)
 
        if (drive == current_reqD)
                drive = current_drive;
-       __cancel_delayed_work(&fd_timeout);
 
        if (drive < 0 || drive >= N_DRIVE) {
                delay = 20UL * HZ;
@@ -680,7 +679,7 @@ static void __reschedule_timeout(int drive, const char *message)
        } else
                delay = UDP->timeout;
 
-       queue_delayed_work(floppy_wq, &fd_timeout, delay);
+       mod_delayed_work(floppy_wq, &fd_timeout, delay);
        if (UDP->flags & FD_DEBUG)
                DPRINT("reschedule timeout %s\n", message);
        timeout_message = message;
@@ -891,7 +890,7 @@ static void unlock_fdc(void)
 
        raw_cmd = NULL;
        command_status = FD_COMMAND_NONE;
-       __cancel_delayed_work(&fd_timeout);
+       cancel_delayed_work(&fd_timeout);
        do_floppy = NULL;
        cont = NULL;
        clear_bit(0, &fdc_busy);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2c2d2e5..007db89 100644
@@ -670,7 +670,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
        spin_unlock_irqrestore(&info->io_lock, flags);
 
        /* Flush gnttab callback work. Must be done with no locks held. */
-       flush_work_sync(&info->work);
+       flush_work(&info->work);
 
        del_gendisk(info->gd);
 
@@ -719,7 +719,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
        spin_unlock_irq(&info->io_lock);
 
        /* Flush gnttab callback work. Must be done with no locks held. */
-       flush_work_sync(&info->work);
+       flush_work(&info->work);
 
        /* Free resources associated with old device channel. */
        if (info->ring_ref != GRANT_INVALID_REF) {
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 3ceaf00..75d485a 100644
@@ -840,7 +840,7 @@ probe_fail_no_mem:
 
 static int __devexit remove_gdrom(struct platform_device *devptr)
 {
-       flush_work_sync(&work);
+       flush_work(&work);
        blk_cleanup_queue(gd.gdrom_rq);
        free_irq(HW_EVENT_GDROM_CMD, &gd);
        free_irq(HW_EVENT_GDROM_DMA, &gd);
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index f877805..320debb 100644
@@ -1433,7 +1433,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
        sonypi_disable();
 
        synchronize_irq(sonypi_device.irq);
-       flush_work_sync(&sonypi_device.input_work);
+       flush_work(&sonypi_device.input_work);
 
        if (useinput) {
                input_unregister_device(sonypi_device.input_key_dev);
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 817f0ee..3af9f4d 100644
@@ -1172,7 +1172,7 @@ int tpm_release(struct inode *inode, struct file *file)
        struct tpm_chip *chip = file->private_data;
 
        del_singleshot_timer_sync(&chip->user_read_timer);
-       flush_work_sync(&chip->work);
+       flush_work(&chip->work);
        file->private_data = NULL;
        atomic_set(&chip->data_pending, 0);
        kfree(chip->data_buffer);
@@ -1225,7 +1225,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
        int rc;
 
        del_singleshot_timer_sync(&chip->user_read_timer);
-       flush_work_sync(&chip->work);
+       flush_work(&chip->work);
        ret_size = atomic_read(&chip->data_pending);
        atomic_set(&chip->data_pending, 0);
        if (ret_size > 0) {     /* relay data */
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 235a340..55f0354 100644
@@ -466,7 +466,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
        delay -= jiffies % delay;
 
        dbs_info->enable = 1;
-       INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+       INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
        schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 836e9b0..14c1af5 100644
@@ -644,7 +644,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
                delay -= jiffies % delay;
 
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
-       INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+       INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
        schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 70c31d4..b146d76 100644
@@ -607,7 +607,7 @@ static int __init devfreq_start_polling(void)
        mutex_lock(&devfreq_list_lock);
        polling = false;
        devfreq_wq = create_freezable_workqueue("devfreq_wq");
-       INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
+       INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
        mutex_unlock(&devfreq_list_lock);
 
        devfreq_monitor(&devfreq_work.work);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d5dc9da..90f0b73 100644
@@ -559,7 +559,7 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
                return;
 
        INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-       queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+       mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
 }
 
 /*
@@ -599,21 +599,6 @@ void edac_mc_reset_delay_period(int value)
 
        mutex_lock(&mem_ctls_mutex);
 
-       /* scan the list and turn off all workq timers, doing so under lock
-        */
-       list_for_each(item, &mc_devices) {
-               mci = list_entry(item, struct mem_ctl_info, link);
-
-               if (mci->op_state == OP_RUNNING_POLL)
-                       cancel_delayed_work(&mci->work);
-       }
-
-       mutex_unlock(&mem_ctls_mutex);
-
-
-       /* re-walk the list, and reset the poll delay */
-       mutex_lock(&mem_ctls_mutex);
-
        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);
 
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 60ac3fb..725eb5a 100644
@@ -143,7 +143,7 @@ static int __devinit adc_jack_probe(struct platform_device *pdev)
 
        data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
 
-       INIT_DELAYED_WORK_DEFERRABLE(&data->handler, adc_jack_handler);
+       INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);
 
        platform_set_drvdata(pdev, data);
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3252e70..8fa9d52 100644
@@ -968,7 +968,7 @@ static void output_poll_execute(struct work_struct *work)
        }
 
        if (repoll)
-               queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+               schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
 
 void drm_kms_helper_poll_disable(struct drm_device *dev)
@@ -993,7 +993,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
        }
 
        if (poll)
-               queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+               schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
@@ -1020,6 +1020,6 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
        /* kill timer and schedule immediate execution, this doesn't block */
        cancel_delayed_work(&dev->mode_config.output_poll_work);
        if (drm_kms_helper_poll)
-               queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+               schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 1065e90..2526e82 100644
@@ -878,7 +878,7 @@ static int g2d_suspend(struct device *dev)
                /* FIXME: good range? */
                usleep_range(500, 1000);
 
-       flush_work_sync(&g2d->runqueue_work);
+       flush_work(&g2d->runqueue_work);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
index 82c19e8..0fe4e17 100644
@@ -302,7 +302,7 @@ nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
                spin_unlock_irqrestore(&pgpio->lock, flags);
 
                list_for_each_entry_safe(isr, tmp, &tofree, head) {
-                       flush_work_sync(&isr->work);
+                       flush_work(&isr->work);
                        kfree(isr);
                }
        }
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index afaa172..50b596e 100644
@@ -277,7 +277,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
                if (rdev->msi_enabled)
                        pci_disable_msi(rdev->pdev);
        }
-       flush_work_sync(&rdev->hotplug_work);
+       flush_work(&rdev->hotplug_work);
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3c447bf..a32f2e9 100644
@@ -594,7 +594,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
        par->dirty.active = false;
        spin_unlock_irqrestore(&par->dirty.lock, flags);
 
-       flush_delayed_work_sync(&info->deferred_work);
+       flush_delayed_work(&info->deferred_work);
 
        par->bo_ptr = NULL;
        ttm_bo_kunmap(&par->map);
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index 0008a51..eb00357 100644
@@ -608,7 +608,7 @@ void picolcd_exit_framebuffer(struct picolcd_data *data)
        /* make sure there is no running update - thus that fbdata->picolcd
         * once obtained under lock is guaranteed not to get free() under
         * the feet of the deferred work */
-       flush_delayed_work_sync(&info->deferred_work);
+       flush_delayed_work(&info->deferred_work);
 
        data->fb_info = NULL;
        unregister_framebuffer(info);
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
index bc85bf2..38ae877 100644
@@ -229,7 +229,7 @@ static void wiiext_worker(struct work_struct *work)
 /* schedule work only once, otherwise mark for reschedule */
 static void wiiext_schedule(struct wiimote_ext *ext)
 {
-       queue_work(system_nrt_wq, &ext->worker);
+       schedule_work(&ext->worker);
 }
 
 /*
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 28058ae..eaec8d7 100644
@@ -152,13 +152,11 @@ static void set_timeout(unsigned long time)
 {
        unsigned long delay;
 
-       cancel_delayed_work(&work);
-
        delay = time - jiffies;
        if ((long)delay <= 0)
                delay = 1;
 
-       queue_delayed_work(addr_wq, &work, delay);
+       mod_delayed_work(addr_wq, &work, delay);
 }
 
 static void queue_req(struct addr_req *req)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b0d0bc8..dc3fd1e 100644
@@ -2004,7 +2004,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
        unsigned long delay;
 
        if (list_empty(&mad_agent_priv->wait_list)) {
-               __cancel_delayed_work(&mad_agent_priv->timed_work);
+               cancel_delayed_work(&mad_agent_priv->timed_work);
        } else {
                mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
                                         struct ib_mad_send_wr_private,
@@ -2013,13 +2013,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
                if (time_after(mad_agent_priv->timeout,
                               mad_send_wr->timeout)) {
                        mad_agent_priv->timeout = mad_send_wr->timeout;
-                       __cancel_delayed_work(&mad_agent_priv->timed_work);
                        delay = mad_send_wr->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
-                       queue_delayed_work(mad_agent_priv->qp_info->
-                                          port_priv->wq,
-                                          &mad_agent_priv->timed_work, delay);
+                       mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+                                        &mad_agent_priv->timed_work, delay);
                }
        }
 }
@@ -2052,11 +2050,9 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
        list_add(&mad_send_wr->agent_list, list_item);
 
        /* Reschedule a work item if we have a shorter timeout */
-       if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-               __cancel_delayed_work(&mad_agent_priv->timed_work);
-               queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
-                                  &mad_agent_priv->timed_work, delay);
-       }
+       if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
+               mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+                                &mad_agent_priv->timed_work, delay);
 }
 
 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index d42c9f4..9e0895b 100644
@@ -2679,11 +2679,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                        }
                }
                if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
-                       if (nesdev->link_recheck)
-                               cancel_delayed_work(&nesdev->work);
                        nesdev->link_recheck = 1;
-                       schedule_delayed_work(&nesdev->work,
-                                             NES_LINK_RECHECK_DELAY);
+                       mod_delayed_work(system_wq, &nesdev->work,
+                                        NES_LINK_RECHECK_DELAY);
                }
        }
 
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index f3a3ecf..e43f6e4 100644
@@ -243,10 +243,9 @@ static int nes_netdev_open(struct net_device *netdev)
 
        spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
        if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
-               if (nesdev->link_recheck)
-                       cancel_delayed_work(&nesdev->work);
                nesdev->link_recheck = 1;
-               schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+               mod_delayed_work(system_wq, &nesdev->work,
+                                NES_LINK_RECHECK_DELAY);
        }
        spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
 
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index e7a5e36..76b7d43 100644
@@ -156,8 +156,7 @@ static irqreturn_t qt2160_irq(int irq, void *_qt2160)
 
        spin_lock_irqsave(&qt2160->lock, flags);
 
-       __cancel_delayed_work(&qt2160->dwork);
-       schedule_delayed_work(&qt2160->dwork, 0);
+       mod_delayed_work(system_wq, &qt2160->dwork, 0);
 
        spin_unlock_irqrestore(&qt2160->lock, flags);
 
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index f146757..063a174 100644
@@ -376,12 +376,7 @@ static void synaptics_i2c_reschedule_work(struct synaptics_i2c *touch,
 
        spin_lock_irqsave(&touch->lock, flags);
 
-       /*
-        * If work is already scheduled then subsequent schedules will not
-        * change the scheduled time that's why we have to cancel it first.
-        */
-       __cancel_delayed_work(&touch->dwork);
-       schedule_delayed_work(&touch->dwork, delay);
+       mod_delayed_work(system_wq, &touch->dwork, delay);
 
        spin_unlock_irqrestore(&touch->lock, flags);
 }
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index e834107..52abb98 100644
@@ -221,7 +221,7 @@ static void wm831x_ts_input_close(struct input_dev *idev)
        synchronize_irq(wm831x_ts->pd_irq);
 
        /* Make sure the IRQ completion work is quiesced */
-       flush_work_sync(&wm831x_ts->pd_data_work);
+       flush_work(&wm831x_ts->pd_data_work);
 
        /* If we ended up with the pen down then make sure we revert back
         * to pen detection state for the next time we start up.
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index 2602be2..84b4b0f 100644
@@ -116,7 +116,7 @@ mISDN_freedchannel(struct dchannel *ch)
        }
        skb_queue_purge(&ch->squeue);
        skb_queue_purge(&ch->rqueue);
-       flush_work_sync(&ch->workq);
+       flush_work(&ch->workq);
        return 0;
 }
 EXPORT_SYMBOL(mISDN_freedchannel);
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index f56b6e7..f6837b9 100644
@@ -737,7 +737,7 @@ err_sysfs_remove:
        sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
 err_unregister:
        led_classdev_unregister(&led->cdev);
-       flush_work_sync(&led->work);
+       flush_work(&led->work);
 
        return ret;
 }
@@ -751,7 +751,7 @@ static int __devexit lm3533_led_remove(struct platform_device *pdev)
        lm3533_ctrlbank_disable(&led->cb);
        sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
        led_classdev_unregister(&led->cdev);
-       flush_work_sync(&led->work);
+       flush_work(&led->work);
 
        return 0;
 }
@@ -765,7 +765,7 @@ static void lm3533_led_shutdown(struct platform_device *pdev)
 
        lm3533_ctrlbank_disable(&led->cb);
        lm3533_led_set(&led->cdev, LED_OFF);            /* disable blink */
-       flush_work_sync(&led->work);
+       flush_work(&led->work);
 }
 
 static struct platform_driver lm3533_led_driver = {
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 0ade6eb..64009a1 100644
@@ -172,7 +172,7 @@ static int __devexit lp8788_led_remove(struct platform_device *pdev)
        struct lp8788_led *led = platform_get_drvdata(pdev);
 
        led_classdev_unregister(&led->led_dev);
-       flush_work_sync(&led->work);
+       flush_work(&led->work);
 
        return 0;
 }
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 918d4ba..4c62113 100644
@@ -275,7 +275,7 @@ static int wm8350_led_remove(struct platform_device *pdev)
        struct wm8350_led *led = platform_get_drvdata(pdev);
 
        led_classdev_unregister(&led->cdev);
-       flush_work_sync(&led->work);
+       flush_work(&led->work);
        wm8350_led_disable(led);
        regulator_put(led->dcdc);
        regulator_put(led->isink);
diff --git a/drivers/macintosh/ams/ams-core.c b/drivers/macintosh/ams/ams-core.c
index 5c6a2d8..36a4fdd 100644
@@ -226,7 +226,7 @@ void ams_sensor_detach(void)
         * We do this after ams_info.exit(), because an interrupt might
         * have arrived before disabling them.
         */
-       flush_work_sync(&ams_info.worker);
+       flush_work(&ams_info.worker);
 
        /* Remove device */
        of_device_unregister(ams_info.of_dev);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 034233e..d778563 100644
@@ -944,7 +944,7 @@ static void flush_multipath_work(struct multipath *m)
        flush_workqueue(kmpath_handlerd);
        multipath_wait_for_pg_init_completion(m);
        flush_workqueue(kmultipathd);
-       flush_work_sync(&m->trigger_event);
+       flush_work(&m->trigger_event);
 }
 
 static void multipath_dtr(struct dm_target *ti)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index bc5ddba..fd61f98 100644
@@ -1146,7 +1146,7 @@ static void mirror_dtr(struct dm_target *ti)
 
        del_timer_sync(&ms->timer);
        flush_workqueue(ms->kmirrord_wq);
-       flush_work_sync(&ms->trigger_event);
+       flush_work(&ms->trigger_event);
        dm_kcopyd_client_destroy(ms->kcopyd_client);
        destroy_workqueue(ms->kmirrord_wq);
        free_context(ms, ti, ms->nr_mirrors);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a087bf2..e2f8765 100644
@@ -199,7 +199,7 @@ static void stripe_dtr(struct dm_target *ti)
        for (i = 0; i < sc->stripes; i++)
                dm_put_device(ti, sc->stripe[i].dev);
 
-       flush_work_sync(&sc->trigger_event);
+       flush_work(&sc->trigger_event);
        kfree(sc);
 }
 
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8766ce8..c211768 100644
@@ -1329,8 +1329,8 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
                return -EBUSY;
 
        dvb_net_stop(net);
-       flush_work_sync(&priv->set_multicast_list_wq);
-       flush_work_sync(&priv->restart_net_feed_wq);
+       flush_work(&priv->set_multicast_list_wq);
+       flush_work(&priv->restart_net_feed_wq);
        printk("dvb_net: removed network interface %s\n", net->name);
        unregister_netdev(net);
        dvbnet->state[num]=0;
diff --git a/drivers/media/dvb/mantis/mantis_evm.c b/drivers/media/dvb/mantis/mantis_evm.c
index 71ce528..909ff54 100644
@@ -111,7 +111,7 @@ void mantis_evmgr_exit(struct mantis_ca *ca)
        struct mantis_pci *mantis = ca->ca_priv;
 
        dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting");
-       flush_work_sync(&ca->hif_evm_work);
+       flush_work(&ca->hif_evm_work);
        mantis_hif_exit(ca);
        mantis_pcmcia_exit(ca);
 }
diff --git a/drivers/media/dvb/mantis/mantis_uart.c b/drivers/media/dvb/mantis/mantis_uart.c
index 18340da..85e9778 100644
@@ -183,6 +183,6 @@ void mantis_uart_exit(struct mantis_pci *mantis)
 {
        /* disable interrupt */
        mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
-       flush_work_sync(&mantis->uart_work);
+       flush_work(&mantis->uart_work);
 }
 EXPORT_SYMBOL_GPL(mantis_uart_exit);
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index b58ff87..2ce7179 100644
@@ -196,7 +196,7 @@ static void request_modules(struct bttv *dev)
 
 static void flush_request_modules(struct bttv *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 7e5ffd6..75c8909 100644
@@ -272,7 +272,7 @@ static void request_modules(struct cx18 *dev)
 
 static void flush_request_modules(struct cx18 *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 02d4d36..b84ebc5 100644
@@ -1002,7 +1002,7 @@ static void request_modules(struct cx231xx *dev)
 
 static void flush_request_modules(struct cx231xx *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index ce765e3..bcbf7fa 100644
@@ -231,9 +231,9 @@ static void cx23885_input_ir_stop(struct cx23885_dev *dev)
                v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
                v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
        }
-       flush_work_sync(&dev->cx25840_work);
-       flush_work_sync(&dev->ir_rx_work);
-       flush_work_sync(&dev->ir_tx_work);
+       flush_work(&dev->cx25840_work);
+       flush_work(&dev->ir_rx_work);
+       flush_work(&dev->ir_tx_work);
 }
 
 static void cx23885_input_ir_close(struct rc_dev *rc)
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index cd5386e..c04fb61 100644
@@ -70,7 +70,7 @@ static void request_modules(struct cx8802_dev *dev)
 
 static void flush_request_modules(struct cx8802_dev *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index ca62b99..f7831e7 100644
@@ -2900,7 +2900,7 @@ static void request_modules(struct em28xx *dev)
 
 static void flush_request_modules(struct em28xx *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index e5015b0..8d7283b 100644
@@ -1198,7 +1198,7 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
 
        atomic_inc(&cam->reset_disable);
 
-       flush_work_sync(&cam->sensor_reset_work);
+       flush_work(&cam->sensor_reset_work);
 
        rval = videobuf_streamoff(q);
        if (!rval) {
@@ -1512,7 +1512,7 @@ static int omap24xxcam_release(struct file *file)
 
        atomic_inc(&cam->reset_disable);
 
-       flush_work_sync(&cam->sensor_reset_work);
+       flush_work(&cam->sensor_reset_work);
 
        /* stop streaming capture */
        videobuf_streamoff(&fh->vbq);
@@ -1536,7 +1536,7 @@ static int omap24xxcam_release(struct file *file)
         * not be scheduled anymore since streaming is already
         * disabled.)
         */
-       flush_work_sync(&cam->sensor_reset_work);
+       flush_work(&cam->sensor_reset_work);
 
        mutex_lock(&cam->mutex);
        if (atomic_dec_return(&cam->users) == 0) {
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 5fbb4e4..f2b37e0 100644
@@ -170,7 +170,7 @@ static void request_submodules(struct saa7134_dev *dev)
 
 static void flush_request_submodules(struct saa7134_dev *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 
 #else
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index dde361a..4df79c6 100644
@@ -556,7 +556,7 @@ static int empress_fini(struct saa7134_dev *dev)
 
        if (NULL == dev->empress_dev)
                return 0;
-       flush_work_sync(&dev->empress_workqueue);
+       flush_work(&dev->empress_workqueue);
        video_unregister_device(dev->empress_dev);
        dev->empress_dev = NULL;
        return 0;
diff --git a/drivers/media/video/tm6000/tm6000-cards.c b/drivers/media/video/tm6000/tm6000-cards.c
index 034659b..307d8c5 100644
@@ -1074,7 +1074,7 @@ static void request_modules(struct tm6000_core *dev)
 
 static void flush_request_modules(struct tm6000_core *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index cb4910a..55d5899 100644
@@ -1259,7 +1259,7 @@ static int menelaus_probe(struct i2c_client *client,
        return 0;
 fail2:
        free_irq(client->irq, menelaus);
-       flush_work_sync(&menelaus->work);
+       flush_work(&menelaus->work);
 fail1:
        kfree(menelaus);
        return err;
@@ -1270,7 +1270,7 @@ static int __exit menelaus_remove(struct i2c_client *client)
        struct menelaus_chip    *menelaus = i2c_get_clientdata(client);
 
        free_irq(client->irq, menelaus);
-       flush_work_sync(&menelaus->work);
+       flush_work(&menelaus->work);
        kfree(menelaus);
        the_menelaus = NULL;
        return 0;
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index df03dd3..6a77106 100644
@@ -487,7 +487,7 @@ static void __exit
 ioc4_exit(void)
 {
        /* Ensure ioc4_load_modules() has completed before exiting */
-       flush_work_sync(&ioc4_load_modules_work);
+       flush_work(&ioc4_load_modules_work);
        pci_unregister_driver(&ioc4_driver);
 }
 
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 597f189..ee2e16b 100644
@@ -204,8 +204,8 @@ void mmc_host_clk_release(struct mmc_host *host)
        host->clk_requests--;
        if (mmc_host_may_gate_card(host->card) &&
            !host->clk_requests)
-               queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
-                               msecs_to_jiffies(host->clkgate_delay));
+               schedule_delayed_work(&host->clk_gate_work,
+                                     msecs_to_jiffies(host->clkgate_delay));
        spin_unlock_irqrestore(&host->clk_lock, flags);
 }
 
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 551e316..438737a 100644
@@ -387,8 +387,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
                printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
 
        cxt->mtd = NULL;
-       flush_work_sync(&cxt->work_erase);
-       flush_work_sync(&cxt->work_write);
+       flush_work(&cxt->work_erase);
+       flush_work(&cxt->work_write);
 }
 
 
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 875bbb9..9c9f326 100644
@@ -1394,7 +1394,7 @@ static int offload_close(struct t3cdev *tdev)
        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
 
        /* Flush work scheduled while releasing TIDs */
-       flush_work_sync(&td->tid_release_task);
+       flush_work(&td->tid_release_task);
 
        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index 34ee09b..094773d 100644
@@ -139,5 +139,5 @@ void  mlx4_sense_init(struct mlx4_dev *dev)
        for (port = 1; port <= dev->caps.num_ports; port++)
                sense->do_sense_port[port] = 1;
 
-       INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
+       INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port);
 }
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index cfa71a3..3e5b750 100644
@@ -3521,7 +3521,7 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 
        strncpy(buf, dev->name, IFNAMSIZ);
 
-       flush_work_sync(&vdev->reset_task);
+       flush_work(&vdev->reset_task);
 
        /* in 2.6 will call stop() if device is up */
        unregister_netdev(dev);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index ce4df61..c8251be 100644
@@ -3890,7 +3890,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
        schedule_work(&cp->reset_task);
 #endif
 
-       flush_work_sync(&cp->reset_task);
+       flush_work(&cp->reset_task);
        return 0;
 }
 
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 3208dca..8419bf3 100644
@@ -9927,7 +9927,7 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
        if (!netif_running(dev))
                return 0;
 
-       flush_work_sync(&np->reset_task);
+       flush_work(&np->reset_task);
        niu_netif_stop(np);
 
        del_timer_sync(&np->timer);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 83d2b0c..9650c41 100644
@@ -521,7 +521,7 @@ static void refill_work(struct work_struct *work)
        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
        if (still_empty)
-               queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
+               schedule_delayed_work(&vi->refill, HZ/2);
 }
 
 static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -540,7 +540,7 @@ again:
 
        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
-                       queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+                       schedule_delayed_work(&vi->refill, 0);
        }
 
        /* Out of packets? */
@@ -745,7 +745,7 @@ static int virtnet_open(struct net_device *dev)
 
        /* Make sure we have some buffers: if oom use wq. */
        if (!try_fill_recv(vi, GFP_KERNEL))
-               queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+               schedule_delayed_work(&vi->refill, 0);
 
        virtnet_napi_enable(vi);
        return 0;
@@ -1020,7 +1020,7 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
 
-       queue_work(system_nrt_wq, &vi->config_work);
+       schedule_work(&vi->config_work);
 }
 
 static int init_vqs(struct virtnet_info *vi)
@@ -1152,7 +1152,7 @@ static int virtnet_probe(struct virtio_device *vdev)
           otherwise get link status from config. */
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
                netif_carrier_off(dev);
-               queue_work(system_nrt_wq, &vi->config_work);
+               schedule_work(&vi->config_work);
        } else {
                vi->status = VIRTIO_NET_S_LINK_UP;
                netif_carrier_on(dev);
@@ -1264,7 +1264,7 @@ static int virtnet_restore(struct virtio_device *vdev)
        netif_device_attach(vi->dev);
 
        if (!try_fill_recv(vi, GFP_KERNEL))
-               queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+               schedule_delayed_work(&vi->refill, 0);
 
        mutex_lock(&vi->config_lock);
        vi->config_enable = true;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index e1f4102..c6ea995 100644
@@ -860,10 +860,10 @@ void hostap_free_data(struct ap_data *ap)
                return;
        }
 
-       flush_work_sync(&ap->add_sta_proc_queue);
+       flush_work(&ap->add_sta_proc_queue);
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-       flush_work_sync(&ap->wds_oper_queue);
+       flush_work(&ap->wds_oper_queue);
        if (ap->crypt)
                ap->crypt->deinit(ap->crypt_priv);
        ap->crypt = ap->crypt_priv = NULL;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 50f87b6..8e7000f 100644
@@ -3311,13 +3311,13 @@ static void prism2_free_local_data(struct net_device *dev)
 
        unregister_netdev(local->dev);
 
-       flush_work_sync(&local->reset_queue);
-       flush_work_sync(&local->set_multicast_list_queue);
-       flush_work_sync(&local->set_tim_queue);
+       flush_work(&local->reset_queue);
+       flush_work(&local->set_multicast_list_queue);
+       flush_work(&local->set_tim_queue);
 #ifndef PRISM2_NO_STATION_MODES
-       flush_work_sync(&local->info_queue);
+       flush_work(&local->info_queue);
 #endif
-       flush_work_sync(&local->comms_qual_update);
+       flush_work(&local->comms_qual_update);
 
        lib80211_crypt_info_free(&local->crypt_info);
 
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 83324b3..534e655 100644
@@ -2181,8 +2181,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
 
        /* Make sure the RF Kill check timer is running */
        priv->stop_rf_kill = 0;
-       cancel_delayed_work(&priv->rf_kill);
-       schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
+       mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
 }
 
 static void send_scan_event(void *data)
@@ -4322,9 +4321,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
                                          "disabled by HW switch\n");
                        /* Make sure the RF_KILL check timer is running */
                        priv->stop_rf_kill = 0;
-                       cancel_delayed_work(&priv->rf_kill);
-                       schedule_delayed_work(&priv->rf_kill,
-                                             round_jiffies_relative(HZ));
+                       mod_delayed_work(system_wq, &priv->rf_kill,
+                                        round_jiffies_relative(HZ));
                } else
                        schedule_reset(priv);
        }
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index af83c43..ef2b171 100644
@@ -1164,8 +1164,7 @@ void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
 {
        struct zd_usb_rx *rx = &usb->rx;
 
-       cancel_delayed_work(&rx->idle_work);
-       queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+       mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
 }
 
 static inline void init_usb_interrupt(struct zd_usb *usb)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 52daaa8..9da5fe7 100644
@@ -7685,25 +7685,15 @@ static int fan_set_speed(int speed)
 
 static void fan_watchdog_reset(void)
 {
-       static int fan_watchdog_active;
-
        if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
                return;
 
-       if (fan_watchdog_active)
-               cancel_delayed_work(&fan_watchdog_task);
-
        if (fan_watchdog_maxinterval > 0 &&
-           tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
-               fan_watchdog_active = 1;
-               if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
-                               msecs_to_jiffies(fan_watchdog_maxinterval
-                                                * 1000))) {
-                       pr_err("failed to queue the fan watchdog, "
-                              "watchdog will not trigger\n");
-               }
-       } else
-               fan_watchdog_active = 0;
+           tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+               mod_delayed_work(tpacpi_wq, &fan_watchdog_task,
+                       msecs_to_jiffies(fan_watchdog_maxinterval * 1000));
+       else
+               cancel_delayed_work(&fan_watchdog_task);
 }
 
 static void fan_watchdog_fire(struct work_struct *ignored)
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index bba3cca..3041514 100644
@@ -1018,7 +1018,7 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
        }
 
        /* Init work for measuring temperature periodically */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+       INIT_DEFERRABLE_WORK(&di->btemp_periodic_work,
                ab8500_btemp_periodic_work);
 
        /* Identify the battery */
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index d4f0c98..0701dbc 100644
@@ -2618,9 +2618,9 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
        }
 
        /* Init work for HW failure check */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+       INIT_DEFERRABLE_WORK(&di->check_hw_failure_work,
                ab8500_charger_check_hw_failure_work);
-       INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+       INIT_DEFERRABLE_WORK(&di->check_usbchgnotok_work,
                ab8500_charger_check_usbchargernotok_work);
 
        /*
@@ -2632,10 +2632,10 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
         * watchdog have to be kicked by the charger driver
         * when the AC charger is disabled
         */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work,
+       INIT_DEFERRABLE_WORK(&di->kick_wd_work,
                ab8500_charger_kick_watchdog_work);
 
-       INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work,
+       INIT_DEFERRABLE_WORK(&di->check_vbat_work,
                ab8500_charger_check_vbat_work);
 
        /* Init work for charger detection */
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index bf02225..5c9e7c2 100644
@@ -2516,19 +2516,19 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
        INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work);
 
        /* Init work for reinitialising the fg algorithm */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+       INIT_DEFERRABLE_WORK(&di->fg_reinit_work,
                ab8500_fg_reinit_work);
 
        /* Work delayed Queue to run the state machine */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+       INIT_DEFERRABLE_WORK(&di->fg_periodic_work,
                ab8500_fg_periodic_work);
 
        /* Work to check low battery condition */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+       INIT_DEFERRABLE_WORK(&di->fg_low_bat_work,
                ab8500_fg_low_bat_work);
 
        /* Init work for HW failure check */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->fg_check_hw_failure_work,
+       INIT_DEFERRABLE_WORK(&di->fg_check_hw_failure_work,
                ab8500_fg_check_hw_failure_work);
 
        /* Initialize OVV, and other registers */
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
index 804b88c..4d30280 100644
@@ -1848,9 +1848,9 @@ static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
        }
 
        /* Init work for chargalg */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
+       INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
                abx500_chargalg_periodic_work);
-       INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
+       INIT_DEFERRABLE_WORK(&di->chargalg_wd_work,
                abx500_chargalg_wd_work);
 
        /* Init work for chargalg */
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 526e5c9..7ff83cf 100644
@@ -509,9 +509,8 @@ static void _setup_polling(struct work_struct *work)
        if (!delayed_work_pending(&cm_monitor_work) ||
            (delayed_work_pending(&cm_monitor_work) &&
             time_after(next_polling, _next_polling))) {
-               cancel_delayed_work_sync(&cm_monitor_work);
                next_polling = jiffies + polling_jiffy;
-               queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+               mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
        }
 
 out:
@@ -546,10 +545,8 @@ static void fullbatt_handler(struct charger_manager *cm)
        if (cm_suspended)
                device_set_wakeup_capable(cm->dev, true);
 
-       if (delayed_work_pending(&cm->fullbatt_vchk_work))
-               cancel_delayed_work(&cm->fullbatt_vchk_work);
-       queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
-                          msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+       mod_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+                        msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
        cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
                                       desc->fullbatt_vchkdrop_ms);
 
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 74c6b23..b19bfe4 100644
@@ -290,7 +290,7 @@ static struct gpio collie_batt_gpios[] = {
 static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
 {
        /* flush all pending status updates */
-       flush_work_sync(&bat_work);
+       flush_work(&bat_work);
        return 0;
 }
 
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 076e211..704e652 100644
@@ -355,8 +355,7 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)
 
        dev_dbg(di->dev, "%s\n", __func__);
 
-       cancel_delayed_work(&di->monitor_work);
-       queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
+       mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
 }
 
 
@@ -401,8 +400,7 @@ static void ds2760_battery_set_charged(struct power_supply *psy)
 
        /* postpone the actual work by 20 secs. This is for debouncing GPIO
         * signals and to let the current value settle. See AN4188. */
-       cancel_delayed_work(&di->set_charged_work);
-       queue_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
+       mod_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
 }
 
 static int ds2760_battery_get_property(struct power_supply *psy,
@@ -616,8 +614,7 @@ static int ds2760_battery_resume(struct platform_device *pdev)
        di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
        power_supply_changed(&di->bat);
 
-       cancel_delayed_work(&di->monitor_work);
-       queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
+       mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
 
        return 0;
 }
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 8dbc7bf..ffbed5e 100644
@@ -173,16 +173,14 @@ static void jz_battery_external_power_changed(struct power_supply *psy)
 {
        struct jz_battery *jz_battery = psy_to_jz_battery(psy);
 
-       cancel_delayed_work(&jz_battery->work);
-       schedule_delayed_work(&jz_battery->work, 0);
+       mod_delayed_work(system_wq, &jz_battery->work, 0);
 }
 
 static irqreturn_t jz_battery_charge_irq(int irq, void *data)
 {
        struct jz_battery *jz_battery = data;
 
-       cancel_delayed_work(&jz_battery->work);
-       schedule_delayed_work(&jz_battery->work, 0);
+       mod_delayed_work(system_wq, &jz_battery->work, 0);
 
        return IRQ_HANDLED;
 }
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c284143..58e6783 100644
@@ -232,7 +232,7 @@ static int __devinit max17040_probe(struct i2c_client *client,
        max17040_reset(client);
        max17040_get_version(client);
 
-       INIT_DELAYED_WORK_DEFERRABLE(&chip->work, max17040_work);
+       INIT_DEFERRABLE_WORK(&chip->work, max17040_work);
        schedule_delayed_work(&chip->work, MAX17040_DELAY);
 
        return 0;
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 28bbe7e..51199b5 100644
@@ -327,7 +327,7 @@ static struct gpio tosa_bat_gpios[] = {
 static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
 {
        /* flush all pending status updates */
-       flush_work_sync(&bat_work);
+       flush_work(&bat_work);
        return 0;
 }
 
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index d2d4c08..1245fe1 100644
@@ -146,7 +146,7 @@ static irqreturn_t wm97xx_chrg_irq(int irq, void *data)
 #ifdef CONFIG_PM
 static int wm97xx_bat_suspend(struct device *dev)
 {
-       flush_work_sync(&bat_work);
+       flush_work(&bat_work);
        return 0;
 }
 
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index 8c9a607..5757d0d 100644
@@ -276,7 +276,7 @@ static int z2_batt_suspend(struct device *dev)
        struct i2c_client *client = to_i2c_client(dev);
        struct z2_charger *charger = i2c_get_clientdata(client);
 
-       flush_work_sync(&charger->bat_work);
+       flush_work(&charger->bat_work);
        return 0;
 }
 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 2e0352d..5c4829c 100644
@@ -3476,7 +3476,7 @@ void regulator_unregister(struct regulator_dev *rdev)
                regulator_put(rdev->supply);
        mutex_lock(&regulator_list_mutex);
        debugfs_remove_recursive(rdev->debugfs);
-       flush_work_sync(&rdev->disable_work.work);
+       flush_work(&rdev->disable_work.work);
        WARN_ON(rdev->open_count);
        unset_regulator_supplies(rdev);
        list_del(&rdev->list);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index def24a1..33c52bc 100644
@@ -999,7 +999,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
        int poll_count = 0;
        arcmsr_free_sysfs_attr(acb);
        scsi_remove_host(host);
-       flush_work_sync(&acb->arcmsr_do_message_isr_bh);
+       flush_work(&acb->arcmsr_do_message_isr_bh);
        del_timer_sync(&acb->eternal_timer);
        arcmsr_disable_outbound_ints(acb);
        arcmsr_stop_adapter_bgrb(acb);
@@ -1045,7 +1045,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
                (struct AdapterControlBlock *)host->hostdata;
        del_timer_sync(&acb->eternal_timer);
        arcmsr_disable_outbound_ints(acb);
-       flush_work_sync(&acb->arcmsr_do_message_isr_bh);
+       flush_work(&acb->arcmsr_do_message_isr_bh);
        arcmsr_stop_adapter_bgrb(acb);
        arcmsr_flush_adapter_cache(acb);
 }
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0a2c5a8..45e192a 100644
@@ -9020,7 +9020,7 @@ static void __ipr_remove(struct pci_dev *pdev)
 
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-       flush_work_sync(&ioa_cfg->work_q);
+       flush_work(&ioa_cfg->work_q);
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 
        spin_lock(&ipr_driver_lock);
index ea8a0b4..af763ea 100644 (file)
@@ -5459,7 +5459,7 @@ static void __devexit pmcraid_remove(struct pci_dev *pdev)
        pmcraid_shutdown(pdev);
 
        pmcraid_disable_interrupts(pinstance, ~0);
-       flush_work_sync(&pinstance->worker_q);
+       flush_work(&pinstance->worker_q);
 
        pmcraid_kill_tasklets(pinstance);
        pmcraid_unregister_interrupt_handler(pinstance);
index 5b30132..bddc97c 100644 (file)
@@ -969,7 +969,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        mutex_unlock(&ha->tgt.tgt_mutex);
 
-       flush_delayed_work_sync(&tgt->sess_del_work);
+       flush_delayed_work(&tgt->sess_del_work);
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
            "Waiting for sess works (tgt %p)", tgt);
index 1154a99..d0dabcf 100644 (file)
@@ -827,7 +827,7 @@ void gether_cleanup(void)
                return;
 
        unregister_netdev(the_dev->net);
-       flush_work_sync(&the_dev->work);
+       flush_work(&the_dev->work);
        free_netdev(the_dev->net);
 
        the_dev = NULL;
index 24d8eeb..094fdc3 100644 (file)
@@ -264,7 +264,7 @@ int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
        list_add_tail(&msg->node, &nvec->tx_data);
        spin_unlock_irqrestore(&nvec->tx_lock, flags);
 
-       queue_work(system_nrt_wq, &nvec->tx_work);
+       schedule_work(&nvec->tx_work);
 
        return 0;
 }
@@ -471,7 +471,7 @@ static void nvec_rx_completed(struct nvec_chip *nvec)
        if (!nvec_msg_is_event(nvec->rx))
                complete(&nvec->ec_transfer);
 
-       queue_work(system_nrt_wq, &nvec->rx_work);
+       schedule_work(&nvec->rx_work);
 }
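
system_nrt_wq existed only for its non-reentrancy guarantee; since every workqueue now provides that, queue_work(system_nrt_wq, w) reduces to schedule_work(w). A sketch with a hypothetical foo_chip:

	#include <linux/workqueue.h>

	struct foo_chip {
		struct work_struct tx_work;
	};

	static void foo_submit_tx(struct foo_chip *chip)
	{
		/* was: queue_work(system_nrt_wq, &chip->tx_work); */
		schedule_work(&chip->tx_work);	/* system_wq is non-reentrant too */
	}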
 
 /**
index 2ab31e4..67789b8 100644 (file)
@@ -694,17 +694,14 @@ thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
                                            int delay)
 {
-       cancel_delayed_work(&(tz->poll_queue));
-
-       if (!delay)
-               return;
-
        if (delay > 1000)
-               queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-                                     round_jiffies(msecs_to_jiffies(delay)));
+               mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+                                round_jiffies(msecs_to_jiffies(delay)));
+       else if (delay)
+               mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+                                msecs_to_jiffies(delay));
        else
-               queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-                                     msecs_to_jiffies(delay));
+               cancel_delayed_work(&tz->poll_queue);
 }
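
This hunk shows the mod_timer()-style behavior described in the merge message: mod_delayed_work() queues the work if it is idle, otherwise it just moves the pending timer, so the cancel-first step disappears and the !delay early return folds into a plain cancel. Condensed into a sketch around a hypothetical foo_poller:

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	struct foo_poller {
		struct delayed_work poll_work;
	};

	static void foo_set_polling(struct foo_poller *p, int delay_ms)
	{
		if (delay_ms)	/* (re)arm without canceling first */
			mod_delayed_work(system_freezable_wq, &p->poll_work,
					 msecs_to_jiffies(delay_ms));
		else		/* zero now simply means stop polling */
			cancel_delayed_work(&p->poll_work);
	}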
 
 static void thermal_zone_device_passive(struct thermal_zone_device *tz,
index 0083bc1..5b95b4f 100644 (file)
@@ -765,7 +765,7 @@ static void hvsi_flush_output(struct hvsi_struct *hp)
 
        /* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
        cancel_delayed_work_sync(&hp->writer);
-       flush_work_sync(&hp->handshaker);
+       flush_work(&hp->handshaker);
 
        /*
         * it's also possible that our timeout expired and hvsi_write_worker
index 0aeb5a3..b4ba067 100644 (file)
@@ -1729,7 +1729,7 @@ void ipwireless_hardware_free(struct ipw_hardware *hw)
 
        ipwireless_stop_interrupts(hw);
 
-       flush_work_sync(&hw->work_rx);
+       flush_work(&hw->work_rx);
 
        for (i = 0; i < NL_NUM_OF_ADDRESSES; i++)
                if (hw->packet_assembler[i] != NULL)
index d2af155..57102e6 100644 (file)
@@ -435,8 +435,8 @@ void ipwireless_network_free(struct ipw_network *network)
        network->shutting_down = 1;
 
        ipwireless_ppp_close(network);
-       flush_work_sync(&network->work_go_online);
-       flush_work_sync(&network->work_go_offline);
+       flush_work(&network->work_go_online);
+       flush_work(&network->work_go_offline);
 
        ipwireless_stop_interrupts(network->hardware);
        ipwireless_associate_network(network->hardware, NULL);
index 3f63d83..c0b3343 100644 (file)
@@ -122,7 +122,7 @@ static void kgdboc_unregister_kbd(void)
                        i--;
                }
        }
-       flush_work_sync(&kgdboc_restore_input_work);
+       flush_work(&kgdboc_restore_input_work);
 }
 #else /* ! CONFIG_KDB_KEYBOARD */
 #define kgdboc_register_kbd(x) 0
index ccc2f35..6ede6fd 100644 (file)
@@ -1227,7 +1227,7 @@ static int serial_omap_suspend(struct device *dev)
        struct uart_omap_port *up = dev_get_drvdata(dev);
 
        uart_suspend_port(&serial_omap_reg, &up->port);
-       flush_work_sync(&up->qos_work);
+       flush_work(&up->qos_work);
 
        return 0;
 }
index 4d7b562..0f2a2c5 100644 (file)
@@ -523,9 +523,9 @@ static int tty_ldisc_halt(struct tty_struct *tty)
  */
 static void tty_ldisc_flush_works(struct tty_struct *tty)
 {
-       flush_work_sync(&tty->hangup_work);
-       flush_work_sync(&tty->SAK_work);
-       flush_work_sync(&tty->buf.work);
+       flush_work(&tty->hangup_work);
+       flush_work(&tty->SAK_work);
+       flush_work(&tty->buf.work);
 }
 
 /**
index 975e9c6..807627b 100644 (file)
@@ -718,7 +718,7 @@ static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_de
        del_timer_sync(&instance->resubmit_timer);
        usb_free_urb(int_urb);
 
-       flush_work_sync(&instance->status_check_work);
+       flush_work(&instance->status_check_work);
 }
 
 static int speedtch_pre_reset(struct usb_interface *intf)
index e1f8b2c..defff43 100644 (file)
@@ -2262,7 +2262,7 @@ static void uea_stop(struct uea_softc *sc)
        usb_free_urb(sc->urb_int);
 
        /* flush the work item when no one can schedule it */
-       flush_work_sync(&sc->task);
+       flush_work(&sc->task);
 
        release_firmware(sc->dsp_firm);
        uea_leaves(INS_TO_USBDEV(sc));
index b9c4690..6458764 100644 (file)
@@ -834,7 +834,7 @@ void gether_cleanup(void)
                return;
 
        unregister_netdev(the_dev->net);
-       flush_work_sync(&the_dev->work);
+       flush_work(&the_dev->work);
        free_netdev(the_dev->net);
 
        the_dev = NULL;
index 6780010..4a1d64d 100644 (file)
@@ -893,7 +893,7 @@ static void ohci_stop (struct usb_hcd *hcd)
        ohci_dump (ohci, 1);
 
        if (quirk_nec(ohci))
-               flush_work_sync(&ohci->nec_work);
+               flush_work(&ohci->nec_work);
 
        ohci_usb_reset (ohci);
        ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
index 81f1f9a..ceee211 100644 (file)
@@ -1230,7 +1230,7 @@ static int __exit isp1301_remove(struct i2c_client *i2c)
        isp->timer.data = 0;
        set_bit(WORK_STOP, &isp->todo);
        del_timer_sync(&isp->timer);
-       flush_work_sync(&isp->work);
+       flush_work(&isp->work);
 
        put_device(&i2c->dev);
        the_transceiver = NULL;
index 3f5acc7..6b5e6e0 100644 (file)
@@ -906,7 +906,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
                r = -ENOMEM;
                goto err_wq;
        }
-       INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
+       INIT_DEFERRABLE_WORK(&td->esd_work, taal_esd_work);
        INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work);
 
        dev_set_drvdata(&dssdev->dev, td);
@@ -962,8 +962,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
                        goto err_irq;
                }
 
-               INIT_DELAYED_WORK_DEFERRABLE(&td->te_timeout_work,
-                                       taal_te_timeout_work_callback);
+               INIT_DEFERRABLE_WORK(&td->te_timeout_work,
+                                    taal_te_timeout_work_callback);
 
                dev_dbg(&dssdev->dev, "Using GPIO TE\n");
        }
index b07e886..05ee046 100644 (file)
@@ -4306,7 +4306,7 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
         * and is sending the data.
         */
 
-       __cancel_delayed_work(&dsi->framedone_timeout_work);
+       cancel_delayed_work(&dsi->framedone_timeout_work);
 
        dsi_handle_framedone(dsidev, 0);
 }
@@ -4863,8 +4863,8 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
        mutex_init(&dsi->lock);
        sema_init(&dsi->bus_lock, 1);
 
-       INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
-                       dsi_framedone_timeout_work_callback);
+       INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
+                            dsi_framedone_timeout_work_callback);
 
 #ifdef DSI_CATCH_MISSING_TE
        init_timer(&dsi->te_timer);
index c70f1e5..022cecb 100644 (file)
@@ -551,7 +551,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
                return -EINVAL;
        }
 
-       flush_delayed_work_sync(&sbi->sb_work);
+       flush_delayed_work(&sbi->sb_work);
        replace_mount_options(sb, new_opts);
 
        sbi->s_flags = mount_flags;
index 587ef51..7ef637d 100644 (file)
@@ -351,9 +351,7 @@ void afs_dispatch_give_up_callbacks(struct work_struct *work)
  */
 void afs_flush_callback_breaks(struct afs_server *server)
 {
-       cancel_delayed_work(&server->cb_break_work);
-       queue_delayed_work(afs_callback_update_worker,
-                          &server->cb_break_work, 0);
+       mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0);
 }
 
 #if 0
index d59b751..f342acf 100644 (file)
@@ -285,12 +285,7 @@ static void afs_reap_server(struct work_struct *work)
                expiry = server->time_of_death + afs_server_timeout;
                if (expiry > now) {
                        delay = (expiry - now) * HZ;
-                       if (!queue_delayed_work(afs_wq, &afs_server_reaper,
-                                               delay)) {
-                               cancel_delayed_work(&afs_server_reaper);
-                               queue_delayed_work(afs_wq, &afs_server_reaper,
-                                                  delay);
-                       }
+                       mod_delayed_work(afs_wq, &afs_server_reaper, delay);
                        break;
                }
 
@@ -323,6 +318,5 @@ static void afs_reap_server(struct work_struct *work)
 void __exit afs_purge_servers(void)
 {
        afs_server_timeout = 0;
-       cancel_delayed_work(&afs_server_reaper);
-       queue_delayed_work(afs_wq, &afs_server_reaper, 0);
+       mod_delayed_work(afs_wq, &afs_server_reaper, 0);
 }
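
The afs hunks also retire a subtler workaround: queue_delayed_work() is a no-op while the work is already pending, so "requeue with a new delay" had to be open-coded as a conditional cancel + queue. mod_delayed_work() expresses it directly; a sketch of the shape being replaced:

	static void foo_rearm(struct workqueue_struct *wq,
			      struct delayed_work *dwork, unsigned long delay)
	{
		/*
		 * was:	if (!queue_delayed_work(wq, dwork, delay)) {
		 *		cancel_delayed_work(dwork);
		 *		queue_delayed_work(wq, dwork, delay);
		 *	}
		 */
		mod_delayed_work(wq, dwork, delay);
	}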
index 431984d..57bcb15 100644 (file)
@@ -561,12 +561,7 @@ static void afs_vlocation_reaper(struct work_struct *work)
                if (expiry > now) {
                        delay = (expiry - now) * HZ;
                        _debug("delay %lu", delay);
-                       if (!queue_delayed_work(afs_wq, &afs_vlocation_reap,
-                                               delay)) {
-                               cancel_delayed_work(&afs_vlocation_reap);
-                               queue_delayed_work(afs_wq, &afs_vlocation_reap,
-                                                  delay);
-                       }
+                       mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
                        break;
                }
 
@@ -614,13 +609,10 @@ void afs_vlocation_purge(void)
        spin_lock(&afs_vlocation_updates_lock);
        list_del_init(&afs_vlocation_updates);
        spin_unlock(&afs_vlocation_updates_lock);
-       cancel_delayed_work(&afs_vlocation_update);
-       queue_delayed_work(afs_vlocation_update_worker,
-                          &afs_vlocation_update, 0);
+       mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
        destroy_workqueue(afs_vlocation_update_worker);
 
-       cancel_delayed_work(&afs_vlocation_reap);
-       queue_delayed_work(afs_wq, &afs_vlocation_reap, 0);
+       mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
 }
 
 /*
index 4a38db7..0fb6539 100644 (file)
@@ -1289,7 +1289,7 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
        spin_lock(&ls->ls_recover_spin);
        set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
        spin_unlock(&ls->ls_recover_spin);
-       flush_delayed_work_sync(&sdp->sd_control_work);
+       flush_delayed_work(&sdp->sd_control_work);
 
        /* mounted_lock and control_lock will be purged in dlm recovery */
 release:
index a8d90f2..bc73726 100644 (file)
@@ -1579,7 +1579,7 @@ out:
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
        ip->i_gl->gl_object = NULL;
-       flush_delayed_work_sync(&ip->i_gl->gl_work);
+       flush_delayed_work(&ip->i_gl->gl_work);
        gfs2_glock_add_to_lru(ip->i_gl);
        gfs2_glock_put(ip->i_gl);
        ip->i_gl = NULL;
index ee1bc55..5539093 100644 (file)
@@ -644,7 +644,7 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 
        /* sync the superblock to buffers */
        sb = inode->i_sb;
-       flush_delayed_work_sync(&HFS_SB(sb)->mdb_work);
+       flush_delayed_work(&HFS_SB(sb)->mdb_work);
        /* .. finally sync the buffers to disk */
        err = sync_blockdev(sb->s_bdev);
        if (!ret)
index 333df07..eaa7432 100644 (file)
@@ -314,11 +314,11 @@ static void ncp_stop_tasks(struct ncp_server *server) {
        release_sock(sk);
        del_timer_sync(&server->timeout_tm);
 
-       flush_work_sync(&server->rcv.tq);
+       flush_work(&server->rcv.tq);
        if (sk->sk_socket->type == SOCK_STREAM)
-               flush_work_sync(&server->tx.tq);
+               flush_work(&server->tx.tq);
        else
-               flush_work_sync(&server->timeout_tq);
+               flush_work(&server->timeout_tq);
 }
 
 static int  ncp_show_options(struct seq_file *seq, struct dentry *root)
index 6930bec..1720d32 100644 (file)
@@ -117,8 +117,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
                timeout = 5 * HZ;
        dprintk("%s: requeueing work. Lease period = %ld\n",
                        __func__, (timeout + HZ - 1) / HZ);
-       cancel_delayed_work(&clp->cl_renewd);
-       schedule_delayed_work(&clp->cl_renewd, timeout);
+       mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
        set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
        spin_unlock(&clp->cl_lock);
 }
index 8f9cea1..c19897d 100644 (file)
@@ -327,5 +327,5 @@ void o2quo_exit(void)
 {
        struct o2quo_state *qs = &o2quo_state;
 
-       flush_work_sync(&qs->qs_work);
+       flush_work(&qs->qs_work);
 }
index 19e2380..001537f 100644 (file)
@@ -954,7 +954,7 @@ xfs_fs_sync_fs(
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
-               flush_delayed_work_sync(&mp->m_sync_work);
+               flush_delayed_work(&mp->m_sync_work);
        }
 
        return 0;
index 9654817..9500caf 100644 (file)
@@ -475,7 +475,7 @@ xfs_flush_inodes(
        struct xfs_mount        *mp = ip->i_mount;
 
        queue_work(xfs_syncd_wq, &mp->m_flush_work);
-       flush_work_sync(&mp->m_flush_work);
+       flush_work(&mp->m_flush_work);
 }
 
 STATIC void
index af15545..2b58905 100644 (file)
@@ -16,6 +16,7 @@ struct workqueue_struct;
 
 struct work_struct;
 typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(unsigned long __data);
 
 /*
  * The first word is the work queue pointer and the flags rolled into
@@ -67,9 +68,18 @@ enum {
        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,
 
+       /* data contains off-queue information when !WORK_STRUCT_CWQ */
+       WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_FLAG_BITS,
+
+       WORK_OFFQ_CANCELING     = (1 << WORK_OFFQ_FLAG_BASE),
+
+       WORK_OFFQ_FLAG_BITS     = 1,
+       WORK_OFFQ_CPU_SHIFT     = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+
+       /* convenience constants */
        WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
        WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
-       WORK_STRUCT_NO_CPU      = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
+       WORK_STRUCT_NO_CPU      = (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,
 
        /* bit mask for work_busy() return values */
        WORK_BUSY_PENDING       = 1 << 0,
@@ -92,6 +102,7 @@ struct work_struct {
 struct delayed_work {
        struct work_struct work;
        struct timer_list timer;
+       int cpu;
 };
 
 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -115,41 +126,38 @@ struct execute_work {
 #define __WORK_INIT_LOCKDEP_MAP(n, k)
 #endif
 
-#define __WORK_INITIALIZER(n, f) {                             \
-       .data = WORK_DATA_STATIC_INIT(),                        \
-       .entry  = { &(n).entry, &(n).entry },                   \
-       .func = (f),                                            \
-       __WORK_INIT_LOCKDEP_MAP(#n, &(n))                       \
+#define __WORK_INITIALIZER(n, f) {                                     \
+       .data = WORK_DATA_STATIC_INIT(),                                \
+       .entry  = { &(n).entry, &(n).entry },                           \
+       .func = (f),                                                    \
+       __WORK_INIT_LOCKDEP_MAP(#n, &(n))                               \
        }
 
-#define __DELAYED_WORK_INITIALIZER(n, f) {                     \
-       .work = __WORK_INITIALIZER((n).work, (f)),              \
-       .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
+#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {                     \
+       .work = __WORK_INITIALIZER((n).work, (f)),                      \
+       .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,             \
+                                    0, (unsigned long)&(n),            \
+                                    (tflags) | TIMER_IRQSAFE),         \
        }
 
-#define __DEFERRED_WORK_INITIALIZER(n, f) {                    \
-       .work = __WORK_INITIALIZER((n).work, (f)),              \
-       .timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),        \
-       }
-
-#define DECLARE_WORK(n, f)                                     \
+#define DECLARE_WORK(n, f)                                             \
        struct work_struct n = __WORK_INITIALIZER(n, f)
 
-#define DECLARE_DELAYED_WORK(n, f)                             \
-       struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
+#define DECLARE_DELAYED_WORK(n, f)                                     \
+       struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
 
-#define DECLARE_DEFERRED_WORK(n, f)                            \
-       struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
+#define DECLARE_DEFERRABLE_WORK(n, f)                                  \
+       struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
 
 /*
  * initialize a work item's function pointer
  */
-#define PREPARE_WORK(_work, _func)                             \
-       do {                                                    \
-               (_work)->func = (_func);                        \
+#define PREPARE_WORK(_work, _func)                                     \
+       do {                                                            \
+               (_work)->func = (_func);                                \
        } while (0)
 
-#define PREPARE_DELAYED_WORK(_work, _func)                     \
+#define PREPARE_DELAYED_WORK(_work, _func)                             \
        PREPARE_WORK(&(_work)->work, (_func))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -179,7 +187,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
                                                                        \
                __init_work((_work), _onstack);                         \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
-               lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
+               lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
                INIT_LIST_HEAD(&(_work)->entry);                        \
                PREPARE_WORK((_work), (_func));                         \
        } while (0)
@@ -193,33 +201,44 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
        } while (0)
 #endif
 
-#define INIT_WORK(_work, _func)                                        \
-       do {                                                    \
-               __INIT_WORK((_work), (_func), 0);               \
+#define INIT_WORK(_work, _func)                                                \
+       do {                                                            \
+               __INIT_WORK((_work), (_func), 0);                       \
        } while (0)
 
-#define INIT_WORK_ONSTACK(_work, _func)                                \
-       do {                                                    \
-               __INIT_WORK((_work), (_func), 1);               \
+#define INIT_WORK_ONSTACK(_work, _func)                                        \
+       do {                                                            \
+               __INIT_WORK((_work), (_func), 1);                       \
        } while (0)
 
-#define INIT_DELAYED_WORK(_work, _func)                                \
-       do {                                                    \
-               INIT_WORK(&(_work)->work, (_func));             \
-               init_timer(&(_work)->timer);                    \
+#define __INIT_DELAYED_WORK(_work, _func, _tflags)                     \
+       do {                                                            \
+               INIT_WORK(&(_work)->work, (_func));                     \
+               __setup_timer(&(_work)->timer, delayed_work_timer_fn,   \
+                             (unsigned long)(_work),                   \
+                             (_tflags) | TIMER_IRQSAFE);               \
        } while (0)
 
-#define INIT_DELAYED_WORK_ONSTACK(_work, _func)                        \
-       do {                                                    \
-               INIT_WORK_ONSTACK(&(_work)->work, (_func));     \
-               init_timer_on_stack(&(_work)->timer);           \
+#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)             \
+       do {                                                            \
+               INIT_WORK_ONSTACK(&(_work)->work, (_func));             \
+               __setup_timer_on_stack(&(_work)->timer,                 \
+                                      delayed_work_timer_fn,           \
+                                      (unsigned long)(_work),          \
+                                      (_tflags) | TIMER_IRQSAFE);      \
        } while (0)
 
-#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)             \
-       do {                                                    \
-               INIT_WORK(&(_work)->work, (_func));             \
-               init_timer_deferrable(&(_work)->timer);         \
-       } while (0)
+#define INIT_DELAYED_WORK(_work, _func)                                        \
+       __INIT_DELAYED_WORK(_work, _func, 0)
+
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func)                                \
+       __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
+
+#define INIT_DEFERRABLE_WORK(_work, _func)                             \
+       __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
+
+#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)                     \
+       __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
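
All the delayed-work initializers now funnel through __INIT_DELAYED_WORK()/__DELAYED_WORK_INITIALIZER() with a timer-flags argument, and the deferrable names line up with the plain ones: INIT_DELAYED_WORK_DEFERRABLE() becomes INIT_DEFERRABLE_WORK(), DECLARE_DEFERRED_WORK() becomes DECLARE_DEFERRABLE_WORK(). A usage sketch with hypothetical foo_* names:

	#include <linux/workqueue.h>

	static void foo_scan(struct work_struct *work);

	/* static definition; was DECLARE_DEFERRED_WORK() */
	static DECLARE_DEFERRABLE_WORK(foo_scan_work, foo_scan);

	struct foo_dev {
		struct delayed_work esd_work;
	};

	static void foo_dev_init(struct foo_dev *dev)
	{
		/* dynamic init; was INIT_DELAYED_WORK_DEFERRABLE() */
		INIT_DEFERRABLE_WORK(&dev->esd_work, foo_scan);
		schedule_delayed_work(&dev->esd_work, HZ);
	}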
 
 /**
  * work_pending - Find out whether a work item is currently pending
@@ -278,10 +297,6 @@ enum {
  * system_long_wq is similar to system_wq but may host long running
  * works.  Queue flushing might take relatively long.
  *
- * system_nrt_wq is non-reentrant and guarantees that any given work
- * item is never executed in parallel by multiple CPUs.  Queue
- * flushing might take relatively long.
- *
  * system_unbound_wq is unbound workqueue.  Workers are not bound to
  * any specific CPU, not concurrency managed, and all queued works are
  * executed immediately as long as max_active limit is not reached and
@@ -289,16 +304,25 @@ enum {
  *
  * system_freezable_wq is equivalent to system_wq except that it's
  * freezable.
- *
- * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
- * it's freezable.
  */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
-extern struct workqueue_struct *system_nrt_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
-extern struct workqueue_struct *system_nrt_freezable_wq;
+
+static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
+{
+       return system_wq;
+}
+
+static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
+{
+       return system_freezable_wq;
+}
+
+/* equivalent to system_wq and system_freezable_wq, deprecated */
+#define system_nrt_wq                  __system_nrt_wq()
+#define system_nrt_freezable_wq                __system_nrt_freezable_wq()
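
The shims keep code merged during the window building, while __deprecated makes every remaining reference warn at compile time, since the old names now expand to calls of deprecated inline functions. In practice:

	static void foo_kick(struct work_struct *work)
	{
		/* queue_work(system_nrt_wq, work);  -- builds, but warns */
		queue_work(system_wq, work);	/* identical behaviour, preferred */
	}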
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
@@ -321,22 +345,22 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #ifdef CONFIG_LOCKDEP
-#define alloc_workqueue(fmt, flags, max_active, args...)       \
-({                                                             \
-       static struct lock_class_key __key;                     \
-       const char *__lock_name;                                \
-                                                               \
-       if (__builtin_constant_p(fmt))                          \
-               __lock_name = (fmt);                            \
-       else                                                    \
-               __lock_name = #fmt;                             \
-                                                               \
-       __alloc_workqueue_key((fmt), (flags), (max_active),     \
-                             &__key, __lock_name, ##args);     \
+#define alloc_workqueue(fmt, flags, max_active, args...)               \
+({                                                                     \
+       static struct lock_class_key __key;                             \
+       const char *__lock_name;                                        \
+                                                                       \
+       if (__builtin_constant_p(fmt))                                  \
+               __lock_name = (fmt);                                    \
+       else                                                            \
+               __lock_name = #fmt;                                     \
+                                                                       \
+       __alloc_workqueue_key((fmt), (flags), (max_active),             \
+                             &__key, __lock_name, ##args);             \
 })
 #else
-#define alloc_workqueue(fmt, flags, max_active, args...)       \
-       __alloc_workqueue_key((fmt), (flags), (max_active),     \
+#define alloc_workqueue(fmt, flags, max_active, args...)               \
+       __alloc_workqueue_key((fmt), (flags), (max_active),             \
                              NULL, NULL, ##args)
 #endif
 
@@ -353,46 +377,50 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * RETURNS:
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
-#define alloc_ordered_workqueue(fmt, flags, args...)           \
+#define alloc_ordered_workqueue(fmt, flags, args...)                   \
        alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
 
-#define create_workqueue(name)                                 \
+#define create_workqueue(name)                                         \
        alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezable_workqueue(name)                       \
+#define create_freezable_workqueue(name)                               \
        alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
-#define create_singlethread_workqueue(name)                    \
+#define create_singlethread_workqueue(name)                            \
        alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
-extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
-extern int queue_work_on(int cpu, struct workqueue_struct *wq,
+extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work);
-extern int queue_delayed_work(struct workqueue_struct *wq,
+extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
+extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
-extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+extern bool queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
+extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+                       struct delayed_work *dwork, unsigned long delay);
+extern bool mod_delayed_work(struct workqueue_struct *wq,
+                       struct delayed_work *dwork, unsigned long delay);
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
 
-extern int schedule_work(struct work_struct *work);
-extern int schedule_work_on(int cpu, struct work_struct *work);
-extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
-extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
-                                       unsigned long delay);
+extern bool schedule_work_on(int cpu, struct work_struct *work);
+extern bool schedule_work(struct work_struct *work);
+extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
+                                    unsigned long delay);
+extern bool schedule_delayed_work(struct delayed_work *work,
+                                 unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
 extern int keventd_up(void);
 
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 extern bool flush_work(struct work_struct *work);
-extern bool flush_work_sync(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
-extern bool flush_delayed_work_sync(struct delayed_work *work);
+extern bool cancel_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
@@ -402,27 +430,11 @@ extern unsigned int work_cpu(struct work_struct *work);
 extern unsigned int work_busy(struct work_struct *work);
 
 /*
- * Kill off a pending schedule_delayed_work().  Note that the work callback
- * function may still be running on return from cancel_delayed_work(), unless
- * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
- * cancel_work_sync() to wait on it.
- */
-static inline bool cancel_delayed_work(struct delayed_work *work)
-{
-       bool ret;
-
-       ret = del_timer_sync(&work->timer);
-       if (ret)
-               work_clear_pending(&work->work);
-       return ret;
-}
-
-/*
  * Like above, but uses del_timer() instead of del_timer_sync(). This means,
  * if it returns 0 the timer function may be running and the queueing is in
  * progress.
  */
-static inline bool __cancel_delayed_work(struct delayed_work *work)
+static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
 {
        bool ret;
 
@@ -432,6 +444,18 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
        return ret;
 }
 
+/* used to be different but now identical to flush_work(), deprecated */
+static inline bool __deprecated flush_work_sync(struct work_struct *work)
+{
+       return flush_work(work);
+}
+
+/* used to be different but now identical to flush_delayed_work(), deprecated */
+static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
+{
+       return flush_delayed_work(dwork);
+}
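
cancel_delayed_work() also moves out of the header: reimplemented in workqueue.c on top of try_to_grab_pending() (see the commit list above), it can now steal a work item even in the window after its timer has fired, and the irqsafe timer makes it callable from any context. A hedged sketch with a hypothetical foo_dev:

	#include <linux/interrupt.h>
	#include <linux/workqueue.h>

	struct foo_dev {
		struct delayed_work poll_work;
	};

	static irqreturn_t foo_stop_irq(int irq, void *data)
	{
		struct foo_dev *dev = data;

		/*
		 * The old del_timer_sync()-based version could not be used
		 * here.  Note this still does not wait for a running
		 * callback; that remains cancel_delayed_work_sync()'s job.
		 */
		cancel_delayed_work(&dev->poll_work);
		return IRQ_HANDLED;
	}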
+
 #ifndef CONFIG_SMP
 static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
index 2095be3..97c465e 100644 (file)
@@ -379,7 +379,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
        rcu_batch_queue(&sp->batch_queue, head);
        if (!sp->running) {
                sp->running = true;
-               queue_delayed_work(system_nrt_wq, &sp->work, 0);
+               schedule_delayed_work(&sp->work, 0);
        }
        spin_unlock_irqrestore(&sp->queue_lock, flags);
 }
@@ -631,7 +631,7 @@ static void srcu_reschedule(struct srcu_struct *sp)
        }
 
        if (pending)
-               queue_delayed_work(system_nrt_wq, &sp->work, SRCU_INTERVAL);
+               schedule_delayed_work(&sp->work, SRCU_INTERVAL);
 }
 
 /*
index 3c5a79e..d951daa 100644 (file)
@@ -58,7 +58,7 @@ enum {
         * be executing on any CPU.  The gcwq behaves as an unbound one.
         *
         * Note that DISASSOCIATED can be flipped only while holding
-        * managership of all pools on the gcwq to avoid changing binding
+        * assoc_mutex of all pools on the gcwq to avoid changing binding
         * state while create_worker() is in progress.
         */
        GCWQ_DISASSOCIATED      = 1 << 0,       /* cpu can't serve workers */
@@ -73,11 +73,10 @@ enum {
        WORKER_DIE              = 1 << 1,       /* die die die */
        WORKER_IDLE             = 1 << 2,       /* is idle */
        WORKER_PREP             = 1 << 3,       /* preparing to run works */
-       WORKER_REBIND           = 1 << 5,       /* mom is home, come back */
        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
 
-       WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_REBIND | WORKER_UNBOUND |
+       WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_UNBOUND |
                                  WORKER_CPU_INTENSIVE,
 
        NR_WORKER_POOLS         = 2,            /* # worker pools per gcwq */
@@ -126,7 +125,6 @@ enum {
 
 struct global_cwq;
 struct worker_pool;
-struct idle_rebind;
 
 /*
  * The poor guys doing the actual heavy lifting.  All on-duty workers
@@ -150,7 +148,6 @@ struct worker {
        int                     id;             /* I: worker id */
 
        /* for rebinding worker to CPU */
-       struct idle_rebind      *idle_rebind;   /* L: for idle worker */
        struct work_struct      rebind_work;    /* L: for busy worker */
 };
 
@@ -160,13 +157,15 @@ struct worker_pool {
 
        struct list_head        worklist;       /* L: list of pending works */
        int                     nr_workers;     /* L: total number of workers */
+
+       /* nr_idle includes the ones off idle_list for rebinding */
        int                     nr_idle;        /* L: currently idle ones */
 
        struct list_head        idle_list;      /* X: list of idle workers */
        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 
-       struct mutex            manager_mutex;  /* mutex manager should hold */
+       struct mutex            assoc_mutex;    /* protect GCWQ_DISASSOCIATED */
        struct ida              worker_ida;     /* L: for worker IDs */
 };
 
@@ -184,9 +183,8 @@ struct global_cwq {
        struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
                                                /* L: hash of busy workers */
 
-       struct worker_pool      pools[2];       /* normal and highpri pools */
-
-       wait_queue_head_t       rebind_hold;    /* rebind hold wait */
+       struct worker_pool      pools[NR_WORKER_POOLS];
+                                               /* normal and highpri pools */
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -269,17 +267,15 @@ struct workqueue_struct {
 };
 
 struct workqueue_struct *system_wq __read_mostly;
-struct workqueue_struct *system_long_wq __read_mostly;
-struct workqueue_struct *system_nrt_wq __read_mostly;
-struct workqueue_struct *system_unbound_wq __read_mostly;
-struct workqueue_struct *system_freezable_wq __read_mostly;
-struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
+struct workqueue_struct *system_highpri_wq __read_mostly;
+EXPORT_SYMBOL_GPL(system_highpri_wq);
+struct workqueue_struct *system_long_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_long_wq);
-EXPORT_SYMBOL_GPL(system_nrt_wq);
+struct workqueue_struct *system_unbound_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_unbound_wq);
+struct workqueue_struct *system_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_freezable_wq);
-EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -534,18 +530,24 @@ static int work_next_color(int color)
 }
 
 /*
- * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
- * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
- * cleared and the work data contains the cpu number it was last on.
+ * While queued, %WORK_STRUCT_CWQ is set and the non-flag bits of a work's data
+ * contain the pointer to the queued cwq.  Once execution starts, the flag
+ * is cleared and the high bits contain OFFQ flags and CPU number.
  *
- * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
- * cwq, cpu or clear work->data.  These functions should only be
- * called while the work is owned - ie. while the PENDING bit is set.
+ * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
+ * and clear_work_data() can be used to set the cwq, cpu or clear
+ * work->data.  These functions should only be called while the work is
+ * owned - ie. while the PENDING bit is set.
  *
- * get_work_[g]cwq() can be used to obtain the gcwq or cwq
- * corresponding to a work.  gcwq is available once the work has been
- * queued anywhere after initialization.  cwq is available only from
- * queueing until execution starts.
+ * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
+ * a work.  gcwq is available once the work has been queued anywhere after
+ * initialization until it is sync canceled.  cwq is available only while
+ * the work item is queued.
+ *
+ * %WORK_OFFQ_CANCELING is used to mark a work item which is being
+ * canceled.  While being canceled, a work item may have its PENDING set
+ * but stay off timer and worklist for arbitrarily long and nobody should
+ * try to steal the PENDING bit.
  */
 static inline void set_work_data(struct work_struct *work, unsigned long data,
                                 unsigned long flags)
@@ -562,13 +564,22 @@ static void set_work_cwq(struct work_struct *work,
                      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 }
 
-static void set_work_cpu(struct work_struct *work, unsigned int cpu)
+static void set_work_cpu_and_clear_pending(struct work_struct *work,
+                                          unsigned int cpu)
 {
-       set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
+       /*
+        * The following wmb is paired with the implied mb in
+        * test_and_set_bit(PENDING) and ensures all updates to @work made
+        * here are visible to and precede any updates by the next PENDING
+        * owner.
+        */
+       smp_wmb();
+       set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
 }
 
 static void clear_work_data(struct work_struct *work)
 {
+       smp_wmb();      /* see set_work_cpu_and_clear_pending() */
        set_work_data(work, WORK_STRUCT_NO_CPU, 0);
 }
 
@@ -591,7 +602,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
                return ((struct cpu_workqueue_struct *)
                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
 
-       cpu = data >> WORK_STRUCT_FLAG_BITS;
+       cpu = data >> WORK_OFFQ_CPU_SHIFT;
        if (cpu == WORK_CPU_NONE)
                return NULL;
 
@@ -599,6 +610,22 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
        return get_gcwq(cpu);
 }
 
+static void mark_work_canceling(struct work_struct *work)
+{
+       struct global_cwq *gcwq = get_work_gcwq(work);
+       unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;
+
+       set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
+                     WORK_STRUCT_PENDING);
+}
+
+static bool work_is_canceling(struct work_struct *work)
+{
+       unsigned long data = atomic_long_read(&work->data);
+
+       return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
+}
+
 /*
  * Policy functions.  These define the policies on how the global worker
  * pools are managed.  Unless noted otherwise, these functions assume that
@@ -657,6 +684,13 @@ static bool too_many_workers(struct worker_pool *pool)
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;
 
+       /*
+        * nr_idle and idle_list may disagree if idle rebinding is in
+        * progress.  Never return %true if idle_list is empty.
+        */
+       if (list_empty(&pool->idle_list))
+               return false;
+
        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 }
 
@@ -903,6 +937,206 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 }
 
 /**
+ * move_linked_works - move linked works to a list
+ * @work: start of series of works to be scheduled
+ * @head: target list to append @work to
+ * @nextp: out parameter for nested worklist walking
+ *
+ * Schedule linked works starting from @work to @head.  Work series to
+ * be scheduled starts at @work and includes any consecutive work with
+ * WORK_STRUCT_LINKED set in its predecessor.
+ *
+ * If @nextp is not NULL, it's updated to point to the next work of
+ * the last scheduled work.  This allows move_linked_works() to be
+ * nested inside an outer list_for_each_entry_safe().
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void move_linked_works(struct work_struct *work, struct list_head *head,
+                             struct work_struct **nextp)
+{
+       struct work_struct *n;
+
+       /*
+        * Linked worklist will always end before the end of the list,
+        * use NULL for list head.
+        */
+       list_for_each_entry_safe_from(work, n, NULL, entry) {
+               list_move_tail(&work->entry, head);
+               if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
+                       break;
+       }
+
+       /*
+        * If we're already inside safe list traversal and have moved
+        * multiple works to the scheduled queue, the next position
+        * needs to be updated.
+        */
+       if (nextp)
+               *nextp = n;
+}
+
+static void cwq_activate_delayed_work(struct work_struct *work)
+{
+       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+
+       trace_workqueue_activate_work(work);
+       move_linked_works(work, &cwq->pool->worklist, NULL);
+       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
+       cwq->nr_active++;
+}
+
+static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+{
+       struct work_struct *work = list_first_entry(&cwq->delayed_works,
+                                                   struct work_struct, entry);
+
+       cwq_activate_delayed_work(work);
+}
+
+/**
+ * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
+ * @cwq: cwq of interest
+ * @color: color of work which left the queue
+ *
+ * A work item has either completed or been removed from the pending
+ * queue; decrement nr_in_flight of its cwq and handle workqueue flushing.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+{
+       /* ignore uncolored works */
+       if (color == WORK_NO_COLOR)
+               return;
+
+       cwq->nr_in_flight[color]--;
+
+       cwq->nr_active--;
+       if (!list_empty(&cwq->delayed_works)) {
+               /* one down, submit a delayed one */
+               if (cwq->nr_active < cwq->max_active)
+                       cwq_activate_first_delayed(cwq);
+       }
+
+       /* is flush in progress and are we at the flushing tip? */
+       if (likely(cwq->flush_color != color))
+               return;
+
+       /* are there still in-flight works? */
+       if (cwq->nr_in_flight[color])
+               return;
+
+       /* this cwq is done, clear flush_color */
+       cwq->flush_color = -1;
+
+       /*
+        * If this was the last cwq, wake up the first flusher.  It
+        * will handle the rest.
+        */
+       if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
+               complete(&cwq->wq->first_flusher->done);
+}
+
+/**
+ * try_to_grab_pending - steal work item from worklist and disable irq
+ * @work: work item to steal
+ * @is_dwork: @work is a delayed_work
+ * @flags: place to store irq state
+ *
+ * Try to grab PENDING bit of @work.  This function can handle @work in any
+ * stable state - idle, on timer or on worklist.  Return values are
+ *
+ *  1          if @work was pending and we successfully stole PENDING
+ *  0          if @work was idle and we claimed PENDING
+ *  -EAGAIN    if PENDING couldn't be grabbed at the moment, safe to busy-retry
+ *  -ENOENT    if someone else is canceling @work, this state may persist
+ *             for arbitrarily long
+ *
+ * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
+ * interrupted while holding PENDING and @work off queue, irq must be
+ * disabled on entry.  This, combined with delayed_work->timer being
+ * irqsafe, ensures that we return -EAGAIN for a finite, short period of time.
+ *
+ * On successful return, >= 0, irq is disabled and the caller is
+ * responsible for releasing it using local_irq_restore(*@flags).
+ *
+ * This function is safe to call from any context including IRQ handler.
+ */
+static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+                              unsigned long *flags)
+{
+       struct global_cwq *gcwq;
+
+       local_irq_save(*flags);
+
+       /* try to steal the timer if it exists */
+       if (is_dwork) {
+               struct delayed_work *dwork = to_delayed_work(work);
+
+               /*
+                * dwork->timer is irqsafe.  If del_timer() fails, it's
+                * guaranteed that the timer is not queued anywhere and not
+                * running on the local CPU.
+                */
+               if (likely(del_timer(&dwork->timer)))
+                       return 1;
+       }
+
+       /* try to claim PENDING the normal way */
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
+               return 0;
+
+       /*
+        * The queueing is in progress, or it is already queued. Try to
+        * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+        */
+       gcwq = get_work_gcwq(work);
+       if (!gcwq)
+               goto fail;
+
+       spin_lock(&gcwq->lock);
+       if (!list_empty(&work->entry)) {
+               /*
+                * This work is queued, but perhaps we locked the wrong gcwq.
+                * In that case we must see the new value after rmb(), see
+                * insert_work()->wmb().
+                */
+               smp_rmb();
+               if (gcwq == get_work_gcwq(work)) {
+                       debug_work_deactivate(work);
+
+                       /*
+                        * A delayed work item cannot be grabbed directly
+                        * because it might have linked NO_COLOR work items
+                        * which, if left on the delayed_list, will confuse
+                        * cwq->nr_active management later on and cause
+                        * stall.  Make sure the work item is activated
+                        * before grabbing.
+                        */
+                       if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
+                               cwq_activate_delayed_work(work);
+
+                       list_del_init(&work->entry);
+                       cwq_dec_nr_in_flight(get_work_cwq(work),
+                               get_work_color(work));
+
+                       spin_unlock(&gcwq->lock);
+                       return 1;
+               }
+       }
+       spin_unlock(&gcwq->lock);
+fail:
+       local_irq_restore(*flags);
+       if (work_is_canceling(work))
+               return -ENOENT;
+       cpu_relax();
+       return -EAGAIN;
+}
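
Callers consume this contract with a busy-retry loop: spin on the transient -EAGAIN, give up on -ENOENT, and otherwise proceed owning PENDING with irqs off. The new cancel and mod_delayed_work() paths in this series follow this shape; a condensed sketch:

	static bool foo_grab(struct work_struct *work, bool is_dwork)
	{
		unsigned long flags;
		int ret;

		do {
			/* on failure, irqs are restored before returning */
			ret = try_to_grab_pending(work, is_dwork, &flags);
		} while (unlikely(ret == -EAGAIN));

		if (ret < 0)	/* -ENOENT: someone else is canceling */
			return false;

		/* we own PENDING with irqs off; requeue or clear, then: */
		local_irq_restore(flags);
		return true;
	}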
+
+/**
  * insert_work - insert a work into gcwq
  * @cwq: cwq @work belongs to
  * @work: work to insert
@@ -982,7 +1216,15 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
        unsigned int work_flags;
-       unsigned long flags;
+       unsigned int req_cpu = cpu;
+
+       /*
+        * While a work item is PENDING && off queue, a task trying to
+        * steal the PENDING will busy-loop waiting for it to either get
+        * queued or lose PENDING.  Grabbing PENDING and queueing should
+        * happen with IRQ disabled.
+        */
+       WARN_ON_ONCE(!irqs_disabled());
 
        debug_work_activate(work);
 
@@ -995,21 +1237,22 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        if (!(wq->flags & WQ_UNBOUND)) {
                struct global_cwq *last_gcwq;
 
-               if (unlikely(cpu == WORK_CPU_UNBOUND))
+               if (cpu == WORK_CPU_UNBOUND)
                        cpu = raw_smp_processor_id();
 
                /*
-                * It's multi cpu.  If @wq is non-reentrant and @work
-                * was previously on a different cpu, it might still
-                * be running there, in which case the work needs to
-                * be queued on that cpu to guarantee non-reentrance.
+                * It's multi cpu.  If @work was previously on a different
+                * cpu, it might still be running there, in which case the
+                * work needs to be queued on that cpu to guarantee
+                * non-reentrancy.
                 */
                gcwq = get_gcwq(cpu);
-               if (wq->flags & WQ_NON_REENTRANT &&
-                   (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
+               last_gcwq = get_work_gcwq(work);
+
+               if (last_gcwq && last_gcwq != gcwq) {
                        struct worker *worker;
 
-                       spin_lock_irqsave(&last_gcwq->lock, flags);
+                       spin_lock(&last_gcwq->lock);
 
                        worker = find_worker_executing_work(last_gcwq, work);
 
@@ -1017,22 +1260,23 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                                gcwq = last_gcwq;
                        else {
                                /* meh... not running there, queue here */
-                               spin_unlock_irqrestore(&last_gcwq->lock, flags);
-                               spin_lock_irqsave(&gcwq->lock, flags);
+                               spin_unlock(&last_gcwq->lock);
+                               spin_lock(&gcwq->lock);
                        }
-               } else
-                       spin_lock_irqsave(&gcwq->lock, flags);
+               } else {
+                       spin_lock(&gcwq->lock);
+               }
        } else {
                gcwq = get_gcwq(WORK_CPU_UNBOUND);
-               spin_lock_irqsave(&gcwq->lock, flags);
+               spin_lock(&gcwq->lock);
        }
 
        /* gcwq determined, get cwq and queue */
        cwq = get_cwq(gcwq->cpu, wq);
-       trace_workqueue_queue_work(cpu, cwq, work);
+       trace_workqueue_queue_work(req_cpu, cwq, work);
 
        if (WARN_ON(!list_empty(&work->entry))) {
-               spin_unlock_irqrestore(&gcwq->lock, flags);
+               spin_unlock(&gcwq->lock);
                return;
        }
 
@@ -1050,79 +1294,110 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
        insert_work(cwq, work, worklist, work_flags);
 
-       spin_unlock_irqrestore(&gcwq->lock, flags);
+       spin_unlock(&gcwq->lock);
 }
 
 /**
- * queue_work - queue work on a workqueue
+ * queue_work_on - queue work on specific cpu
+ * @cpu: CPU number to execute work on
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns %false if @work was already on a queue, %true otherwise.
  *
- * We queue the work to the CPU on which it was submitted, but if the CPU dies
- * it can be processed by another CPU.
+ * We queue the work to a specific CPU, the caller must ensure it
+ * can't go away.
  */
-int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+bool queue_work_on(int cpu, struct workqueue_struct *wq,
+                  struct work_struct *work)
 {
-       int ret;
+       bool ret = false;
+       unsigned long flags;
 
-       ret = queue_work_on(get_cpu(), wq, work);
-       put_cpu();
+       local_irq_save(flags);
+
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+               __queue_work(cpu, wq, work);
+               ret = true;
+       }
 
+       local_irq_restore(flags);
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_work);
+EXPORT_SYMBOL_GPL(queue_work_on);
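
Grabbing PENDING and queueing now happen in one irq-disabled section, which is what lets queue_work_on() and everything layered on it run from any context, including IRQ handlers. Usage is unchanged apart from the bool return; a sketch with hypothetical names:

	#include <linux/printk.h>
	#include <linux/workqueue.h>

	static void foo_kick_cpu(int cpu, struct work_struct *work)
	{
		/* the caller must keep @cpu online, e.g. via get_online_cpus() */
		if (!queue_work_on(cpu, system_wq, work))
			pr_debug("foo: work was already pending\n");
	}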
 
 /**
- * queue_work_on - queue work on specific cpu
- * @cpu: CPU number to execute work on
+ * queue_work - queue work on a workqueue
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns %false if @work was already on a queue, %true otherwise.
  *
- * We queue the work to a specific CPU, the caller must ensure it
- * can't go away.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
  */
-int
-queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
-       int ret = 0;
-
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
-               __queue_work(cpu, wq, work);
-               ret = 1;
-       }
-       return ret;
+       return queue_work_on(WORK_CPU_UNBOUND, wq, work);
 }
-EXPORT_SYMBOL_GPL(queue_work_on);
+EXPORT_SYMBOL_GPL(queue_work);
 
-static void delayed_work_timer_fn(unsigned long __data)
+void delayed_work_timer_fn(unsigned long __data)
 {
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 
-       __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
+       /* should have been called from irqsafe timer with irq already off */
+       __queue_work(dwork->cpu, cwq->wq, &dwork->work);
 }
+EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
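
delayed_work_timer_fn() is exported because the workqueue.h initializers now wire dwork->timer to it statically; __queue_delayed_work() below WARNs when the timer was set up any other way. The resulting rule for drivers, sketched:

	struct foo_dev {			/* hypothetical */
		struct delayed_work dwork;
	};

	static void foo_dev_setup(struct foo_dev *dev, work_func_t fn)
	{
		/*
		 * Correct: points dev->dwork.timer at delayed_work_timer_fn
		 * with TIMER_IRQSAFE.  Open-coding init_timer() on
		 * dwork.timer with a private handler now trips the
		 * WARN_ON_ONCE() in __queue_delayed_work().
		 */
		INIT_DELAYED_WORK(&dev->dwork, fn);
	}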
 
-/**
- * queue_delayed_work - queue work on a workqueue after delay
- * @wq: workqueue to use
- * @dwork: delayable work to queue
- * @delay: number of jiffies to wait before queueing
- *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
- */
-int queue_delayed_work(struct workqueue_struct *wq,
-                       struct delayed_work *dwork, unsigned long delay)
+static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+                               struct delayed_work *dwork, unsigned long delay)
 {
-       if (delay == 0)
-               return queue_work(wq, &dwork->work);
+       struct timer_list *timer = &dwork->timer;
+       struct work_struct *work = &dwork->work;
+       unsigned int lcpu;
+
+       WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
+                    timer->data != (unsigned long)dwork);
+       BUG_ON(timer_pending(timer));
+       BUG_ON(!list_empty(&work->entry));
+
+       timer_stats_timer_set_start_info(&dwork->timer);
+
+       /*
+        * This stores cwq for the moment, for the timer_fn.  Note that the
+        * work's gcwq is preserved to allow reentrance detection for
+        * delayed works.
+        */
+       if (!(wq->flags & WQ_UNBOUND)) {
+               struct global_cwq *gcwq = get_work_gcwq(work);
 
-       return queue_delayed_work_on(-1, wq, dwork, delay);
+               /*
+                * If we cannot get the last gcwq from @work directly,
+                * select the last CPU such that it avoids unnecessarily
+                * triggering non-reentrancy check in __queue_work().
+                */
+               lcpu = cpu;
+               if (gcwq)
+                       lcpu = gcwq->cpu;
+               if (lcpu == WORK_CPU_UNBOUND)
+                       lcpu = raw_smp_processor_id();
+       } else {
+               lcpu = WORK_CPU_UNBOUND;
+       }
+
+       set_work_cwq(work, get_cwq(lcpu, wq), 0);
+
+       dwork->cpu = cpu;
+       timer->expires = jiffies + delay;
+
+       if (unlikely(cpu != WORK_CPU_UNBOUND))
+               add_timer_on(timer, cpu);
+       else
+               add_timer(timer);
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work);
 
 /**
  * queue_delayed_work_on - queue work on specific CPU after delay
@@ -1131,53 +1406,100 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns %false if @work was already on a queue, %true otherwise.  If
+ * @delay is zero and @dwork is idle, it will be scheduled for immediate
+ * execution.
  */
-int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-                       struct delayed_work *dwork, unsigned long delay)
+bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+                          struct delayed_work *dwork, unsigned long delay)
 {
-       int ret = 0;
-       struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
+       bool ret = false;
+       unsigned long flags;
 
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
-               unsigned int lcpu;
+       if (!delay)
+               return queue_work_on(cpu, wq, &dwork->work);
 
-               BUG_ON(timer_pending(timer));
-               BUG_ON(!list_empty(&work->entry));
+       /* read the comment in __queue_work() */
+       local_irq_save(flags);
 
-               timer_stats_timer_set_start_info(&dwork->timer);
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+               __queue_delayed_work(cpu, wq, dwork, delay);
+               ret = true;
+       }
 
-               /*
-                * This stores cwq for the moment, for the timer_fn.
-                * Note that the work's gcwq is preserved to allow
-                * reentrance detection for delayed works.
-                */
-               if (!(wq->flags & WQ_UNBOUND)) {
-                       struct global_cwq *gcwq = get_work_gcwq(work);
+       local_irq_restore(flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(queue_delayed_work_on);
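
/*
 * Editor's sketch: the zero-delay short-circuit above means callers no
 * longer need to special-case @delay == 0.  struct my_ctx and
 * my_schedule_retry() are hypothetical.
 */
struct my_ctx {
        struct delayed_work retry_work;
};

static void my_schedule_retry(struct my_ctx *ctx, int cpu, unsigned long delay)
{
        /* delay == 0 falls through to queue_work_on(), bypassing the timer */
        queue_delayed_work_on(cpu, system_wq, &ctx->retry_work, delay);
}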
 
-                       if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
-                               lcpu = gcwq->cpu;
-                       else
-                               lcpu = raw_smp_processor_id();
-               } else
-                       lcpu = WORK_CPU_UNBOUND;
+/**
+ * queue_delayed_work - queue work on a workqueue after delay
+ * @wq: workqueue to use
+ * @dwork: delayable work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
+ */
+bool queue_delayed_work(struct workqueue_struct *wq,
+                       struct delayed_work *dwork, unsigned long delay)
+{
+       return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+EXPORT_SYMBOL_GPL(queue_delayed_work);
 
-               set_work_cwq(work, get_cwq(lcpu, wq), 0);
+/**
+ * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @dwork: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
+ * modify @dwork's timer so that it expires after @delay.  If @delay is
+ * zero, @dwork is guaranteed to be scheduled immediately regardless of its
+ * current state.
+ *
+ * Returns %false if @dwork was idle and queued, %true if @dwork was
+ * pending and its timer was modified.
+ *
+ * This function is safe to call from any context including IRQ handler.
+ * See try_to_grab_pending() for details.
+ */
+bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+                        struct delayed_work *dwork, unsigned long delay)
+{
+       unsigned long flags;
+       int ret;
 
-               timer->expires = jiffies + delay;
-               timer->data = (unsigned long)dwork;
-               timer->function = delayed_work_timer_fn;
+       do {
+               ret = try_to_grab_pending(&dwork->work, true, &flags);
+       } while (unlikely(ret == -EAGAIN));
 
-               if (unlikely(cpu >= 0))
-                       add_timer_on(timer, cpu);
-               else
-                       add_timer(timer);
-               ret = 1;
+       if (likely(ret >= 0)) {
+               __queue_delayed_work(cpu, wq, dwork, delay);
+               local_irq_restore(flags);
        }
+
+       /* -ENOENT from try_to_grab_pending() becomes %true */
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+EXPORT_SYMBOL_GPL(mod_delayed_work_on);
+
+/**
+ * mod_delayed_work - modify delay of or queue a delayed work
+ * @wq: workqueue to use
+ * @dwork: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * mod_delayed_work_on() on local CPU.
+ */
+bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
+                     unsigned long delay)
+{
+       return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+EXPORT_SYMBOL_GPL(mod_delayed_work);
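
/*
 * Editor's sketch of the mod_timer()-like idiom these two functions
 * provide; my_watchdog and my_watchdog_pet() are hypothetical.  Several
 * conversions later in this merge follow exactly this pattern.
 */
static struct delayed_work my_watchdog;

static void my_watchdog_pet(void)
{
        /*
         * before: cancel_delayed_work(&my_watchdog);
         *         schedule_delayed_work(&my_watchdog, 10 * HZ);
         * now a single call, usable even from IRQ context:
         */
        mod_delayed_work(system_wq, &my_watchdog, 10 * HZ);
}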
 
 /**
  * worker_enter_idle - enter idle state
@@ -1305,37 +1627,21 @@ __acquires(&gcwq->lock)
        }
 }
 
-struct idle_rebind {
-       int                     cnt;            /* # workers to be rebound */
-       struct completion       done;           /* all workers rebound */
-};
-
 /*
- * Rebind an idle @worker to its CPU.  During CPU onlining, this has to
- * happen synchronously for idle workers.  worker_thread() will test
- * %WORKER_REBIND before leaving idle and call this function.
+ * Rebind an idle @worker to its CPU.  worker_thread() will test
+ * list_empty(@worker->entry) before leaving idle and call this function.
  */
 static void idle_worker_rebind(struct worker *worker)
 {
        struct global_cwq *gcwq = worker->pool->gcwq;
 
-       /* CPU must be online at this point */
-       WARN_ON(!worker_maybe_bind_and_lock(worker));
-       if (!--worker->idle_rebind->cnt)
-               complete(&worker->idle_rebind->done);
-       spin_unlock_irq(&worker->pool->gcwq->lock);
+       /* CPU may go down again in between, clear UNBOUND only on success */
+       if (worker_maybe_bind_and_lock(worker))
+               worker_clr_flags(worker, WORKER_UNBOUND);
 
-       /* we did our part, wait for rebind_workers() to finish up */
-       wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND));
-
-       /*
-        * rebind_workers() shouldn't finish until all workers passed the
-        * above WORKER_REBIND wait.  Tell it when done.
-        */
-       spin_lock_irq(&worker->pool->gcwq->lock);
-       if (!--worker->idle_rebind->cnt)
-               complete(&worker->idle_rebind->done);
-       spin_unlock_irq(&worker->pool->gcwq->lock);
+       /* rebind complete, become available again */
+       list_add(&worker->entry, &worker->pool->idle_list);
+       spin_unlock_irq(&gcwq->lock);
 }
 
 /*
@@ -1349,16 +1655,8 @@ static void busy_worker_rebind_fn(struct work_struct *work)
        struct worker *worker = container_of(work, struct worker, rebind_work);
        struct global_cwq *gcwq = worker->pool->gcwq;
 
-       worker_maybe_bind_and_lock(worker);
-
-       /*
-        * %WORKER_REBIND must be cleared even if the above binding failed;
-        * otherwise, we may confuse the next CPU_UP cycle or oops / get
-        * stuck by calling idle_worker_rebind() prematurely.  If CPU went
-        * down again inbetween, %WORKER_UNBOUND would be set, so clearing
-        * %WORKER_REBIND is always safe.
-        */
-       worker_clr_flags(worker, WORKER_REBIND);
+       if (worker_maybe_bind_and_lock(worker))
+               worker_clr_flags(worker, WORKER_UNBOUND);
 
        spin_unlock_irq(&gcwq->lock);
 }
@@ -1370,123 +1668,74 @@ static void busy_worker_rebind_fn(struct work_struct *work)
  * @gcwq->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
  * is different for idle and busy ones.
  *
- * The idle ones should be rebound synchronously and idle rebinding should
- * be complete before any worker starts executing work items with
- * concurrency management enabled; otherwise, scheduler may oops trying to
- * wake up non-local idle worker from wq_worker_sleeping().
+ * Idle ones will be removed from the idle_list and woken up.  They will
+ * add themselves back after completing rebind.  This ensures that the
+ * idle_list doesn't contain any unbound workers when re-bound busy workers
+ * try to perform local wake-ups for concurrency management.
  *
- * This is achieved by repeatedly requesting rebinding until all idle
- * workers are known to have been rebound under @gcwq->lock and holding all
- * idle workers from becoming busy until idle rebinding is complete.
+ * Busy workers can rebind after they finish their current work items.
+ * Queueing the rebind work item at the head of the scheduled list is
+ * enough.  Note that nr_running will be properly bumped as busy workers
+ * rebind.
  *
- * Once idle workers are rebound, busy workers can be rebound as they
- * finish executing their current work items.  Queueing the rebind work at
- * the head of their scheduled lists is enough.  Note that nr_running will
- * be properbly bumped as busy workers rebind.
- *
- * On return, all workers are guaranteed to either be bound or have rebind
- * work item scheduled.
+ * On return, all non-manager workers are scheduled for rebind - see
+ * manage_workers() for the manager special case.  Any idle worker
+ * including the manager will not appear on @idle_list until rebind is
+ * complete, making local wake-ups safe.
  */
 static void rebind_workers(struct global_cwq *gcwq)
-       __releases(&gcwq->lock) __acquires(&gcwq->lock)
 {
-       struct idle_rebind idle_rebind;
        struct worker_pool *pool;
-       struct worker *worker;
+       struct worker *worker, *n;
        struct hlist_node *pos;
        int i;
 
        lockdep_assert_held(&gcwq->lock);
 
        for_each_worker_pool(pool, gcwq)
-               lockdep_assert_held(&pool->manager_mutex);
+               lockdep_assert_held(&pool->assoc_mutex);
 
-       /*
-        * Rebind idle workers.  Interlocked both ways.  We wait for
-        * workers to rebind via @idle_rebind.done.  Workers will wait for
-        * us to finish up by watching %WORKER_REBIND.
-        */
-       init_completion(&idle_rebind.done);
-retry:
-       idle_rebind.cnt = 1;
-       INIT_COMPLETION(idle_rebind.done);
-
-       /* set REBIND and kick idle ones, we'll wait for these later */
+       /* dequeue and kick idle ones */
        for_each_worker_pool(pool, gcwq) {
-               list_for_each_entry(worker, &pool->idle_list, entry) {
-                       unsigned long worker_flags = worker->flags;
-
-                       if (worker->flags & WORKER_REBIND)
-                               continue;
-
-                       /* morph UNBOUND to REBIND atomically */
-                       worker_flags &= ~WORKER_UNBOUND;
-                       worker_flags |= WORKER_REBIND;
-                       ACCESS_ONCE(worker->flags) = worker_flags;
-
-                       idle_rebind.cnt++;
-                       worker->idle_rebind = &idle_rebind;
+               list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
+                       /*
+                        * idle workers should be off @pool->idle_list
+                        * until rebind is complete to avoid receiving
+                        * premature local wake-ups.
+                        */
+                       list_del_init(&worker->entry);
 
-                       /* worker_thread() will call idle_worker_rebind() */
+                       /*
+                        * worker_thread() will see the above dequeuing
+                        * and call idle_worker_rebind().
+                        */
                        wake_up_process(worker->task);
                }
        }
 
-       if (--idle_rebind.cnt) {
-               spin_unlock_irq(&gcwq->lock);
-               wait_for_completion(&idle_rebind.done);
-               spin_lock_irq(&gcwq->lock);
-               /* busy ones might have become idle while waiting, retry */
-               goto retry;
-       }
-
-       /* all idle workers are rebound, rebind busy workers */
+       /* rebind busy workers */
        for_each_busy_worker(worker, i, pos, gcwq) {
                struct work_struct *rebind_work = &worker->rebind_work;
-               unsigned long worker_flags = worker->flags;
-
-               /* morph UNBOUND to REBIND atomically */
-               worker_flags &= ~WORKER_UNBOUND;
-               worker_flags |= WORKER_REBIND;
-               ACCESS_ONCE(worker->flags) = worker_flags;
+               struct workqueue_struct *wq;
 
                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
                                     work_data_bits(rebind_work)))
                        continue;
 
-               /* wq doesn't matter, use the default one */
                debug_work_activate(rebind_work);
-               insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
-                           worker->scheduled.next,
-                           work_color_to_flags(WORK_NO_COLOR));
-       }
-
-       /*
-        * All idle workers are rebound and waiting for %WORKER_REBIND to
-        * be cleared inside idle_worker_rebind().  Clear and release.
-        * Clearing %WORKER_REBIND from this foreign context is safe
-        * because these workers are still guaranteed to be idle.
-        *
-        * We need to make sure all idle workers passed WORKER_REBIND wait
-        * in idle_worker_rebind() before returning; otherwise, workers can
-        * get stuck at the wait if hotplug cycle repeats.
-        */
-       idle_rebind.cnt = 1;
-       INIT_COMPLETION(idle_rebind.done);
-
-       for_each_worker_pool(pool, gcwq) {
-               list_for_each_entry(worker, &pool->idle_list, entry) {
-                       worker->flags &= ~WORKER_REBIND;
-                       idle_rebind.cnt++;
-               }
-       }
 
-       wake_up_all(&gcwq->rebind_hold);
+               /*
+                * wq doesn't really matter but let's keep @worker->pool
+                * and @cwq->pool consistent for sanity.
+                */
+               if (worker_pool_pri(worker->pool))
+                       wq = system_highpri_wq;
+               else
+                       wq = system_wq;
 
-       if (--idle_rebind.cnt) {
-               spin_unlock_irq(&gcwq->lock);
-               wait_for_completion(&idle_rebind.done);
-               spin_lock_irq(&gcwq->lock);
+               insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
+                       worker->scheduled.next,
+                       work_color_to_flags(WORK_NO_COLOR));
        }
 }
 
@@ -1844,22 +2093,22 @@ static bool manage_workers(struct worker *worker)
         * grab %POOL_MANAGING_WORKERS to achieve this because that can
         * lead to idle worker depletion (all become busy thinking someone
         * else is managing) which in turn can result in deadlock under
-        * extreme circumstances.  Use @pool->manager_mutex to synchronize
+        * extreme circumstances.  Use @pool->assoc_mutex to synchronize
         * manager against CPU hotplug.
         *
-        * manager_mutex would always be free unless CPU hotplug is in
+        * assoc_mutex would always be free unless CPU hotplug is in
         * progress.  trylock first without dropping @gcwq->lock.
         */
-       if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
+       if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
                spin_unlock_irq(&pool->gcwq->lock);
-               mutex_lock(&pool->manager_mutex);
+               mutex_lock(&pool->assoc_mutex);
                /*
                 * CPU hotplug could have happened while we were waiting
-                * for manager_mutex.  Hotplug itself can't handle us
+                * for assoc_mutex.  Hotplug itself can't handle us
                 * because manager isn't either on idle or busy list, and
                 * @gcwq's state and ours could have deviated.
                 *
-                * As hotplug is now excluded via manager_mutex, we can
+                * As hotplug is now excluded via assoc_mutex, we can
                 * simply try to bind.  It will succeed or fail depending
                 * on @gcwq's current state.  Try it and adjust
                 * %WORKER_UNBOUND accordingly.
@@ -1882,112 +2131,11 @@ static bool manage_workers(struct worker *worker)
        ret |= maybe_create_worker(pool);
 
        pool->flags &= ~POOL_MANAGING_WORKERS;
-       mutex_unlock(&pool->manager_mutex);
+       mutex_unlock(&pool->assoc_mutex);
        return ret;
 }
 
 /**
- * move_linked_works - move linked works to a list
- * @work: start of series of works to be scheduled
- * @head: target list to append @work to
- * @nextp: out paramter for nested worklist walking
- *
- * Schedule linked works starting from @work to @head.  Work series to
- * be scheduled starts at @work and includes any consecutive work with
- * WORK_STRUCT_LINKED set in its predecessor.
- *
- * If @nextp is not NULL, it's updated to point to the next work of
- * the last scheduled work.  This allows move_linked_works() to be
- * nested inside outer list_for_each_entry_safe().
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void move_linked_works(struct work_struct *work, struct list_head *head,
-                             struct work_struct **nextp)
-{
-       struct work_struct *n;
-
-       /*
-        * Linked worklist will always end before the end of the list,
-        * use NULL for list head.
-        */
-       list_for_each_entry_safe_from(work, n, NULL, entry) {
-               list_move_tail(&work->entry, head);
-               if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
-                       break;
-       }
-
-       /*
-        * If we're already inside safe list traversal and have moved
-        * multiple works to the scheduled queue, the next position
-        * needs to be updated.
-        */
-       if (nextp)
-               *nextp = n;
-}
-
-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
-{
-       struct work_struct *work = list_first_entry(&cwq->delayed_works,
-                                                   struct work_struct, entry);
-
-       trace_workqueue_activate_work(work);
-       move_linked_works(work, &cwq->pool->worklist, NULL);
-       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
-       cwq->nr_active++;
-}
-
-/**
- * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
- * @cwq: cwq of interest
- * @color: color of work which left the queue
- * @delayed: for a delayed work
- *
- * A work either has completed or is removed from pending queue,
- * decrement nr_in_flight of its cwq and handle workqueue flushing.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
-                                bool delayed)
-{
-       /* ignore uncolored works */
-       if (color == WORK_NO_COLOR)
-               return;
-
-       cwq->nr_in_flight[color]--;
-
-       if (!delayed) {
-               cwq->nr_active--;
-               if (!list_empty(&cwq->delayed_works)) {
-                       /* one down, submit a delayed one */
-                       if (cwq->nr_active < cwq->max_active)
-                               cwq_activate_first_delayed(cwq);
-               }
-       }
-
-       /* is flush in progress and are we at the flushing tip? */
-       if (likely(cwq->flush_color != color))
-               return;
-
-       /* are there still in-flight works? */
-       if (cwq->nr_in_flight[color])
-               return;
-
-       /* this cwq is done, clear flush_color */
-       cwq->flush_color = -1;
-
-       /*
-        * If this was the last cwq, wake up the first flusher.  It
-        * will handle the rest.
-        */
-       if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
-               complete(&cwq->wq->first_flusher->done);
-}
-
-/**
  * process_one_work - process single work
  * @worker: self
  * @work: work to process
@@ -2030,7 +2178,7 @@ __acquires(&gcwq->lock)
         * necessary to avoid spurious warnings from rescuers servicing the
         * unbound or a disassociated gcwq.
         */
-       WARN_ON_ONCE(!(worker->flags & (WORKER_UNBOUND | WORKER_REBIND)) &&
+       WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
                     !(gcwq->flags & GCWQ_DISASSOCIATED) &&
                     raw_smp_processor_id() != gcwq->cpu);
 
@@ -2046,15 +2194,13 @@ __acquires(&gcwq->lock)
                return;
        }
 
-       /* claim and process */
+       /* claim and dequeue */
        debug_work_deactivate(work);
        hlist_add_head(&worker->hentry, bwh);
        worker->current_work = work;
        worker->current_cwq = cwq;
        work_color = get_work_color(work);
 
-       /* record the current cpu number in the work data and dequeue */
-       set_work_cpu(work, gcwq->cpu);
        list_del_init(&work->entry);
 
        /*
@@ -2071,9 +2217,16 @@ __acquires(&gcwq->lock)
        if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
                wake_up_worker(pool);
 
+       /*
+        * Record the last CPU and clear PENDING which should be the last
+        * update to @work.  Also, do this inside @gcwq->lock so that
+        * PENDING and queued state changes happen together while IRQ is
+        * disabled.
+        */
+       set_work_cpu_and_clear_pending(work, gcwq->cpu);
+
        spin_unlock_irq(&gcwq->lock);
 
-       work_clear_pending(work);
        lock_map_acquire_read(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
@@ -2087,11 +2240,9 @@ __acquires(&gcwq->lock)
        lock_map_release(&cwq->wq->lockdep_map);
 
        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
-               printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
-                      "%s/0x%08x/%d\n",
-                      current->comm, preempt_count(), task_pid_nr(current));
-               printk(KERN_ERR "    last function: ");
-               print_symbol("%s\n", (unsigned long)f);
+               pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
+                      "     last function: %pf\n",
+                      current->comm, preempt_count(), task_pid_nr(current), f);
                debug_show_held_locks(current);
                dump_stack();
        }
@@ -2106,7 +2257,7 @@ __acquires(&gcwq->lock)
        hlist_del_init(&worker->hentry);
        worker->current_work = NULL;
        worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color, false);
+       cwq_dec_nr_in_flight(cwq, work_color);
 }
 
 /**
@@ -2151,18 +2302,17 @@ static int worker_thread(void *__worker)
 woke_up:
        spin_lock_irq(&gcwq->lock);
 
-       /*
-        * DIE can be set only while idle and REBIND set while busy has
-        * @worker->rebind_work scheduled.  Checking here is enough.
-        */
-       if (unlikely(worker->flags & (WORKER_REBIND | WORKER_DIE))) {
+       /* we are off idle list if destruction or rebind is requested */
+       if (unlikely(list_empty(&worker->entry))) {
                spin_unlock_irq(&gcwq->lock);
 
+               /* if DIE is set, destruction is requested */
                if (worker->flags & WORKER_DIE) {
                        worker->task->flags &= ~PF_WQ_WORKER;
                        return 0;
                }
 
+               /* otherwise, rebind */
                idle_worker_rebind(worker);
                goto woke_up;
        }
@@ -2645,8 +2795,8 @@ reflush:
 
                if (++flush_cnt == 10 ||
                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
-                       pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
-                                  wq->name, flush_cnt);
+                       pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n",
+                               wq->name, flush_cnt);
                goto reflush;
        }
 
@@ -2657,8 +2807,7 @@ reflush:
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
-                            bool wait_executing)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 {
        struct worker *worker = NULL;
        struct global_cwq *gcwq;
@@ -2680,13 +2829,12 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
                cwq = get_work_cwq(work);
                if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
                        goto already_gone;
-       } else if (wait_executing) {
+       } else {
                worker = find_worker_executing_work(gcwq, work);
                if (!worker)
                        goto already_gone;
                cwq = worker->current_cwq;
-       } else
-               goto already_gone;
+       }
 
        insert_wq_barrier(cwq, barr, work, worker);
        spin_unlock_irq(&gcwq->lock);
@@ -2713,15 +2861,8 @@ already_gone:
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
  *
- * Wait until @work has finished execution.  This function considers
- * only the last queueing instance of @work.  If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
+ * Wait until @work has finished execution.  @work is guaranteed to be idle
+ * on return if it hasn't been requeued since flush started.
  *
  * RETURNS:
  * %true if flush_work() waited for the work to finish execution,
@@ -2734,140 +2875,36 @@ bool flush_work(struct work_struct *work)
        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);
 
-       if (start_flush_work(work, &barr, true)) {
+       if (start_flush_work(work, &barr)) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
                return true;
-       } else
-               return false;
-}
-EXPORT_SYMBOL_GPL(flush_work);
-
-static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
-{
-       struct wq_barrier barr;
-       struct worker *worker;
-
-       spin_lock_irq(&gcwq->lock);
-
-       worker = find_worker_executing_work(gcwq, work);
-       if (unlikely(worker))
-               insert_wq_barrier(worker->current_cwq, &barr, work, worker);
-
-       spin_unlock_irq(&gcwq->lock);
-
-       if (unlikely(worker)) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
-               return true;
-       } else
+       } else {
                return false;
-}
-
-static bool wait_on_work(struct work_struct *work)
-{
-       bool ret = false;
-       int cpu;
-
-       might_sleep();
-
-       lock_map_acquire(&work->lockdep_map);
-       lock_map_release(&work->lockdep_map);
-
-       for_each_gcwq_cpu(cpu)
-               ret |= wait_on_cpu_work(get_gcwq(cpu), work);
-       return ret;
-}
-
-/**
- * flush_work_sync - wait until a work has finished execution
- * @work: the work to flush
- *
- * Wait until @work has finished execution.  On return, it's
- * guaranteed that all queueing instances of @work which happened
- * before this function is called are finished.  In other words, if
- * @work hasn't been requeued since this function was called, @work is
- * guaranteed to be idle on return.
- *
- * RETURNS:
- * %true if flush_work_sync() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work_sync(struct work_struct *work)
-{
-       struct wq_barrier barr;
-       bool pending, waited;
-
-       /* we'll wait for executions separately, queue barr only if pending */
-       pending = start_flush_work(work, &barr, false);
-
-       /* wait for executions to finish */
-       waited = wait_on_work(work);
-
-       /* wait for the pending one */
-       if (pending) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
        }
-
-       return pending || waited;
-}
-EXPORT_SYMBOL_GPL(flush_work_sync);
-
-/*
- * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
- * so this work can't be re-armed in any way.
- */
-static int try_to_grab_pending(struct work_struct *work)
-{
-       struct global_cwq *gcwq;
-       int ret = -1;
-
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
-               return 0;
-
-       /*
-        * The queueing is in progress, or it is already queued. Try to
-        * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-        */
-       gcwq = get_work_gcwq(work);
-       if (!gcwq)
-               return ret;
-
-       spin_lock_irq(&gcwq->lock);
-       if (!list_empty(&work->entry)) {
-               /*
-                * This work is queued, but perhaps we locked the wrong gcwq.
-                * In that case we must see the new value after rmb(), see
-                * insert_work()->wmb().
-                */
-               smp_rmb();
-               if (gcwq == get_work_gcwq(work)) {
-                       debug_work_deactivate(work);
-                       list_del_init(&work->entry);
-                       cwq_dec_nr_in_flight(get_work_cwq(work),
-                               get_work_color(work),
-                               *work_data_bits(work) & WORK_STRUCT_DELAYED);
-                       ret = 1;
-               }
-       }
-       spin_unlock_irq(&gcwq->lock);
-
-       return ret;
 }
+EXPORT_SYMBOL_GPL(flush_work);
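
/*
 * Editor's sketch: with every workqueue now non-reentrant, plain
 * flush_work() gives the guarantee that previously required
 * flush_work_sync().  struct my_port and my_port_quiesce() are
 * hypothetical.
 */
struct my_port {
        struct work_struct tx_work;
};

static void my_port_quiesce(struct my_port *port)
{
        flush_work(&port->tx_work);
        /* every prior queueing of tx_work has finished executing here */
}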
 
-static bool __cancel_work_timer(struct work_struct *work,
-                               struct timer_list* timer)
+static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
+       unsigned long flags;
        int ret;
 
        do {
-               ret = (timer && likely(del_timer(timer)));
-               if (!ret)
-                       ret = try_to_grab_pending(work);
-               wait_on_work(work);
+               ret = try_to_grab_pending(work, is_dwork, &flags);
+               /*
+                * If someone else is canceling, wait for the same event it
+                * would be waiting for before retrying.
+                */
+               if (unlikely(ret == -ENOENT))
+                       flush_work(work);
        } while (unlikely(ret < 0));
 
+       /* tell other tasks trying to grab @work to back off */
+       mark_work_canceling(work);
+       local_irq_restore(flags);
+
+       flush_work(work);
        clear_work_data(work);
        return ret;
 }
@@ -2892,7 +2929,7 @@ static bool __cancel_work_timer(struct work_struct *work,
  */
 bool cancel_work_sync(struct work_struct *work)
 {
-       return __cancel_work_timer(work, NULL);
+       return __cancel_work_timer(work, false);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
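
/*
 * Editor's sketch: typical teardown ordering around cancel_work_sync();
 * struct my_card and my_card_remove() are hypothetical.
 */
struct my_card {
        struct work_struct irq_work;
};

static void my_card_remove(struct my_card *card)
{
        /* quiesce the interrupt source first, then kill the work */
        cancel_work_sync(&card->irq_work); /* idle and not running on return */
}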
 
@@ -2910,33 +2947,44 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  */
 bool flush_delayed_work(struct delayed_work *dwork)
 {
+       local_irq_disable();
        if (del_timer_sync(&dwork->timer))
-               __queue_work(raw_smp_processor_id(),
+               __queue_work(dwork->cpu,
                             get_work_cwq(&dwork->work)->wq, &dwork->work);
+       local_irq_enable();
        return flush_work(&dwork->work);
 }
 EXPORT_SYMBOL(flush_delayed_work);
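
/*
 * Editor's sketch: flush_delayed_work() now requeues a pending @dwork on
 * dwork->cpu before flushing.  struct my_stats and my_stats_sync() are
 * hypothetical.
 */
struct my_stats {
        struct delayed_work update_work;
};

static void my_stats_sync(struct my_stats *stats)
{
        flush_delayed_work(&stats->update_work);
        /* a pending update has been forced through and completed here */
}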
 
 /**
- * flush_delayed_work_sync - wait for a dwork to finish
- * @dwork: the delayed work to flush
+ * cancel_delayed_work - cancel a delayed work
+ * @dwork: delayed_work to cancel
  *
- * Delayed timer is cancelled and the pending work is queued for
- * execution immediately.  Other than timer handling, its behavior
- * is identical to flush_work_sync().
+ * Kill off a pending delayed_work.  Returns %true if @dwork was pending
+ * and canceled; %false if it wasn't pending.  Note that the work
+ * callback function may still be running on return unless this function
+ * returns %true and the work doesn't re-arm itself.  Explicitly flush or
+ * cancel_delayed_work_sync() to wait on it.
  *
- * RETURNS:
- * %true if flush_work_sync() waited for the work to finish execution,
- * %false if it was already idle.
+ * This function is safe to call from any context including IRQ handler.
  */
-bool flush_delayed_work_sync(struct delayed_work *dwork)
+bool cancel_delayed_work(struct delayed_work *dwork)
 {
-       if (del_timer_sync(&dwork->timer))
-               __queue_work(raw_smp_processor_id(),
-                            get_work_cwq(&dwork->work)->wq, &dwork->work);
-       return flush_work_sync(&dwork->work);
+       unsigned long flags;
+       int ret;
+
+       do {
+               ret = try_to_grab_pending(&dwork->work, true, &flags);
+       } while (unlikely(ret == -EAGAIN));
+
+       if (unlikely(ret < 0))
+               return false;
+
+       set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
+       local_irq_restore(flags);
+       return true;
 }
-EXPORT_SYMBOL(flush_delayed_work_sync);
+EXPORT_SYMBOL(cancel_delayed_work);
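
/*
 * Editor's sketch: the reimplementation above is IRQ safe, so a driver
 * can cancel its I/O timeout from the completion interrupt itself.
 * struct my_nic and my_nic_irq() are hypothetical.
 */
struct my_nic {
        struct delayed_work timeout_work;
};

static irqreturn_t my_nic_irq(int irq, void *data)
{
        struct my_nic *nic = data;

        cancel_delayed_work(&nic->timeout_work); /* fine in hardirq context */
        return IRQ_HANDLED;
}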
 
 /**
  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
@@ -2949,54 +2997,39 @@ EXPORT_SYMBOL(flush_delayed_work_sync);
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
-       return __cancel_work_timer(&dwork->work, &dwork->timer);
+       return __cancel_work_timer(&dwork->work, true);
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
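
/*
 * Editor's sketch: the _sync variant for sleepable teardown paths.
 * Re-arming by the callback is blocked while the cancel is in progress,
 * so @dwork is truly idle on return.  struct my_disk is hypothetical.
 */
struct my_disk {
        struct delayed_work probe_work;
};

static void my_disk_release(struct my_disk *disk)
{
        cancel_delayed_work_sync(&disk->probe_work); /* may sleep */
}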
 
 /**
- * schedule_work - put work task in global workqueue
- * @work: job to be done
- *
- * Returns zero if @work was already on the kernel-global workqueue and
- * non-zero otherwise.
- *
- * This puts a job in the kernel-global workqueue if it was not already
- * queued and leaves it in the same position on the kernel-global
- * workqueue otherwise.
- */
-int schedule_work(struct work_struct *work)
-{
-       return queue_work(system_wq, work);
-}
-EXPORT_SYMBOL(schedule_work);
-
-/*
  * schedule_work_on - put work task on a specific cpu
  * @cpu: cpu to put the work task on
  * @work: job to be done
  *
  * This puts a job on a specific cpu
  */
-int schedule_work_on(int cpu, struct work_struct *work)
+bool schedule_work_on(int cpu, struct work_struct *work)
 {
        return queue_work_on(cpu, system_wq, work);
 }
 EXPORT_SYMBOL(schedule_work_on);
 
 /**
- * schedule_delayed_work - put work task in global workqueue after delay
- * @dwork: job to be done
- * @delay: number of jiffies to wait or 0 for immediate execution
+ * schedule_work - put work task in global workqueue
+ * @work: job to be done
  *
- * After waiting for a given time this puts a job in the kernel-global
- * workqueue.
+ * Returns %false if @work was already on the kernel-global workqueue and
+ * %true otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
  */
-int schedule_delayed_work(struct delayed_work *dwork,
-                                       unsigned long delay)
+bool schedule_work(struct work_struct *work)
 {
-       return queue_delayed_work(system_wq, dwork, delay);
+       return queue_work(system_wq, work);
 }
-EXPORT_SYMBOL(schedule_delayed_work);
+EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
@@ -3007,14 +3040,28 @@ EXPORT_SYMBOL(schedule_delayed_work);
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
-int schedule_delayed_work_on(int cpu,
-                       struct delayed_work *dwork, unsigned long delay)
+bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
+                             unsigned long delay)
 {
        return queue_delayed_work_on(cpu, system_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
+ * schedule_delayed_work - put work task in global workqueue after delay
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ */
+bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
+{
+       return queue_delayed_work(system_wq, dwork, delay);
+}
+EXPORT_SYMBOL(schedule_delayed_work);
+
+/**
  * schedule_on_each_cpu - execute a function synchronously on each online CPU
  * @func: the function to call
  *
@@ -3161,9 +3208,8 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
        int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
 
        if (max_active < 1 || max_active > lim)
-               printk(KERN_WARNING "workqueue: max_active %d requested for %s "
-                      "is out of range, clamping between %d and %d\n",
-                      max_active, name, 1, lim);
+               pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
+                       max_active, name, 1, lim);
 
        return clamp_val(max_active, 1, lim);
 }
@@ -3319,6 +3365,26 @@ void destroy_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(destroy_workqueue);
 
 /**
+ * cwq_set_max_active - adjust max_active of a cwq
+ * @cwq: target cpu_workqueue_struct
+ * @max_active: new max_active value.
+ *
+ * Set @cwq->max_active to @max_active and activate delayed works if
+ * increased.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
+{
+       cwq->max_active = max_active;
+
+       while (!list_empty(&cwq->delayed_works) &&
+              cwq->nr_active < cwq->max_active)
+               cwq_activate_first_delayed(cwq);
+}
+
+/**
  * workqueue_set_max_active - adjust max_active of a workqueue
  * @wq: target workqueue
  * @max_active: new max_active value.
@@ -3345,7 +3411,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
                if (!(wq->flags & WQ_FREEZABLE) ||
                    !(gcwq->flags & GCWQ_FREEZING))
-                       get_cwq(gcwq->cpu, wq)->max_active = max_active;
+                       cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active);
 
                spin_unlock_irq(&gcwq->lock);
        }
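
/*
 * Editor's sketch: thanks to cwq_set_max_active(), raising the limit now
 * immediately releases queued-but-delayed work items instead of waiting
 * for in-flight ones to retire.  my_throttled_wq is hypothetical and
 * assumed created with e.g. alloc_workqueue("my_wq", 0, 1).
 */
static struct workqueue_struct *my_throttled_wq;

static void my_unthrottle(void)
{
        workqueue_set_max_active(my_throttled_wq, 16);
}
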
@@ -3440,23 +3506,23 @@ EXPORT_SYMBOL_GPL(work_busy);
  */
 
 /* claim manager positions of all pools */
-static void gcwq_claim_management_and_lock(struct global_cwq *gcwq)
+static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
 {
        struct worker_pool *pool;
 
        for_each_worker_pool(pool, gcwq)
-               mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
+               mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
        spin_lock_irq(&gcwq->lock);
 }
 
 /* release manager positions */
-static void gcwq_release_management_and_unlock(struct global_cwq *gcwq)
+static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
 {
        struct worker_pool *pool;
 
        spin_unlock_irq(&gcwq->lock);
        for_each_worker_pool(pool, gcwq)
-               mutex_unlock(&pool->manager_mutex);
+               mutex_unlock(&pool->assoc_mutex);
 }
 
 static void gcwq_unbind_fn(struct work_struct *work)
@@ -3469,7 +3535,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
        BUG_ON(gcwq->cpu != smp_processor_id());
 
-       gcwq_claim_management_and_lock(gcwq);
+       gcwq_claim_assoc_and_lock(gcwq);
 
        /*
         * We've claimed all manager positions.  Make all workers unbound
@@ -3486,7 +3552,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
        gcwq->flags |= GCWQ_DISASSOCIATED;
 
-       gcwq_release_management_and_unlock(gcwq);
+       gcwq_release_assoc_and_unlock(gcwq);
 
        /*
         * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3514,7 +3580,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
  * Workqueues should be brought up before normal priority CPU notifiers.
  * This will be registered high priority CPU notifier.
  */
-static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
                                               unsigned long action,
                                               void *hcpu)
 {
@@ -3542,10 +3608,10 @@ static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
-               gcwq_claim_management_and_lock(gcwq);
+               gcwq_claim_assoc_and_lock(gcwq);
                gcwq->flags &= ~GCWQ_DISASSOCIATED;
                rebind_workers(gcwq);
-               gcwq_release_management_and_unlock(gcwq);
+               gcwq_release_assoc_and_unlock(gcwq);
                break;
        }
        return NOTIFY_OK;
@@ -3555,7 +3621,7 @@ static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
  * Workqueues should be brought down after normal priority CPU notifiers.
  * This will be registered as low priority CPU notifier.
  */
-static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
                                                 unsigned long action,
                                                 void *hcpu)
 {
@@ -3566,7 +3632,7 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
        case CPU_DOWN_PREPARE:
                /* unbinding should happen on the local CPU */
                INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
-               schedule_work_on(cpu, &unbind_work);
+               queue_work_on(cpu, system_highpri_wq, &unbind_work);
                flush_work(&unbind_work);
                break;
        }
@@ -3735,11 +3801,7 @@ void thaw_workqueues(void)
                                continue;
 
                        /* restore max_active and repopulate worklist */
-                       cwq->max_active = wq->saved_max_active;
-
-                       while (!list_empty(&cwq->delayed_works) &&
-                              cwq->nr_active < cwq->max_active)
-                               cwq_activate_first_delayed(cwq);
+                       cwq_set_max_active(cwq, wq->saved_max_active);
                }
 
                for_each_worker_pool(pool, gcwq)
@@ -3759,8 +3821,12 @@ static int __init init_workqueues(void)
        unsigned int cpu;
        int i;
 
+       /* make sure we have enough bits for OFFQ CPU number */
+       BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
+                    WORK_CPU_LAST);
+
        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-       cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
+       hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
 
        /* initialize gcwqs */
        for_each_gcwq_cpu(cpu) {
@@ -3786,11 +3852,9 @@ static int __init init_workqueues(void)
                        setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
                                    (unsigned long)pool);
 
-                       mutex_init(&pool->manager_mutex);
+                       mutex_init(&pool->assoc_mutex);
                        ida_init(&pool->worker_ida);
                }
-
-               init_waitqueue_head(&gcwq->rebind_hold);
        }
 
        /* create the initial worker */
@@ -3813,17 +3877,14 @@ static int __init init_workqueues(void)
        }
 
        system_wq = alloc_workqueue("events", 0, 0);
+       system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
        system_long_wq = alloc_workqueue("events_long", 0, 0);
-       system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
                                            WQ_UNBOUND_MAX_ACTIVE);
        system_freezable_wq = alloc_workqueue("events_freezable",
                                              WQ_FREEZABLE, 0);
-       system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
-                       WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
-       BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
-              !system_unbound_wq || !system_freezable_wq ||
-               !system_nrt_freezable_wq);
+       BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
+              !system_unbound_wq || !system_freezable_wq);
        return 0;
 }
 early_initcall(init_workqueues);
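
/*
 * Editor's sketch: the new system_highpri_wq created above is used like
 * any other system workqueue, just serviced by WQ_HIGHPRI workers (the
 * CPU-down path above queues its unbind work there).  my_urgent_work is
 * hypothetical.
 */
static struct work_struct my_urgent_work;

static void my_kick_urgent(void)
{
        queue_work(system_highpri_wq, &my_urgent_work);
}
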
index c685475..1133911 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -900,7 +900,7 @@ static void __cpuinit start_cpu_timer(int cpu)
         */
        if (keventd_up() && reap_work->work.func == NULL) {
                init_reap_node(cpu);
-               INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
+               INIT_DEFERRABLE_WORK(reap_work, cache_reap);
                schedule_delayed_work_on(cpu, reap_work,
                                        __round_jiffies_relative(HZ, cpu));
        }
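
/*
 * Editor's sketch of the rename applied in this and the following
 * conversions: only the initializer name changes, behavior is identical.
 * my_reap_work and my_reap_fn are hypothetical.
 */
static void my_reap_fn(struct work_struct *work);
static struct delayed_work my_reap_work;

static void my_reap_init(int cpu)
{
        /* was: INIT_DELAYED_WORK_DEFERRABLE(&my_reap_work, my_reap_fn); */
        INIT_DEFERRABLE_WORK(&my_reap_work, my_reap_fn);
        schedule_delayed_work_on(cpu, &my_reap_work,
                                 __round_jiffies_relative(HZ, cpu));
}
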
index df7a674..b3e3b9d 100644
@@ -1157,7 +1157,7 @@ static void __cpuinit start_cpu_timer(int cpu)
 {
        struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
-       INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+       INIT_DEFERRABLE_WORK(work, vmstat_update);
        schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
index 6449bae..505f0ce 100644
@@ -1083,7 +1083,7 @@ int p9_trans_fd_init(void)
 
 void p9_trans_fd_exit(void)
 {
-       flush_work_sync(&p9_poll_work);
+       flush_work(&p9_poll_work);
        v9fs_unregister_trans(&p9_tcp_trans);
        v9fs_unregister_trans(&p9_unix_trans);
        v9fs_unregister_trans(&p9_fd_trans);
index 56d6361..b8d7c70 100644
@@ -222,8 +222,8 @@ void __dst_free(struct dst_entry *dst)
        if (dst_garbage.timer_inc > DST_GC_INC) {
                dst_garbage.timer_inc = DST_GC_INC;
                dst_garbage.timer_expires = DST_GC_MIN;
-               cancel_delayed_work(&dst_gc_work);
-               schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
+               mod_delayed_work(system_wq, &dst_gc_work,
+                                dst_garbage.timer_expires);
        }
        spin_unlock_bh(&dst_garbage.lock);
 }
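
/*
 * Editor's note on this conversion pattern: the old cancel + schedule
 * pair was not atomic and could race with other queueing;
 * mod_delayed_work() does the same thing in one step.  Sketch of when to
 * use which (my_kick() is hypothetical):
 */
static void my_kick(struct delayed_work *dw, bool urgent)
{
        if (urgent)
                mod_delayed_work(system_wq, dw, 0); /* override any pending timer */
        else
                schedule_delayed_work(dw, HZ);      /* no-op if already pending */
}
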
index c3519c6..8e397a6 100644
@@ -120,22 +120,13 @@ static void linkwatch_schedule_work(int urgent)
                delay = 0;
 
        /*
-        * This is true if we've scheduled it immeditately or if we don't
-        * need an immediate execution and it's already pending.
+        * If urgent, schedule immediate execution; otherwise, don't
+        * override the existing timer.
         */
-       if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
-               return;
-
-       /* Don't bother if there is nothing urgent. */
-       if (!test_bit(LW_URGENT, &linkwatch_flags))
-               return;
-
-       /* It's already running which is good enough. */
-       if (!__cancel_delayed_work(&linkwatch_work))
-               return;
-
-       /* Otherwise we reschedule it again for immediate execution. */
-       schedule_delayed_work(&linkwatch_work, 0);
+       if (test_bit(LW_URGENT, &linkwatch_flags))
+               mod_delayed_work(system_wq, &linkwatch_work, 0);
+       else
+               schedule_delayed_work(&linkwatch_work, delay);
 }
 
 
index 117afaf..112c6e2 100644
@@ -1545,7 +1545,7 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl)
                panic("cannot allocate neighbour cache hashes");
 
        rwlock_init(&tbl->lock);
-       INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
+       INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
        schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
        setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
        skb_queue_head_init_class(&tbl->proxy_queue,
index 88e7c2f..45295ca 100644
@@ -370,7 +370,7 @@ static int dsa_remove(struct platform_device *pdev)
        if (dst->link_poll_needed)
                del_timer_sync(&dst->link_poll_timer);
 
-       flush_work_sync(&dst->link_poll_work);
+       flush_work(&dst->link_poll_work);
 
        for (i = 0; i < dst->pd->nr_chips; i++) {
                struct dsa_switch *ds = dst->ds[i];
index c7527f6..000e3d2 100644
@@ -194,7 +194,7 @@ void __init inet_initpeers(void)
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
                        NULL);
 
-       INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
+       INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
index 24c55c5..c9d931e 100644
@@ -164,8 +164,7 @@ static void rfkill_schedule_global_op(enum rfkill_sched_op op)
        rfkill_op_pending = true;
        if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
                /* bypass the limiter for EPO */
-               cancel_delayed_work(&rfkill_op_work);
-               schedule_delayed_work(&rfkill_op_work, 0);
+               mod_delayed_work(system_wq, &rfkill_op_work, 0);
                rfkill_last_scheduled = jiffies;
        } else
                rfkill_schedule_ratelimited();
index 2afd2a8..2a68bb3 100644
@@ -1635,7 +1635,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
 
 void __init cache_initialize(void)
 {
-       INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
+       INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
 }
 
 int cache_register_net(struct cache_detail *cd, struct net *net)
index 61ab7c8..d67c97b 100644
@@ -62,7 +62,7 @@ void key_schedule_gc(time_t gc_at)
 
        if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
                kdebug("IMMEDIATE");
-               queue_work(system_nrt_wq, &key_gc_work);
+               schedule_work(&key_gc_work);
        } else if (gc_at < key_gc_next_run) {
                kdebug("DEFERRED");
                key_gc_next_run = gc_at;
@@ -77,7 +77,7 @@ void key_schedule_gc(time_t gc_at)
 void key_schedule_gc_links(void)
 {
        set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
-       queue_work(system_nrt_wq, &key_gc_work);
+       schedule_work(&key_gc_work);
 }
 
 /*
@@ -120,7 +120,7 @@ void key_gc_keytype(struct key_type *ktype)
        set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
 
        kdebug("schedule");
-       queue_work(system_nrt_wq, &key_gc_work);
+       schedule_work(&key_gc_work);
 
        kdebug("sleep");
        wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
@@ -369,7 +369,7 @@ maybe_resched:
        }
 
        if (gc_state & KEY_GC_REAP_AGAIN)
-               queue_work(system_nrt_wq, &key_gc_work);
+               schedule_work(&key_gc_work);
        kleave(" [end %x]", gc_state);
        return;
 
index 50d96d4..3cbe352 100644
@@ -598,7 +598,7 @@ void key_put(struct key *key)
                key_check(key);
 
                if (atomic_dec_and_test(&key->usage))
-                       queue_work(system_nrt_wq, &key_gc_work);
+                       schedule_work(&key_gc_work);
        }
 }
 EXPORT_SYMBOL(key_put);
index dde5c9c..ef68d71 100644
@@ -141,7 +141,7 @@ void snd_ak4113_reinit(struct ak4113 *chip)
 {
        chip->init = 1;
        mb();
-       flush_delayed_work_sync(&chip->work);
+       flush_delayed_work(&chip->work);
        ak4113_init_regs(chip);
        /* bring up statistics / event queueing */
        chip->init = 0;
index fdf3c1b..816e7d2 100644
@@ -154,7 +154,7 @@ void snd_ak4114_reinit(struct ak4114 *chip)
 {
        chip->init = 1;
        mb();
-       flush_delayed_work_sync(&chip->work);
+       flush_delayed_work(&chip->work);
        ak4114_init_regs(chip);
        /* bring up statistics / event queueing */
        chip->init = 0;
index ab8738e..e9fa2d0 100644
@@ -573,8 +573,8 @@ static void oxygen_card_free(struct snd_card *card)
        oxygen_shutdown(chip);
        if (chip->irq >= 0)
                free_irq(chip->irq, chip);
-       flush_work_sync(&chip->spdif_input_bits_work);
-       flush_work_sync(&chip->gpio_work);
+       flush_work(&chip->spdif_input_bits_work);
+       flush_work(&chip->gpio_work);
        chip->model.cleanup(chip);
        kfree(chip->model_data);
        mutex_destroy(&chip->mutex);
@@ -751,8 +751,8 @@ static int oxygen_pci_suspend(struct device *dev)
        spin_unlock_irq(&chip->reg_lock);
 
        synchronize_irq(chip->irq);
-       flush_work_sync(&chip->spdif_input_bits_work);
-       flush_work_sync(&chip->gpio_work);
+       flush_work(&chip->spdif_input_bits_work);
+       flush_work(&chip->gpio_work);
        chip->interrupt_mask = saved_interrupt_mask;
 
        pci_disable_device(pci);
index d26c8ae..a4cae06 100644
@@ -1601,7 +1601,7 @@ static int  wm8350_codec_remove(struct snd_soc_codec *codec)
 
        /* if there was any work waiting then we run it now and
         * wait for its completion */
-       flush_delayed_work_sync(&codec->dapm.delayed_work);
+       flush_delayed_work(&codec->dapm.delayed_work);
 
        wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
index 13bff87..2e4a775 100644
@@ -1509,7 +1509,7 @@ static int wm8753_probe(struct snd_soc_codec *codec)
 /* power down chip */
 static int wm8753_remove(struct snd_soc_codec *codec)
 {
-       flush_delayed_work_sync(&codec->dapm.delayed_work);
+       flush_delayed_work(&codec->dapm.delayed_work);
        wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
        return 0;
index c501af6..cf3d0b0 100644
@@ -591,7 +591,7 @@ int snd_soc_suspend(struct device *dev)
 
        /* close any waiting streams and save state */
        for (i = 0; i < card->num_rtd; i++) {
-               flush_delayed_work_sync(&card->rtd[i].delayed_work);
+               flush_delayed_work(&card->rtd[i].delayed_work);
                card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level;
        }
 
@@ -1848,7 +1848,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
        /* make sure any delayed work runs */
        for (i = 0; i < card->num_rtd; i++) {
                struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
-               flush_delayed_work_sync(&rtd->delayed_work);
+               flush_delayed_work(&rtd->delayed_work);
        }
 
        /* remove auxiliary devices */
@@ -1892,7 +1892,7 @@ int snd_soc_poweroff(struct device *dev)
         * now, we're shutting down so no imminent restart. */
        for (i = 0; i < card->num_rtd; i++) {
                struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
-               flush_delayed_work_sync(&rtd->delayed_work);
+               flush_delayed_work(&rtd->delayed_work);
        }
 
        snd_soc_dapm_shutdown(card);
index 7d7e2aa..67a35e9 100644
@@ -90,7 +90,7 @@ irqfd_shutdown(struct work_struct *work)
         * We know no new events will be scheduled at this point, so block
         * until all previously outstanding events have completed
         */
-       flush_work_sync(&irqfd->inject);
+       flush_work(&irqfd->inject);
 
        /*
         * It is now safe to release the object's resources