Fix up for make allyesconfig: convert the remaining workqueue users to the new API. Work functions now take a struct work_struct pointer and recover their context with container_of() instead of being handed an opaque void *; the extra data argument is dropped from INIT_WORK() and DECLARE_WORK(); and work items that are scheduled with a delay move to struct delayed_work, DECLARE_DELAYED_WORK() and INIT_DELAYED_WORK().
Signed-off-by: David Howells <dhowells@redhat.com>
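For reference, a minimal sketch of the conversion pattern applied throughout the diff below; struct foo_dev, foo_irq_bh() and foo_poll() are hypothetical names used for illustration only, not code from any of the drivers touched here.

#include <linux/workqueue.h>

struct foo_dev {
	struct work_struct  irq_bh;	/* plain work item */
	struct delayed_work poll;	/* work that reschedules itself */
	unsigned long bh_runs;
};

/* Old style: the handler received an opaque pointer passed at
 * INIT_WORK() time:
 *
 *	static void foo_irq_bh(void *data)
 *	{
 *		struct foo_dev *dev = data;
 *		...
 *	}
 *	INIT_WORK(&dev->irq_bh, foo_irq_bh, dev);
 *
 * New style: the handler receives the work_struct itself and recovers
 * its container with container_of(); INIT_WORK() loses the data argument.
 */
static void foo_irq_bh(struct work_struct *work)
{
	struct foo_dev *dev = container_of(work, struct foo_dev, irq_bh);

	/* ... bottom-half processing ... */
	dev->bh_runs++;
}

/* Work that runs after a delay now uses struct delayed_work; the handler's
 * argument is the embedded work member, hence ".work" in container_of().
 */
static void foo_poll(struct work_struct *work)
{
	struct foo_dev *dev = container_of(work, struct foo_dev, poll.work);

	/* ... poll the hardware, then run again in one second ... */
	schedule_delayed_work(&dev->poll, HZ);
}

static void foo_init(struct foo_dev *dev)
{
	INIT_WORK(&dev->irq_bh, foo_irq_bh);
	INIT_DELAYED_WORK(&dev->poll, foo_poll);

	schedule_work(&dev->irq_bh);
	schedule_delayed_work(&dev->poll, HZ);
}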
}
}
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
{
on_each_cpu(mce_checkregs, NULL, 1, 1);
schedule_delayed_work(&mce_work, MCE_RATE);
struct warm_boot_cpu_info {
struct completion *complete;
+ struct work_struct task;
int apicid;
int cpu;
};
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
{
- struct warm_boot_cpu_info *info = p;
+ struct warm_boot_cpu_info *info =
+ container_of(work, struct warm_boot_cpu_info, task);
do_boot_cpu(info->apicid, info->cpu);
complete(info->complete);
}
{
DECLARE_COMPLETION_ONSTACK(done);
struct warm_boot_cpu_info info;
- struct work_struct task;
int apicid, ret;
struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
info.complete = &done;
info.apicid = apicid;
info.cpu = cpu;
- INIT_WORK(&task, do_warm_boot_cpu, &info);
+ INIT_WORK(&info.task, do_warm_boot_cpu);
tsc_sync_disabled = 1;
clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
KERNEL_PGD_PTRS);
flush_tlb_all();
- schedule_work(&task);
+ schedule_work(&info.task);
wait_for_completion(&done);
tsc_sync_disabled = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
{
unsigned int cpu;
{
int ret;
- INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+ INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (!ret)
/* EEH event workqueue setup. */
static DEFINE_SPINLOCK(eeh_eventlist_lock);
LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
/* Serialize reset sequences for a given pci device */
DEFINE_MUTEX(eeh_event_mutex);
* eeh_thread_launcher
* @dummy - unused
*/
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
{
if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
printk(KERN_ERR "Failed to start EEH daemon\n");
int flags);
static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
char *page);
-static void idt77252_softint(void *dev_id);
+static void idt77252_softint(struct work_struct *work);
static struct atmdev_ops idt77252_ops =
}
static void
-idt77252_softint(void *dev_id)
+idt77252_softint(struct work_struct *work)
{
- struct idt77252_dev *card = dev_id;
+ struct idt77252_dev *card =
+ container_of(work, struct idt77252_dev, tqueue);
u32 stat;
int done;
card->pcidev = pcidev;
sprintf(card->name, "idt77252-%d", card->index);
- INIT_WORK(&card->tqueue, idt77252_softint, (void *)card);
+ INIT_WORK(&card->tqueue, idt77252_softint);
membase = pci_resource_start(pcidev, 1);
srambase = pci_resource_start(pcidev, 2);
void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
void aoecmd_ata_rsp(struct sk_buff *);
void aoecmd_cfg_rsp(struct sk_buff *);
-void aoecmd_sleepwork(void *vp);
+void aoecmd_sleepwork(struct work_struct *);
struct sk_buff *new_skb(ulong);
int aoedev_init(void);
/* this function performs work that has been deferred until sleeping is OK
*/
void
-aoecmd_sleepwork(void *vp)
+aoecmd_sleepwork(struct work_struct *work)
{
- struct aoedev *d = (struct aoedev *) vp;
+ struct aoedev *d = container_of(work, struct aoedev, work);
if (d->flags & DEVFL_GDALLOC)
aoeblk_gdalloc(d);
kfree(d);
return NULL;
}
- INIT_WORK(&d->work, aoecmd_sleepwork, d);
+ INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
init_timer(&d->timer);
d->timer.data = (ulong) d;
static void run_fsm(void);
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
-static DECLARE_WORK(fsm_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
static void schedule_fsm(void)
{
if (!nice)
- schedule_work(&fsm_tq);
+ schedule_delayed_work(&fsm_tq, 0);
else
schedule_delayed_work(&fsm_tq, nice-1);
}
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
{
run_fsm();
}
#include <linux/sched.h>
#include <linux/workqueue.h>
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
static void (* ps_continuation)(void);
static int (* ps_ready)(void);
static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
-static DECLARE_WORK(ps_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
static void ps_set_intr(void (*continuation)(void),
int (*ready)(void),
if (!ps_tq_active) {
ps_tq_active = 1;
if (!ps_nice)
- schedule_work(&ps_tq);
+ schedule_delayed_work(&ps_tq, 0);
else
schedule_delayed_work(&ps_tq, ps_nice-1);
}
spin_unlock_irqrestore(&ps_spinlock,flags);
}
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
{
void (*con)(void);
unsigned long flags;
}
ps_tq_active = 1;
if (!ps_nice)
- schedule_work(&ps_tq);
+ schedule_delayed_work(&ps_tq, 0);
else
schedule_delayed_work(&ps_tq, ps_nice-1);
spin_unlock_irqrestore(&ps_spinlock,flags);
return IRQ_RETVAL(handled);
}
-static void carm_fsm_task (void *_data)
+static void carm_fsm_task (struct work_struct *work)
{
- struct carm_host *host = _data;
+ struct carm_host *host =
+ container_of(work, struct carm_host, fsm_task);
unsigned long flags;
unsigned int state;
int rc, i, next_dev;
host->pdev = pdev;
host->flags = pci_dac ? FL_DAC : 0;
spin_lock_init(&host->lock);
- INIT_WORK(&host->fsm_task, carm_fsm_task, host);
+ INIT_WORK(&host->fsm_task, carm_fsm_task);
init_completion(&host->probe_comp);
for (i = 0; i < ARRAY_SIZE(host->req); i++)
int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
-static void ub_reset_task(void *arg);
+static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
struct ub_capacity *ret);
schedule_work(&sc->reset_work);
}
-static void ub_reset_task(void *arg)
+static void ub_reset_task(struct work_struct *work)
{
- struct ub_dev *sc = arg;
+ struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
unsigned long flags;
struct list_head *p;
struct ub_lun *lun;
usb_init_urb(&sc->work_urb);
tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
atomic_set(&sc->poison, 0);
- INIT_WORK(&sc->reset_work, ub_reset_task, sc);
+ INIT_WORK(&sc->reset_work, ub_reset_task);
init_waitqueue_head(&sc->reset_wait);
init_timer(&sc->work_timer);
}
}
-static void bcm203x_work(void *user_data)
+static void bcm203x_work(struct work_struct *work)
{
- struct bcm203x_data *data = user_data;
+ struct bcm203x_data *data =
+ container_of(work, struct bcm203x_data, work);
if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
BT_ERR("Can't submit URB");
release_firmware(firmware);
- INIT_WORK(&data->work, bcm203x_work, (void *) data);
+ INIT_WORK(&data->work, bcm203x_work);
usb_set_intfdata(intf, data);
* had to poll every port to see if that port needed servicing.
*/
static void
-do_softint(void *private_)
+do_softint(struct work_struct *work)
{
- struct cyclades_port *info = (struct cyclades_port *) private_;
+ struct cyclades_port *info =
+ container_of(work, struct cyclades_port, tqueue);
struct tty_struct *tty;
tty = info->tty;
info->blocked_open = 0;
info->default_threshold = 0;
info->default_timeout = 0;
- INIT_WORK(&info->tqueue, do_softint, info);
+ INIT_WORK(&info->tqueue, do_softint);
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
init_waitqueue_head(&info->shutdown_wait);
info->blocked_open = 0;
info->default_threshold = 0;
info->default_timeout = 0;
- INIT_WORK(&info->tqueue, do_softint, info);
+ INIT_WORK(&info->tqueue, do_softint);
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
init_waitqueue_head(&info->shutdown_wait);
static void
-via_dmablit_workqueue(void *data)
+via_dmablit_workqueue(struct work_struct *work)
{
- drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+ drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
drm_device_t *dev = blitq->dev;
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
}
DRM_INIT_WAITQUEUE(&blitq->busy_queue);
- INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
+ INIT_WORK(&blitq->wq, via_dmablit_workqueue);
init_timer(&blitq->poll_timer);
blitq->poll_timer.function = &via_dmablit_timer;
blitq->poll_timer.data = (unsigned long) blitq;
static int info_ioctl(struct tty_struct *, struct file *,
unsigned int, unsigned long);
static void pc_set_termios(struct tty_struct *, struct termios *);
-static void do_softint(void *);
+static void do_softint(struct work_struct *work);
static void pc_stop(struct tty_struct *);
static void pc_start(struct tty_struct *);
static void pc_throttle(struct tty_struct * tty);
ch->brdchan = bc;
ch->mailbox = gd;
- INIT_WORK(&ch->tqueue, do_softint, ch);
+ INIT_WORK(&ch->tqueue, do_softint);
ch->board = &boards[crd];
spin_lock_irqsave(&epca_lock, flags);
/* --------------------- Begin do_softint ----------------------- */
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
{ /* Begin do_softint */
- struct channel *ch = (struct channel *) private_;
+ struct channel *ch = container_of(work, struct channel, tqueue);
/* Called in response to a modem change event */
if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */
struct tty_struct *tty = ch->tty;
* -------------------------------------------------------------------
*/
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
{
- struct esp_struct *info = (struct esp_struct *) private_;
+ struct esp_struct *info =
+ container_of(work, struct esp_struct, tqueue);
struct tty_struct *tty;
tty = info->tty;
* do_serial_hangup() -> tty->hangup() -> esp_hangup()
*
*/
-static void do_serial_hangup(void *private_)
+static void do_serial_hangup(struct work_struct *work)
{
- struct esp_struct *info = (struct esp_struct *) private_;
+ struct esp_struct *info =
+ container_of(work, struct esp_struct, tqueue_hangup);
struct tty_struct *tty;
tty = info->tty;
info->magic = ESP_MAGIC;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
- INIT_WORK(&info->tqueue, do_softint, info);
- INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+ INIT_WORK(&info->tqueue, do_softint);
+ INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
info->config.rx_timeout = rx_timeout;
info->config.flow_on = flow_on;
info->config.flow_off = flow_off;
* Routine to poll RTC seconds field for change as often as possible,
* after first RTC_UIE use timer to reduce polling
*/
-static void genrtc_troutine(void *data)
+static void genrtc_troutine(struct work_struct *work)
{
unsigned int tmp = get_rtc_ss();
irq_active = 1;
stop_rtc_timers = 0;
lostint = 0;
- INIT_WORK(&genrtc_task, genrtc_troutine, NULL);
+ INIT_WORK(&genrtc_task, genrtc_troutine);
oldsecs = get_rtc_ss();
init_timer(&timer_task);
#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
struct hvsi_struct {
- struct work_struct writer;
+ struct delayed_work writer;
struct work_struct handshaker;
wait_queue_head_t emptyq; /* woken when outbuf is emptied */
wait_queue_head_t stateq; /* woken when HVSI state changes */
return 0;
}
-static void hvsi_handshaker(void *arg)
+static void hvsi_handshaker(struct work_struct *work)
{
- struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+ struct hvsi_struct *hp =
+ container_of(work, struct hvsi_struct, handshaker);
if (hvsi_handshake(hp) >= 0)
return;
}
/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
-static void hvsi_write_worker(void *arg)
+static void hvsi_write_worker(struct work_struct *work)
{
- struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+ struct hvsi_struct *hp =
+ container_of(work, struct hvsi_struct, writer.work);
unsigned long flags;
#ifdef DEBUG
static long start_j = 0;
}
hp = &hvsi_ports[hvsi_count];
- INIT_WORK(&hp->writer, hvsi_write_worker, hp);
- INIT_WORK(&hp->handshaker, hvsi_handshaker, hp);
+ INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
+ INIT_WORK(&hp->handshaker, hvsi_handshaker);
init_waitqueue_head(&hp->emptyq);
init_waitqueue_head(&hp->stateq);
spin_lock_init(&hp->lock);
static void serviceOutgoingFifo(i2eBordStrPtr);
// Functions defined in ip2.c as part of interrupt handling
-static void do_input(void *);
-static void do_status(void *);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
//***************
//* Debug Data *
pCh->ClosingWaitTime = 30*HZ;
// Initialize task queue objects
- INIT_WORK(&pCh->tqueue_input, do_input, pCh);
- INIT_WORK(&pCh->tqueue_status, do_status, pCh);
+ INIT_WORK(&pCh->tqueue_input, do_input);
+ INIT_WORK(&pCh->tqueue_status, do_status);
#ifdef IP2DEBUG_TRACE
pCh->trace = ip2trace;
#ifdef USE_IQ
schedule_work(&pCh->tqueue_input);
#else
- do_input(pCh);
+ do_input(&pCh->tqueue_input);
#endif
// Note we do not need to maintain any flow-control credits at this
#ifdef USE_IQ
schedule_work(&pCh->tqueue_status);
#else
- do_status(pCh);
+ do_status(&pCh->tqueue_status);
#endif
}
}
unsigned int set, unsigned int clear);
static void set_irq(int, int);
-static void ip2_interrupt_bh(i2eBordStrPtr pB);
+static void ip2_interrupt_bh(struct work_struct *work);
static irqreturn_t ip2_interrupt(int irq, void *dev_id);
static void ip2_poll(unsigned long arg);
static inline void service_all_boards(void);
-static void do_input(void *p);
-static void do_status(void *p);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
static void ip2_wait_until_sent(PTTY,int);
pCh++;
}
ex_exit:
- INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB);
+ INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh);
return;
err_release_region:
/******************************************************************************/
-/* Function: ip2_interrupt_bh(pB) */
-/* Parameters: pB - pointer to the board structure */
+/* Function: ip2_interrupt_bh(work) */
+/* Parameters: work - pointer to the work_struct embedded in the board structure */
/* Returns: Nothing */
/* */
/* Description: */
/* */
/******************************************************************************/
static void
-ip2_interrupt_bh(i2eBordStrPtr pB)
+ip2_interrupt_bh(struct work_struct *work)
{
+ i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt);
// pB better well be set or we have a problem! We can only get
// here from the IMMEDIATE queue. Here, we process the boards.
// Checking pB doesn't cost much and it saves us from the sanity checkers.
ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
}
-static void do_input(void *p)
+static void do_input(struct work_struct *work)
{
- i2ChanStrPtr pCh = p;
+ i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input);
unsigned long flags;
ip2trace(CHANN, ITRC_INPUT, 21, 0 );
}
}
-static void do_status(void *p)
+static void do_status(struct work_struct *work)
{
- i2ChanStrPtr pCh = p;
+ i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status);
int status;
status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
/* Interrupt handlers */
-static void isicom_bottomhalf(void *data)
+static void isicom_bottomhalf(struct work_struct *work)
{
- struct isi_port *port = (struct isi_port *) data;
+ struct isi_port *port = container_of(work, struct isi_port, bh_tqueue);
struct tty_struct *tty = port->tty;
if (!tty)
}
/* hangup et all */
-static void do_isicom_hangup(void *data)
+static void do_isicom_hangup(struct work_struct *work)
{
- struct isi_port *port = data;
+ struct isi_port *port = container_of(work, struct isi_port, hangup_tq);
struct tty_struct *tty;
tty = port->tty;
port->channel = channel;
port->close_delay = 50 * HZ/100;
port->closing_wait = 3000 * HZ/100;
- INIT_WORK(&port->hangup_tq, do_isicom_hangup, port);
- INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port);
+ INIT_WORK(&port->hangup_tq, do_isicom_hangup);
+ INIT_WORK(&port->bh_tqueue, isicom_bottomhalf);
port->status = 0;
init_waitqueue_head(&port->open_wait);
init_waitqueue_head(&port->close_wait);
/*
* static functions:
*/
-static void do_moxa_softint(void *);
+static void do_moxa_softint(struct work_struct *);
static int moxa_open(struct tty_struct *, struct file *);
static void moxa_close(struct tty_struct *, struct file *);
static int moxa_write(struct tty_struct *, const unsigned char *, int);
for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
ch->type = PORT_16550A;
ch->port = i;
- INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
+ INIT_WORK(&ch->tqueue, do_moxa_softint);
ch->tty = NULL;
ch->close_delay = 5 * HZ / 10;
ch->closing_wait = 30 * HZ;
module_init(moxa_init);
module_exit(moxa_exit);
-static void do_moxa_softint(void *private_)
+static void do_moxa_softint(struct work_struct *work)
{
- struct moxa_str *ch = (struct moxa_str *) private_;
+ struct moxa_str *ch = container_of(work, struct moxa_str, tqueue);
struct tty_struct *tty;
if (ch && (tty = ch->tty)) {
/* static void mxser_poll(unsigned long); */
static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *);
-static void mxser_do_softint(void *);
+static void mxser_do_softint(struct work_struct *);
static int mxser_open(struct tty_struct *, struct file *);
static void mxser_close(struct tty_struct *, struct file *);
static int mxser_write(struct tty_struct *, const unsigned char *, int);
info->custom_divisor = hwconf->baud_base[i] * 16;
info->close_delay = 5 * HZ / 10;
info->closing_wait = 30 * HZ;
- INIT_WORK(&info->tqueue, mxser_do_softint, info);
+ INIT_WORK(&info->tqueue, mxser_do_softint);
info->normal_termios = mxvar_sdriver->init_termios;
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
return 0;
}
-static void mxser_do_softint(void *private_)
+static void mxser_do_softint(struct work_struct *work)
{
- struct mxser_struct *info = private_;
+ struct mxser_struct *info =
+ container_of(work, struct mxser_struct, tqueue);
struct tty_struct *tty;
tty = info->tty;
/*
* Bottom half interrupt handlers
*/
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
static void bh_transmit(MGSLPC_INFO *info);
static void bh_status(MGSLPC_INFO *info);
memset(info, 0, sizeof(MGSLPC_INFO));
info->magic = MGSLPC_MAGIC;
- INIT_WORK(&info->task, bh_handler, info);
+ INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
return rc;
}
-static void bh_handler(void* Context)
+static void bh_handler(struct work_struct *work)
{
- MGSLPC_INFO *info = (MGSLPC_INFO*)Context;
+ MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
int action;
if (!info)
sonypi_device.bluetooth_power = state;
}
-static void input_keyrelease(void *data)
+static void input_keyrelease(struct work_struct *work)
{
struct sonypi_keypress kp;
goto err_inpdev_unregister;
}
- INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL);
+ INIT_WORK(&sonypi_device.input_work, input_keyrelease);
}
sonypi_enable(0);
* do_sx_hangup() -> tty->hangup() -> sx_hangup()
*
*/
-static void do_sx_hangup(void *private_)
+static void do_sx_hangup(struct work_struct *work)
{
- struct specialix_port *port = (struct specialix_port *) private_;
+ struct specialix_port *port =
+ container_of(work, struct specialix_port, tqueue_hangup);
struct tty_struct *tty;
func_enter();
}
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
{
- struct specialix_port *port = (struct specialix_port *) private_;
+ struct specialix_port *port =
+ container_of(work, struct specialix_port, tqueue);
struct tty_struct *tty;
func_enter();
memset(sx_port, 0, sizeof(sx_port));
for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
sx_port[i].magic = SPECIALIX_MAGIC;
- INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]);
- INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]);
+ INIT_WORK(&sx_port[i].tqueue, do_softint);
+ INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
sx_port[i].close_delay = 50 * HZ/100;
sx_port[i].closing_wait = 3000 * HZ/100;
init_waitqueue_head(&sx_port[i].open_wait);
/*
* Bottom half interrupt handlers
*/
-static void mgsl_bh_handler(void* Context);
+static void mgsl_bh_handler(struct work_struct *work);
static void mgsl_bh_receive(struct mgsl_struct *info);
static void mgsl_bh_transmit(struct mgsl_struct *info);
static void mgsl_bh_status(struct mgsl_struct *info);
/*
* Perform bottom half processing of work items queued by ISR.
*/
-static void mgsl_bh_handler(void* Context)
+static void mgsl_bh_handler(struct work_struct *work)
{
- struct mgsl_struct *info = (struct mgsl_struct*)Context;
+ struct mgsl_struct *info =
+ container_of(work, struct mgsl_struct, task);
int action;
if (!info)
} else {
memset(info, 0, sizeof(struct mgsl_struct));
info->magic = MGSL_MAGIC;
- INIT_WORK(&info->task, mgsl_bh_handler, info);
+ INIT_WORK(&info->task, mgsl_bh_handler);
info->max_frame_size = 4096;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
static void set_rate(struct slgt_info *info, u32 data_rate);
static int bh_action(struct slgt_info *info);
-static void bh_handler(void* context);
+static void bh_handler(struct work_struct *work);
static void bh_transmit(struct slgt_info *info);
static void isr_serial(struct slgt_info *info);
static void isr_rdma(struct slgt_info *info);
/*
* perform bottom half processing
*/
-static void bh_handler(void* context)
+static void bh_handler(struct work_struct *work)
{
- struct slgt_info *info = context;
+ struct slgt_info *info = container_of(work, struct slgt_info, task);
int action;
if (!info)
} else {
memset(info, 0, sizeof(struct slgt_info));
info->magic = MGSL_MAGIC;
- INIT_WORK(&info->task, bh_handler, info);
+ INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->raw_rx_size = DMABUFSIZE;
info->close_delay = 5*HZ/10;
spin_lock_irqsave(&info->lock, flags);
info->pending_bh |= BH_RECEIVE;
spin_unlock_irqrestore(&info->lock, flags);
- bh_handler(info);
+ bh_handler(&info->task);
}
static void set_rate(SLMP_INFO *info, u32 data_rate);
static int bh_action(SLMP_INFO *info);
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
static void bh_receive(SLMP_INFO *info);
static void bh_transmit(SLMP_INFO *info);
static void bh_status(SLMP_INFO *info);
/* Perform bottom half processing of work items queued by ISR.
*/
-void bh_handler(void* Context)
+void bh_handler(struct work_struct *work)
{
- SLMP_INFO *info = (SLMP_INFO*)Context;
+ SLMP_INFO *info = container_of(work, SLMP_INFO, task);
int action;
if (!info)
} else {
memset(info, 0, sizeof(SLMP_INFO));
info->magic = MGSL_MAGIC;
- INIT_WORK(&info->task, bh_handler, info);
+ INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
schedule_work(&chip->work);
}
-static void timeout_work(void *ptr)
+static void timeout_work(struct work_struct *work)
{
- struct tpm_chip *chip = ptr;
+ struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
down(&chip->buffer_mutex);
atomic_set(&chip->data_pending, 0);
init_MUTEX(&chip->tpm_mutex);
INIT_LIST_HEAD(&chip->list);
- INIT_WORK(&chip->work, timeout_work, chip);
+ INIT_WORK(&chip->work, timeout_work);
init_timer(&chip->user_read_timer);
chip->user_read_timer.function = user_reader_timeout;
#include <linux/connector.h>
#include <linux/delay.h>
-void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(struct work_struct *work)
{
- struct cn_callback_data *d = data;
+ struct cn_callback_entry *cbq =
+ container_of(work, struct cn_callback_entry, work.work);
+ struct cn_callback_data *d = &cbq->data;
d->callback(d->callback_priv);
memcpy(&cbq->id.id, id, sizeof(struct cb_id));
cbq->data.callback = callback;
- INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
+ INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
return cbq;
}
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
- if (likely(!test_bit(0, &__cbq->work.pending) &&
+ if (likely(!test_bit(WORK_STRUCT_PENDING,
+ &__cbq->work.work.management) &&
__cbq->data.ddata == NULL)) {
__cbq->data.callback_priv = msg;
__cbq->data.ddata = data;
__cbq->data.destruct_data = destruct_data;
- if (queue_work(dev->cbdev->cn_queue,
- &__cbq->work))
+ if (queue_delayed_work(
+ dev->cbdev->cn_queue,
+ &__cbq->work, 0))
err = 0;
} else {
- struct work_struct *w;
struct cn_callback_data *d;
+ struct cn_callback_entry *__new_cbq;
- w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
- if (w) {
- d = (struct cn_callback_data *)(w+1);
-
+ __new_cbq = kzalloc(sizeof(*__new_cbq), GFP_ATOMIC);
+ if (__new_cbq) {
+ d = &__new_cbq->data;
d->callback_priv = msg;
d->callback = __cbq->data.callback;
d->ddata = data;
d->destruct_data = destruct_data;
- d->free = w;
+ d->free = __new_cbq;
- INIT_LIST_HEAD(&w->entry);
- w->pending = 0;
- w->func = &cn_queue_wrapper;
- w->data = d;
- init_timer(&w->timer);
+ INIT_DELAYED_WORK(&__new_cbq->work,
+ &cn_queue_wrapper);
- if (queue_work(dev->cbdev->cn_queue, w))
+ if (queue_delayed_work(
+ dev->cbdev->cn_queue,
+ &__new_cbq->work, 0))
err = 0;
else {
- kfree(w);
+ kfree(__new_cbq);
err = -EINVAL;
}
} else
#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
struct cpu_dbs_info_s {
struct cpufreq_policy *cur_policy;
* is recursive for the same process. -Venki
*/
static DEFINE_MUTEX (dbs_mutex);
-static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
struct dbs_tuners {
unsigned int sampling_rate;
}
}
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
{
int i;
lock_cpu_hotplug();
static inline void dbs_timer_init(void)
{
- INIT_WORK(&dbs_work, do_dbs_timer, NULL);
schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
return;
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
cputime64_t prev_cpu_idle;
cputime64_t prev_cpu_wall;
struct cpufreq_policy *cur_policy;
- struct work_struct work;
+ struct delayed_work work;
+ enum dbs_sample sample_type;
unsigned int enable;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
}
}
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
{
unsigned int cpu = smp_processor_id();
struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ enum dbs_sample sample_type = dbs_info->sample_type;
/* We want all CPUs to do sampling nearly on same jiffy */
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+ /* Permit rescheduling of this work item */
+ work_release(work);
+
delay -= jiffies % delay;
if (!dbs_info->enable)
return;
/* Common NORMAL_SAMPLE setup */
- INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+ dbs_info->sample_type = DBS_NORMAL_SAMPLE;
if (!dbs_tuners_ins.powersave_bias ||
- (unsigned long) data == DBS_NORMAL_SAMPLE) {
+ sample_type == DBS_NORMAL_SAMPLE) {
lock_cpu_hotplug();
dbs_check_cpu(dbs_info);
unlock_cpu_hotplug();
if (dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
- INIT_WORK(&dbs_info->work, do_dbs_timer,
- (void *)DBS_SUB_SAMPLE);
+ dbs_info->sample_type = DBS_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
}
} else {
delay -= jiffies % delay;
ondemand_powersave_bias_init();
- INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+ INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+ dbs_info->sample_type = DBS_NORMAL_SAMPLE;
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
}
return t1;
}
-static void ds1374_set_work(void *arg)
+static ulong new_time;
+
+static void ds1374_set_work(struct work_struct *work)
{
ulong t1, t2;
int limit = 10; /* arbitrary retry limit */
- t1 = *(ulong *) arg;
+ t1 = new_time;
mutex_lock(&ds1374_mutex);
"can't confirm time set from rtc chip\n");
}
-static ulong new_time;
-
static struct workqueue_struct *ds1374_workqueue;
-static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time);
+static DECLARE_WORK(ds1374_work, ds1374_set_work);
int ds1374_set_rtc_time(ulong nowtime)
{
if (in_interrupt())
queue_work(ds1374_workqueue, &ds1374_work);
else
- ds1374_set_work(&new_time);
+ ds1374_set_work(NULL);
return 0;
}
#include "config_roms.h"
-static void delayed_reset_bus(void * __reset_info)
+static void delayed_reset_bus(struct work_struct *work)
{
- struct hpsb_host *host = (struct hpsb_host*)__reset_info;
+ struct hpsb_host *host =
+ container_of(work, struct hpsb_host, delayed_reset.work);
int generation = host->csr.generation + 1;
/* The generation field rolls over to 2 rather than 0 per IEEE
atomic_set(&h->generation, 0);
- INIT_WORK(&h->delayed_reset, delayed_reset_bus, h);
+ INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);
init_timer(&h->timeout);
h->timeout.data = (unsigned long) h;
* Config ROM in the near future. */
reset_delay = HZ;
- PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host);
+ PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
schedule_delayed_work(&host->delayed_reset, reset_delay);
return 0;
struct class_device class_dev;
int update_config_rom;
- struct work_struct delayed_reset;
+ struct delayed_work delayed_reset;
unsigned int config_roms;
struct list_head addr_space;
scsi_unblock_requests(scsi_id->scsi_host);
}
-static void sbp2util_write_orb_pointer(void *p)
+static void sbp2util_write_orb_pointer(struct work_struct *work)
{
+ struct scsi_id_instance_data *scsi_id =
+ container_of(work, struct scsi_id_instance_data,
+ protocol_work.work);
quadlet_t data[2];
- data[0] = ORB_SET_NODE_ID(
- ((struct scsi_id_instance_data *)p)->hi->host->node_id);
- data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
+ data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
+ data[1] = scsi_id->last_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
- sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8);
+ sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
}
-static void sbp2util_write_doorbell(void *p)
+static void sbp2util_write_doorbell(struct work_struct *work)
{
- sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4);
+ struct scsi_id_instance_data *scsi_id =
+ container_of(work, struct scsi_id_instance_data,
+ protocol_work.work);
+ sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
}
/*
INIT_LIST_HEAD(&scsi_id->scsi_list);
spin_lock_init(&scsi_id->sbp2_command_orb_lock);
atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
- INIT_WORK(&scsi_id->protocol_work, NULL, NULL);
+ INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);
ud->device.driver_data = scsi_id;
* We do not accept new commands until the job is over.
*/
scsi_block_requests(scsi_id->scsi_host);
- PREPARE_WORK(&scsi_id->protocol_work,
+ PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
last_orb ? sbp2util_write_doorbell:
- sbp2util_write_orb_pointer,
- scsi_id);
- schedule_work(&scsi_id->protocol_work);
+ sbp2util_write_orb_pointer);
+ schedule_delayed_work(&scsi_id->protocol_work, 0);
}
}
unsigned workarounds;
atomic_t state;
- struct work_struct protocol_work;
+ struct delayed_work protocol_work;
};
/* For use in scsi_id_instance_data.state */
int status;
};
-static void process_req(void *data);
+static void process_req(struct work_struct *work);
static DEFINE_MUTEX(lock);
static LIST_HEAD(req_list);
-static DECLARE_WORK(work, process_req, NULL);
+static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;
void rdma_addr_register_client(struct rdma_addr_client *client)
return ret;
}
-static void process_req(void *data)
+static void process_req(struct work_struct *work)
{
struct addr_req *req, *temp_req;
struct sockaddr_in *src_in, *dst_in;
kfree(tprops);
}
-static void ib_cache_task(void *work_ptr)
+static void ib_cache_task(struct work_struct *_work)
{
- struct ib_update_work *work = work_ptr;
+ struct ib_update_work *work =
+ container_of(_work, struct ib_update_work, work);
ib_cache_update(work->device, work->port_num);
kfree(work);
event->event == IB_EVENT_CLIENT_REREGISTER) {
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (work) {
- INIT_WORK(&work->work, ib_cache_task, work);
+ INIT_WORK(&work->work, ib_cache_task);
work->device = event->device;
work->port_num = event->element.port_num;
schedule_work(&work->work);
};
struct cm_work {
- struct work_struct work;
+ struct delayed_work work;
struct list_head list;
struct cm_port *port;
struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
atomic_t work_count;
};
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
return ERR_PTR(-ENOMEM);
timewait_info->work.local_id = local_id;
- INIT_WORK(&timewait_info->work.work, cm_work_handler,
- &timewait_info->work);
+ INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
return timewait_info;
}
}
}
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
{
- struct cm_work *work = data;
+ struct cm_work *work = container_of(_work, struct cm_work, work.work);
int ret;
switch (work->cm_event.event) {
* we need to find the cm_id once we're in the context of the
* worker thread, rather than holding a reference on it.
*/
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->local_id = cm_id->local_id;
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
- queue_work(cm.wq, &work->work);
+ queue_delayed_work(cm.wq, &work->work, 0);
out:
return ret;
}
return;
}
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = (struct cm_port *)mad_agent->context;
- queue_work(cm.wq, &work->work);
+ queue_delayed_work(cm.wq, &work->work, 0);
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
-static void cma_work_handler(void *data)
+static void cma_work_handler(struct work_struct *_work)
{
- struct cma_work *work = data;
+ struct cma_work *work = container_of(_work, struct cma_work, work);
struct rdma_id_private *id_priv = work->id;
int destroy = 0;
return -ENOMEM;
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
return -ENOMEM;
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
}
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ADDR_QUERY;
work->new_state = CMA_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
* thread asleep on the destroy_comp list vs. an object destroyed
* here synchronously when the last reference is removed.
*/
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
{
- struct iwcm_work *work = arg, lwork;
+ struct iwcm_work lwork, *work =
+ container_of(_work, struct iwcm_work, work);
struct iwcm_id_private *cm_id_priv = work->cm_id;
unsigned long flags;
int empty;
goto out;
}
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_WORK(&work->work, cm_work_handler);
work->cm_id = cm_id_priv;
work->event = *iw_event;
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
struct ib_mad_agent_private *agent_priv,
u8 mgmt_class);
INIT_LIST_HEAD(&mad_agent_priv->wait_list);
INIT_LIST_HEAD(&mad_agent_priv->done_list);
INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
- INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+ INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
INIT_LIST_HEAD(&mad_agent_priv->local_list);
- INIT_WORK(&mad_agent_priv->local_work, local_completions,
- mad_agent_priv);
+ INIT_WORK(&mad_agent_priv->local_work, local_completions);
atomic_set(&mad_agent_priv->refcount, 1);
init_completion(&mad_agent_priv->comp);
/*
* IB MAD completion callback
*/
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
{
struct ib_mad_port_private *port_priv;
struct ib_wc wc;
- port_priv = (struct ib_mad_port_private *)data;
+ port_priv = container_of(work, struct ib_mad_port_private, work);
ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
}
EXPORT_SYMBOL(ib_cancel_mad);
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_local_private *local;
struct ib_wc wc;
struct ib_mad_send_wc mad_send_wc;
- mad_agent_priv = (struct ib_mad_agent_private *)data;
+ mad_agent_priv =
+ container_of(work, struct ib_mad_agent_private, local_work);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->local_list)) {
return ret;
}
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags, delay;
- mad_agent_priv = (struct ib_mad_agent_private *)data;
+ mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+ timed_work.work);
mad_send_wc.vendor_err = 0;
spin_lock_irqsave(&mad_agent_priv->lock, flags);
ret = -ENOMEM;
goto error8;
}
- INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+ INIT_WORK(&port_priv->work, ib_mad_completion_handler);
spin_lock_irqsave(&ib_mad_port_list_lock, flags);
list_add_tail(&port_priv->port_list, &ib_mad_port_list);
struct list_head send_list;
struct list_head wait_list;
struct list_head done_list;
- struct work_struct timed_work;
+ struct delayed_work timed_work;
unsigned long timeout;
struct list_head local_list;
struct work_struct local_work;
struct mad_rmpp_recv {
struct ib_mad_agent_private *agent;
struct list_head list;
- struct work_struct timeout_work;
- struct work_struct cleanup_work;
+ struct delayed_work timeout_work;
+ struct delayed_work cleanup_work;
struct completion comp;
enum rmpp_state state;
spinlock_t lock;
}
}
-static void recv_timeout_handler(void *data)
+static void recv_timeout_handler(struct work_struct *work)
{
- struct mad_rmpp_recv *rmpp_recv = data;
+ struct mad_rmpp_recv *rmpp_recv =
+ container_of(work, struct mad_rmpp_recv, timeout_work.work);
struct ib_mad_recv_wc *rmpp_wc;
unsigned long flags;
ib_free_recv_mad(rmpp_wc);
}
-static void recv_cleanup_handler(void *data)
+static void recv_cleanup_handler(struct work_struct *work)
{
- struct mad_rmpp_recv *rmpp_recv = data;
+ struct mad_rmpp_recv *rmpp_recv =
+ container_of(work, struct mad_rmpp_recv, cleanup_work.work);
unsigned long flags;
spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
rmpp_recv->agent = agent;
init_completion(&rmpp_recv->comp);
- INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
- INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
+ INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
+ INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
spin_lock_init(&rmpp_recv->lock);
rmpp_recv->state = RMPP_STATE_ACTIVE;
atomic_set(&rmpp_recv->refcount, 1);
kfree(sm_ah);
}
-static void update_sm_ah(void *port_ptr)
+static void update_sm_ah(struct work_struct *work)
{
- struct ib_sa_port *port = port_ptr;
+ struct ib_sa_port *port =
+ container_of(work, struct ib_sa_port, update_task);
struct ib_sa_sm_ah *new_ah, *old_ah;
struct ib_port_attr port_attr;
struct ib_ah_attr ah_attr;
if (IS_ERR(sa_dev->port[i].agent))
goto err;
- INIT_WORK(&sa_dev->port[i].update_task,
- update_sm_ah, &sa_dev->port[i]);
+ INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
}
ib_set_client_data(device, &sa_client, sa_dev);
goto err;
for (i = 0; i <= e - s; ++i)
- update_sm_ah(&sa_dev->port[i]);
+ update_sm_ah(&sa_dev->port[i].update_task);
return;
up_write(¤t->mm->mmap_sem);
}
-static void ib_umem_account(void *work_ptr)
+static void ib_umem_account(struct work_struct *_work)
{
- struct ib_umem_account_work *work = work_ptr;
+ struct ib_umem_account_work *work =
+ container_of(_work, struct ib_umem_account_work, work);
down_write(&work->mm->mmap_sem);
work->mm->locked_vm -= work->diff;
return;
}
- INIT_WORK(&work->work, ib_umem_account, work);
+ INIT_WORK(&work->work, ib_umem_account);
work->mm = mm;
work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
unsigned long num_pages;
};
-static void user_pages_account(void *ptr)
+static void user_pages_account(struct work_struct *_work)
{
- struct ipath_user_pages_work *work = ptr;
+ struct ipath_user_pages_work *work =
+ container_of(_work, struct ipath_user_pages_work, work);
down_write(&work->mm->mmap_sem);
work->mm->locked_vm -= work->num_pages;
goto bail;
- INIT_WORK(&work->work, user_pages_account, work);
+ INIT_WORK(&work->work, user_pages_account);
work->mm = mm;
work->num_pages = num_pages;
module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
-static void catas_reset(void *work_ptr)
+static void catas_reset(struct work_struct *work)
{
struct mthca_dev *dev, *tmpdev;
LIST_HEAD(tlist);
int __init mthca_catas_init(void)
{
- INIT_WORK(&catas_work, catas_reset, NULL);
+ INIT_WORK(&catas_work, catas_reset);
catas_wq = create_singlethread_workqueue("mthca_catas");
if (!catas_wq)
struct list_head multicast_list;
struct rb_root multicast_tree;
- struct work_struct pkey_task;
- struct work_struct mcast_task;
+ struct delayed_work pkey_task;
+ struct delayed_work mcast_task;
struct work_struct flush_task;
struct work_struct restart_task;
- struct work_struct ah_reap_task;
+ struct delayed_work ah_reap_task;
struct ib_device *ca;
u8 port;
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_ah *address, u32 qpn);
-void ipoib_reap_ah(void *dev_ptr);
+void ipoib_reap_ah(struct work_struct *work);
void ipoib_flush_paths(struct net_device *dev);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_ib_dev_flush(void *dev);
+void ipoib_ib_dev_flush(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_dev_cleanup(struct net_device *dev);
-void ipoib_mcast_join_task(void *dev_ptr);
+void ipoib_mcast_join_task(struct work_struct *work);
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
-void ipoib_mcast_restart_task(void *dev_ptr);
+void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
-void ipoib_pkey_poll(void *dev);
+void ipoib_pkey_poll(struct work_struct *work);
int ipoib_pkey_dev_delay_open(struct net_device *dev);
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
spin_unlock_irq(&priv->tx_lock);
}
-void ipoib_reap_ah(void *dev_ptr)
+void ipoib_reap_ah(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+ struct net_device *dev = priv->dev;
__ipoib_reap_ah(dev);
return 0;
}
-void ipoib_ib_dev_flush(void *_dev)
+void ipoib_ib_dev_flush(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *)_dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
+ struct ipoib_dev_priv *cpriv, *priv =
+ container_of(work, struct ipoib_dev_priv, flush_task);
+ struct net_device *dev = priv->dev;
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
*/
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
ipoib_ib_dev_up(dev);
- ipoib_mcast_restart_task(dev);
+ ipoib_mcast_restart_task(&priv->restart_task);
}
mutex_lock(&priv->vlan_mutex);
/* Flush any child interfaces too */
list_for_each_entry(cpriv, &priv->child_intfs, list)
- ipoib_ib_dev_flush(cpriv->dev);
+ ipoib_ib_dev_flush(&cpriv->flush_task);
mutex_unlock(&priv->vlan_mutex);
}
* change async notification is available.
*/
-void ipoib_pkey_poll(void *dev_ptr)
+void ipoib_pkey_poll(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, pkey_task.work);
+ struct net_device *dev = priv->dev;
ipoib_pkey_dev_check_presence(dev);
INIT_LIST_HEAD(&priv->dead_ahs);
INIT_LIST_HEAD(&priv->multicast_list);
- INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev);
- INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev);
- INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev);
- INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
- INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev);
+ INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll);
+ INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
+ INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
+ INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+ INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}
struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
mcast->backoff = 1;
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue,
+ &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
complete(&mcast->done);
return;
if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
if (status == -ETIMEDOUT)
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+ 0);
else
queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
mcast->backoff * HZ);
mcast->query_id = ret;
}
-void ipoib_mcast_join_task(void *dev_ptr)
+void ipoib_mcast_join_task(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, mcast_task.work);
+ struct net_device *dev = priv->dev;
if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
return;
mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
spin_lock_irq(&priv->lock);
}
}
-void ipoib_mcast_restart_task(void *dev_ptr)
+void ipoib_mcast_restart_task(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, restart_task);
+ struct net_device *dev = priv->dev;
struct dev_mc_list *mclist;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(void *data);
+static void iser_comp_error_worker(struct work_struct *work);
static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
init_waitqueue_head(&ib_conn->wait);
atomic_set(&ib_conn->post_recv_buf_count, 0);
atomic_set(&ib_conn->post_send_buf_count, 0);
- INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker,
- ib_conn);
+ INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
return ret_val;
}
-static void iser_comp_error_worker(void *data)
+static void iser_comp_error_worker(struct work_struct *work)
{
- struct iser_conn *ib_conn = data;
+ struct iser_conn *ib_conn =
+ container_of(work, struct iser_conn, comperror_work);
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
wait_for_completion(&target->done);
}
-static void srp_remove_work(void *target_ptr)
+static void srp_remove_work(struct work_struct *work)
{
- struct srp_target_port *target = target_ptr;
+ struct srp_target_port *target =
+ container_of(work, struct srp_target_port, work);
spin_lock_irq(target->scsi_host->host_lock);
if (target->state != SRP_TARGET_DEAD) {
spin_lock_irq(target->scsi_host->host_lock);
if (target->state == SRP_TARGET_CONNECTING) {
target->state = SRP_TARGET_DEAD;
- INIT_WORK(&target->work, srp_remove_work, target);
+ INIT_WORK(&target->work, srp_remove_work);
schedule_work(&target->work);
}
spin_unlock_irq(target->scsi_host->host_lock);
* were in.
*/
static void
-lkkbd_reinit (void *data)
+lkkbd_reinit (struct work_struct *work)
{
- struct lkkbd *lk = data;
+ struct lkkbd *lk = container_of(work, struct lkkbd, tq);
int division;
unsigned char leds_on = 0;
unsigned char leds_off = 0;
lk->serio = serio;
lk->dev = input_dev;
- INIT_WORK (&lk->tq, lkkbd_reinit, lk);
+ INIT_WORK (&lk->tq, lkkbd_reinit);
lk->bell_volume = bell_volume;
lk->keyclick_volume = keyclick_volume;
lk->ctrlclick_volume = ctrlclick_volume;
* were in.
*/
-static void sunkbd_reinit(void *data)
+static void sunkbd_reinit(struct work_struct *work)
{
- struct sunkbd *sunkbd = data;
+ struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
sunkbd->serio = serio;
sunkbd->dev = input_dev;
init_waitqueue_head(&sunkbd->wait);
- INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd);
+ INIT_WORK(&sunkbd->tq, sunkbd_reinit);
snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys);
serio_set_drvdata(serio, sunkbd);
* psmouse_resync() attempts to re-validate current protocol.
*/
-static void psmouse_resync(void *p)
+static void psmouse_resync(struct work_struct *work)
{
- struct psmouse *psmouse = p, *parent = NULL;
+ struct psmouse *parent = NULL, *psmouse =
+ container_of(work, struct psmouse, resync_work);
struct serio *serio = psmouse->ps2dev.serio;
psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
int failed = 0, enabled = 0;
goto out;
ps2_init(&psmouse->ps2dev, serio);
- INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
+ INIT_WORK(&psmouse->resync_work, psmouse_resync);
psmouse->dev = input_dev;
snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
}
void
-actcapi_dispatch(act2000_card *card)
+actcapi_dispatch(struct work_struct *work)
{
+ struct act2000_card *card =
+ container_of(work, struct act2000_card, rcv_tq);
struct sk_buff *skb;
actcapi_msg *msg;
__u16 ccmd;
extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *);
extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *);
extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8);
-extern void actcapi_dispatch(act2000_card *);
+extern void actcapi_dispatch(struct work_struct *);
#ifdef DEBUG_MSG
extern void actcapi_debug_msg(struct sk_buff *skb, int);
#else
}
static void
-act2000_transmit(struct act2000_card *card)
+act2000_transmit(struct work_struct *work)
{
+ struct act2000_card *card =
+ container_of(work, struct act2000_card, snd_tq);
+
switch (card->bus) {
case ACT2000_BUS_ISA:
act2000_isa_send(card);
}
static void
-act2000_receive(struct act2000_card *card)
+act2000_receive(struct work_struct *work)
{
+ struct act2000_card *card =
+ container_of(work, struct act2000_card, poll_tq);
+
switch (card->bus) {
case ACT2000_BUS_ISA:
act2000_isa_receive(card);
act2000_card * card = (act2000_card *)data;
unsigned long flags;
- act2000_receive(card);
+ act2000_receive(&card->poll_tq);
spin_lock_irqsave(&card->lock, flags);
mod_timer(&card->ptimer, jiffies+3);
spin_unlock_irqrestore(&card->lock, flags);
skb_queue_head_init(&card->sndq);
skb_queue_head_init(&card->rcvq);
skb_queue_head_init(&card->ackq);
- INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card);
- INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card);
- INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card);
+ INIT_WORK(&card->snd_tq, act2000_transmit);
+ INIT_WORK(&card->rcv_tq, actcapi_dispatch);
+ INIT_WORK(&card->poll_tq, act2000_receive);
init_timer(&card->ptimer);
card->interface.owner = THIS_MODULE;
card->interface.channels = ACT2000_BCH;
}
}
-static void notify_handler(void *data)
+static void notify_handler(struct work_struct *work)
{
- struct capi_notifier *np = data;
+ struct capi_notifier *np =
+ container_of(work, struct capi_notifier, work);
switch (np->cmd) {
case KCI_CONTRUP:
if (!np)
return -ENOMEM;
- INIT_WORK(&np->work, notify_handler, np);
+ INIT_WORK(&np->work, notify_handler);
np->cmd = cmd;
np->controller = controller;
np->applid = applid;
/* -------- Receiver ------------------------------------------ */
-static void recv_handler(void *_ap)
+static void recv_handler(struct work_struct *work)
{
struct sk_buff *skb;
- struct capi20_appl *ap = (struct capi20_appl *) _ap;
+ struct capi20_appl *ap =
+ container_of(work, struct capi20_appl, recv_work);
if ((!ap) || (ap->release_in_progress))
return;
ap->callback = NULL;
init_MUTEX(&ap->recv_sem);
skb_queue_head_init(&ap->recv_queue);
- INIT_WORK(&ap->recv_work, recv_handler, (void *)ap);
+ INIT_WORK(&ap->recv_work, recv_handler);
ap->release_in_progress = 0;
write_unlock_irqrestore(&application_lock, flags);
static void
-Amd7930_bh(struct IsdnCardState *cs)
+Amd7930_bh(struct work_struct *work)
{
-
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
struct PStack *stptr;
if (!cs)
void __devinit
setup_Amd7930(struct IsdnCardState *cs)
{
- INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs);
+ INIT_WORK(&cs->tqueue, Amd7930_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
cs->dbusytimer.data = (long) cs;
init_timer(&cs->dbusytimer);
cs->tx_skb = NULL;
cs->tx_cnt = 0;
cs->event = 0;
- cs->tqueue.data = cs;
skb_queue_head_init(&cs->rq);
skb_queue_head_init(&cs->sq);
static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg);
static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs);
static void hisax_bc_close(struct BCState *bcs);
-static void hisax_bh(struct IsdnCardState *cs);
+static void hisax_bh(struct work_struct *work);
static void EChannel_proc_rcv(struct hisax_d_if *d_if);
int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
hisax_d_if->cs = cs;
cs->hw.hisax_d_if = hisax_d_if;
cs->cardmsg = hisax_cardmsg;
- INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs);
+ INIT_WORK(&cs->tqueue, hisax_bh);
cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1;
for (i = 0; i < 2; i++) {
cs->bcs[i].BC_SetStack = hisax_bc_setstack;
schedule_work(&cs->tqueue);
}
-static void hisax_bh(struct IsdnCardState *cs)
+static void hisax_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
struct PStack *st;
int pr;
/* bottom half handler for interrupt */
/*************************************/
static void
-hfc4s8s_bh(hfc4s8s_hw * hw)
+hfc4s8s_bh(struct work_struct *work)
{
+ hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
u_char b;
struct hfc4s8s_l1 *l1p;
volatile u_char *fifo_stat;
goto out;
}
- INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw);
+ INIT_WORK(&hw->tqueue, hfc4s8s_bh);
if (request_irq
(hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
}
static void
-hfcd_bh(struct IsdnCardState *cs)
+hfcd_bh(struct work_struct *work)
{
- if (!cs)
- return;
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
+
if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
switch (cs->dc.hfcd.ph_state) {
case (0):
cs->dbusytimer.function = (void *) hfc_dbusy_timer;
cs->dbusytimer.data = (long) cs;
init_timer(&cs->dbusytimer);
- INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs);
+ INIT_WORK(&cs->tqueue, hfcd_bh);
}
/* handle L1 state changes */
/***************************/
static void
-hfcpci_bh(struct IsdnCardState *cs)
+hfcpci_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
u_long flags;
// struct PStack *stptr;
Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
/* At this point the needed PCI config is done */
/* fifos are still not enabled */
- INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs);
+ INIT_WORK(&cs->tqueue, hfcpci_bh);
cs->setstack_d = setstack_hfcpci;
cs->BC_Send_Data = &hfcpci_send_data;
cs->readisac = NULL;
/* handle L1 state changes */
/***************************/
static void
-hfcsx_bh(struct IsdnCardState *cs)
+hfcsx_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
u_long flags;
if (!cs)
cs->dbusytimer.function = (void *) hfcsx_dbusy_timer;
cs->dbusytimer.data = (long) cs;
init_timer(&cs->dbusytimer);
- INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs);
+ INIT_WORK(&cs->tqueue, hfcsx_bh);
cs->readisac = NULL;
cs->writeisac = NULL;
cs->readisacfifo = NULL;
}
static void
-icc_bh(struct IsdnCardState *cs)
+icc_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
struct PStack *stptr;
if (!cs)
void __devinit
setup_icc(struct IsdnCardState *cs)
{
- INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs);
+ INIT_WORK(&cs->tqueue, icc_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
cs->dbusytimer.data = (long) cs;
init_timer(&cs->dbusytimer);
}
static void
-isac_bh(struct IsdnCardState *cs)
+isac_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
struct PStack *stptr;
if (!cs)
void __devinit
setup_isac(struct IsdnCardState *cs)
{
- INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs);
+ INIT_WORK(&cs->tqueue, isac_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
cs->dbusytimer.data = (long) cs;
init_timer(&cs->dbusytimer);
#define B_LL_OK 10
static void
-isar_bh(struct BCState *bcs)
+isar_bh(struct work_struct *work)
{
+ struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
- BChannel_bh(bcs);
+ BChannel_bh(work);
if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event))
ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR);
cs->bcs[i].mode = 0;
cs->bcs[i].hw.isar.dpath = i + 1;
modeisar(&cs->bcs[i], 0, 0);
- INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]);
+ INIT_WORK(&cs->bcs[i].tqueue, isar_bh);
}
}
}
void
-BChannel_bh(struct BCState *bcs)
+BChannel_bh(struct work_struct *work)
{
+ struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
if (!bcs)
return;
if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event))
bcs->cs = cs;
bcs->channel = bc;
- INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs);
+ INIT_WORK(&bcs->tqueue, BChannel_bh);
spin_lock_init(&bcs->aclock);
bcs->BC_SetStack = NULL;
bcs->BC_Close = NULL;
}
static void
-W6692_bh(struct IsdnCardState *cs)
+W6692_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
struct PStack *stptr;
if (!cs)
id_list[cs->subtyp].card_name, cs->irq,
cs->hw.w6692.iobase);
- INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs);
+ INIT_WORK(&cs->tqueue, W6692_bh);
cs->readW6692 = &ReadW6692;
cs->writeW6692 = &WriteW6692;
cs->readisacfifo = &ReadISACfifo;
/*
* called from tq_immediate
*/
-static void isdn_net_softint(void *private)
+static void isdn_net_softint(struct work_struct *work)
{
- isdn_net_local *lp = private;
+ isdn_net_local *lp = container_of(work, isdn_net_local, tqueue);
struct sk_buff *skb;
spin_lock_bh(&lp->xmit_lock);
netdev->local->netdev = netdev;
netdev->local->next = netdev->local;
- INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local);
+ INIT_WORK(&netdev->local->tqueue, isdn_net_softint);
spin_lock_init(&netdev->local->xmit_lock);
netdev->local->isdn_device = -1;
static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
-extern void pcbit_deliver(void * data);
-
int pcbit_init_dev(int board, int mem_base, int irq)
{
struct pcbit_dev *dev;
memset(dev->b2, 0, sizeof(struct pcbit_chan));
dev->b2->id = 1;
- INIT_WORK(&dev->qdelivery, pcbit_deliver, dev);
+ INIT_WORK(&dev->qdelivery, pcbit_deliver);
/*
* interrupts
* Prototypes
*/
-void pcbit_deliver(void *data);
static void pcbit_transmit(struct pcbit_dev *dev);
static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack);
*/
void
-pcbit_deliver(void *data)
+pcbit_deliver(struct work_struct *work)
{
struct frame_buf *frame;
unsigned long flags, msg;
- struct pcbit_dev *dev = (struct pcbit_dev *) data;
+ struct pcbit_dev *dev =
+ container_of(work, struct pcbit_dev, qdelivery);
spin_lock_irqsave(&dev->lock, flags);
#define L2_RUNNING 5
#define L2_ERROR 6
+extern void pcbit_deliver(struct work_struct *work);
+
#endif
* sysfs visibility
*/
-static void smu_expose_childs(void *unused)
+static void smu_expose_childs(struct work_struct *unused)
{
struct device_node *np;
&smu->of_dev->dev);
}
-static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
+static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
static int smu_platform_probe(struct of_device* dev,
const struct of_device_id *match)
* interrupt context.
*/
static struct workqueue_struct *_kcryptd_workqueue;
-static void kcryptd_do_work(void *data);
+static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_queue_io(struct crypt_io *io)
{
- INIT_WORK(&io->work, kcryptd_do_work, io);
+ INIT_WORK(&io->work, kcryptd_do_work);
queue_work(_kcryptd_workqueue, &io->work);
}
dec_pending(io, crypt_convert(cc, &ctx));
}
-static void kcryptd_do_work(void *data)
+static void kcryptd_do_work(struct work_struct *work)
{
- struct crypt_io *io = data;
+ struct crypt_io *io = container_of(work, struct crypt_io, work);
if (io->post_process)
process_read_endio(io);
static kmem_cache_t *_mpio_cache;
struct workqueue_struct *kmultipathd;
-static void process_queued_ios(void *data);
-static void trigger_event(void *data);
+static void process_queued_ios(struct work_struct *work);
+static void trigger_event(struct work_struct *work);
/*-----------------------------------------------
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
m->queue_io = 1;
- INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
- INIT_WORK(&m->trigger_event, trigger_event, m);
+ INIT_WORK(&m->process_queued_ios, process_queued_ios);
+ INIT_WORK(&m->trigger_event, trigger_event);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
}
}
-static void process_queued_ios(void *data)
+static void process_queued_ios(struct work_struct *work)
{
- struct multipath *m = (struct multipath *) data;
+ struct multipath *m =
+ container_of(work, struct multipath, process_queued_ios);
struct hw_handler *hwh = &m->hw_handler;
struct pgpath *pgpath = NULL;
unsigned init_required = 0, must_queue = 1;
* An event is triggered whenever a path is taken out of use.
* Includes path failure and PG bypass.
*/
-static void trigger_event(void *data)
+static void trigger_event(struct work_struct *work)
{
- struct multipath *m = (struct multipath *) data;
+ struct multipath *m =
+ container_of(work, struct multipath, trigger_event);
dm_table_event(m->ti->table);
}
do_writes(ms, &writes);
}
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
{
struct mirror_set *ms;
dm_dirty_log_exit();
return r;
}
- INIT_WORK(&_kmirrord_work, do_work, NULL);
+ INIT_WORK(&_kmirrord_work, do_work);
r = dm_register_target(&mirror_target);
if (r < 0) {
#define SNAPSHOT_PAGES 256
struct workqueue_struct *ksnapd;
-static void flush_queued_bios(void *data);
+static void flush_queued_bios(struct work_struct *work);
struct pending_exception {
struct exception e;
}
bio_list_init(&s->queued_bios);
- INIT_WORK(&s->queued_bios_work, flush_queued_bios, s);
+ INIT_WORK(&s->queued_bios_work, flush_queued_bios);
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
}
}
-static void flush_queued_bios(void *data)
+static void flush_queued_bios(struct work_struct *work)
{
- struct dm_snapshot *s = (struct dm_snapshot *) data;
+ struct dm_snapshot *s =
+ container_of(work, struct dm_snapshot, queued_bios_work);
struct bio *queued_bios;
unsigned long flags;
/*
* kcopyd does this every time it's woken up.
*/
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
{
/*
* The order that these are called is *very* important.
}
kcopyd_clients++;
- INIT_WORK(&_kcopyd_work, do_work, NULL);
+ INIT_WORK(&_kcopyd_work, do_work);
mutex_unlock(&kcopyd_init_lock);
return 0;
}
unsigned long last_irq;
- struct work_struct irq_check_work;
+ struct delayed_work irq_check_work;
struct flexcop_device *fc_dev;
};
return 0;
}
-static void flexcop_pci_irq_check_work(void *data)
+static void flexcop_pci_irq_check_work(struct work_struct *work)
{
- struct flexcop_pci *fc_pci = data;
+ struct flexcop_pci *fc_pci =
+ container_of(work, struct flexcop_pci, irq_check_work.work);
struct flexcop_device *fc = fc_pci->fc_dev;
flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714);
if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
goto err_fc_exit;
- INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci);
+ INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
return ret;
struct dvbt_set_parameters_msg param;
struct dvbt_get_status_msg status;
- struct work_struct query_work;
+ struct delayed_work query_work;
wait_queue_head_t poll_wq;
int pending_fe_events;
#ifdef ENABLE_RC
struct input_dev *rc_input_dev;
char phys[64];
- struct work_struct rc_query_work;
+ struct delayed_work rc_query_work;
int rc_input_event;
u32 rc_last_code;
unsigned long last_event_jiffies;
#ifdef ENABLE_RC
-static void cinergyt2_query_rc (void *data)
+static void cinergyt2_query_rc (struct work_struct *work)
{
- struct cinergyt2 *cinergyt2 = data;
+ struct cinergyt2 *cinergyt2 =
+ container_of(work, struct cinergyt2, rc_query_work.work);
char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS };
struct cinergyt2_rc_event rc_events[12];
int n, len, i;
strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys));
cinergyt2->rc_input_event = KEY_MAX;
cinergyt2->rc_last_code = ~0;
- INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2);
+ INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc);
input_dev->name = DRIVER_NAME " remote control";
input_dev->phys = cinergyt2->phys;
#endif /* ENABLE_RC */
-static void cinergyt2_query (void *data)
+static void cinergyt2_query (struct work_struct *work)
{
- struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data;
+ struct cinergyt2 *cinergyt2 =
+ container_of(work, struct cinergyt2, query_work.work);
char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS };
struct dvbt_get_status_msg *s = &cinergyt2->status;
uint8_t lock_bits;
mutex_init(&cinergyt2->sem);
init_waitqueue_head (&cinergyt2->poll_wq);
- INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2);
+ INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query);
cinergyt2->udev = interface_to_usbdev(intf);
cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
int in_use;
struct net_device_stats stats;
u16 pid;
+ struct net_device *net;
struct dvb_net *host;
struct dmx_demux *demux;
struct dmx_section_feed *secfeed;
}
-static void wq_set_multicast_list (void *data)
+static void wq_set_multicast_list (struct work_struct *work)
{
- struct net_device *dev = data;
- struct dvb_net_priv *priv = dev->priv;
+ struct dvb_net_priv *priv =
+ container_of(work, struct dvb_net_priv, set_multicast_list_wq);
+ struct net_device *dev = priv->net;
dvb_net_feed_stop(dev);
priv->rx_mode = RX_MODE_UNI;
}
-static void wq_restart_net_feed (void *data)
+static void wq_restart_net_feed (struct work_struct *work)
{
- struct net_device *dev = data;
+ struct dvb_net_priv *priv =
+ container_of(work, struct dvb_net_priv, restart_net_feed_wq);
+ struct net_device *dev = priv->net;
if (netif_running(dev)) {
dvb_net_feed_stop(dev);
dvbnet->device[if_num] = net;
priv = net->priv;
+ priv->net = net;
priv->demux = dvbnet->demux;
priv->pid = pid;
priv->rx_mode = RX_MODE_UNI;
priv->feedtype = feedtype;
reset_ule(priv);
- INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net);
- INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net);
+ INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
+ INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
mutex_init(&priv->mutex);
net->base_addr = pid;
*
* TODO: Fix the repeat rate of the input device.
*/
-static void dvb_usb_read_remote_control(void *data)
+static void dvb_usb_read_remote_control(struct work_struct *work)
{
- struct dvb_usb_device *d = data;
+ struct dvb_usb_device *d =
+ container_of(work, struct dvb_usb_device, rc_query_work.work);
u32 event;
int state;
input_register_device(d->rc_input_dev);
- INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d);
+ INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control);
info("schedule remote query interval to %d msecs.", d->props.rc_interval);
schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval));
/* remote control */
struct input_dev *rc_input_dev;
char rc_phys[64];
- struct work_struct rc_query_work;
+ struct delayed_work rc_query_work;
u32 last_event;
int last_state;
struct pardevice *pdev;
struct parport *port;
struct work_struct cb_task;
+ void (*cb_func)(void *cbdata);
+ void *cb_data;
int open_count;
wait_queue_head_t wq_stream;
/* image state flags */
#define PARPORT_CHUNK_SIZE PAGE_SIZE
+static void cpia_pp_run_callback(struct work_struct *work)
+{
+ void (*cb_func)(void *cbdata);
+ void *cb_data;
+ struct pp_cam_entry *cam;
+
+ cam = container_of(work, struct pp_cam_entry, cb_task);
+ cb_func = cam->cb_func;
+ cb_data = cam->cb_data;
+ work_release(work);
+
+ cb_func(cb_data);
+}
+
/****************************************************************************
*
* CPiA-specific low-level parport functions for nibble uploads
int retval = 0;
if(cam->port->irq != PARPORT_IRQ_NONE) {
- INIT_WORK(&cam->cb_task, cb, cbdata);
+ cam->cb_func = cb;
+ cam->cb_data = cbdata;
+ INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback);
} else {
retval = -1;
}
schedule_work(&ir->work);
}
-static void cx88_ir_work(void *data)
+static void cx88_ir_work(struct work_struct *work)
{
- struct cx88_IR *ir = data;
+ struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
unsigned long timeout;
cx88_ir_handle_key(ir);
core->ir = ir;
if (ir->polling) {
- INIT_WORK(&ir->work, cx88_ir_work, ir);
+ INIT_WORK(&ir->work, cx88_ir_work);
init_timer(&ir->timer);
ir->timer.function = ir_timer;
ir->timer.data = (unsigned long)ir;
schedule_work(&ir->work);
}
-static void ir_work(void *data)
+static void ir_work(struct work_struct *work)
{
- struct IR_i2c *ir = data;
+ struct IR_i2c *ir = container_of(work, struct IR_i2c, work);
ir_key_poll(ir);
mod_timer(&ir->timer, jiffies+HZ/10);
}
ir->input->name,ir->input->phys,adap->name);
/* start polling via eventd */
- INIT_WORK(&ir->work, ir_work, ir);
+ INIT_WORK(&ir->work, ir_work);
init_timer(&ir->timer);
ir->timer.function = ir_timer;
ir->timer.data = (unsigned long)ir;
}
-static void pvr2_context_poll(struct pvr2_context *mp)
+static void pvr2_context_poll(struct work_struct *work)
{
+ struct pvr2_context *mp =
+ container_of(work, struct pvr2_context, workpoll);
pvr2_context_enter(mp); do {
pvr2_hdw_poll(mp->hdw);
} while (0); pvr2_context_exit(mp);
}
-static void pvr2_context_setup(struct pvr2_context *mp)
+static void pvr2_context_setup(struct work_struct *work)
{
+ struct pvr2_context *mp =
+ container_of(work, struct pvr2_context, workinit);
+
pvr2_context_enter(mp); do {
if (!pvr2_hdw_dev_ok(mp->hdw)) break;
pvr2_hdw_setup(mp->hdw);
}
mp->workqueue = create_singlethread_workqueue("pvrusb2");
- INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp);
- INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp);
+ INIT_WORK(&mp->workinit, pvr2_context_setup);
+ INIT_WORK(&mp->workpoll, pvr2_context_poll);
queue_work(mp->workqueue,&mp->workinit);
done:
return mp;
schedule_work(&s->work);
}
-static void saa6588_work(void *data)
+static void saa6588_work(struct work_struct *work)
{
- struct saa6588 *s = (struct saa6588 *)data;
+ struct saa6588 *s = container_of(work, struct saa6588, work);
saa6588_i2c_poll(s);
mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
saa6588_configure(s);
/* start polling via eventd */
- INIT_WORK(&s->work, saa6588_work, s);
+ INIT_WORK(&s->work, saa6588_work);
init_timer(&s->timer);
s->timer.function = saa6588_timer;
s->timer.data = (unsigned long)s;
.minor = -1,
};
-static void empress_signal_update(void* data)
+static void empress_signal_update(struct work_struct *work)
{
- struct saa7134_dev* dev = (struct saa7134_dev*) data;
+ struct saa7134_dev* dev =
+ container_of(work, struct saa7134_dev, empress_workqueue);
if (dev->nosignal) {
dprintk("no video signal\n");
"%s empress (%s)", dev->name,
saa7134_boards[dev->board].name);
- INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev);
+ INIT_WORK(&dev->empress_workqueue, empress_signal_update);
err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
empress_nr[dev->nr]);
sizeof(struct saa7134_buf),
dev);
- empress_signal_update(dev);
+ empress_signal_update(&dev->empress_workqueue);
return 0;
}
}
static void
-mptfc_setup_reset(void *arg)
+mptfc_setup_reset(struct work_struct *work)
{
- MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+ MPT_ADAPTER *ioc =
+ container_of(work, MPT_ADAPTER, fc_setup_reset_work);
u64 pn;
struct mptfc_rport_info *ri;
}
static void
-mptfc_rescan_devices(void *arg)
+mptfc_rescan_devices(struct work_struct *work)
{
- MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+ MPT_ADAPTER *ioc =
+ container_of(work, MPT_ADAPTER, fc_rescan_work);
int ii;
u64 pn;
struct mptfc_rport_info *ri;
}
spin_lock_init(&ioc->fc_rescan_work_lock);
- INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc);
- INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc);
+ INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
+ INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
spin_lock_irqsave(&ioc->FreeQlock, flags);
u32 total_received;
struct net_device_stats stats; /* Per device statistics */
- struct work_struct post_buckets_task;
+ struct delayed_work post_buckets_task;
+ struct net_device *dev;
unsigned long post_buckets_active;
};
static int mpt_lan_open(struct net_device *dev);
static int mpt_lan_reset(struct net_device *dev);
static int mpt_lan_close(struct net_device *dev);
-static void mpt_lan_post_receive_buckets(void *dev_id);
+static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
int priority);
static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
} else {
- mpt_lan_post_receive_buckets(dev);
+ mpt_lan_post_receive_buckets(priv);
netif_wake_queue(dev);
}
dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
- mpt_lan_post_receive_buckets(dev);
+ mpt_lan_post_receive_buckets(priv);
printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
IOC_AND_NETDEV_NAMES_s_s(dev));
if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
if (priority) {
- schedule_work(&priv->post_buckets_task);
+ schedule_delayed_work(&priv->post_buckets_task, 0);
} else {
schedule_delayed_work(&priv->post_buckets_task, 1);
dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
/* Simple SGE's only at the moment */
static void
-mpt_lan_post_receive_buckets(void *dev_id)
+mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
- struct net_device *dev = dev_id;
- struct mpt_lan_priv *priv = dev->priv;
+ struct net_device *dev = priv->dev;
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
MPT_FRAME_HDR *mf;
LANReceivePostRequest_t *pRecvReq;
clear_bit(0, &priv->post_buckets_active);
}
+static void
+mpt_lan_post_receive_buckets_work(struct work_struct *work)
+{
+ mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
+ post_buckets_task.work));
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
priv = netdev_priv(dev);
+ priv->dev = dev;
priv->mpt_dev = mpt_dev;
priv->pnum = pnum;
- memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
- INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
+ memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
+ INIT_DELAYED_WORK(&priv->post_buckets_task,
+ mpt_lan_post_receive_buckets_work);
priv->post_buckets_active = 0;
dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
*(Mutex LOCKED)
*/
static void
-mptsas_discovery_work(void * arg)
+mptsas_discovery_work(struct work_struct *work)
{
- struct mptsas_discovery_event *ev = arg;
+ struct mptsas_discovery_event *ev =
+ container_of(work, struct mptsas_discovery_event, work);
MPT_ADAPTER *ioc = ev->ioc;
mutex_lock(&ioc->sas_discovery_mutex);
* Work queue thread to clear the persistency table
*/
static void
-mptsas_persist_clear_table(void * arg)
+mptsas_persist_clear_table(struct work_struct *work)
{
- MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+ MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
}
* Work queue thread to handle SAS hotplug events
*/
static void
-mptsas_hotplug_work(void *arg)
+mptsas_hotplug_work(struct work_struct *work)
{
- struct mptsas_hotplug_event *ev = arg;
+ struct mptsas_hotplug_event *ev =
+ container_of(work, struct mptsas_hotplug_event, work);
MPT_ADAPTER *ioc = ev->ioc;
struct mptsas_phyinfo *phy_info;
struct sas_rphy *rphy;
break;
}
- INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+ INIT_WORK(&ev->work, mptsas_hotplug_work);
ev->ioc = ioc;
ev->handle = le16_to_cpu(sas_event_data->DevHandle);
ev->parent_handle =
* Persistent table is full.
*/
INIT_WORK(&ioc->sas_persist_task,
- mptsas_persist_clear_table, (void *)ioc);
+ mptsas_persist_clear_table);
schedule_work(&ioc->sas_persist_task);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
return;
}
- INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+ INIT_WORK(&ev->work, mptsas_hotplug_work);
ev->ioc = ioc;
ev->id = raid_event_data->VolumeID;
ev->event_type = MPTSAS_IGNORE_EVENT;
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (!ev)
return;
- INIT_WORK(&ev->work, mptsas_discovery_work, ev);
+ INIT_WORK(&ev->work, mptsas_discovery_work);
ev->ioc = ioc;
schedule_work(&ev->work);
};
break;
case MPI_EVENT_PERSISTENT_TABLE_FULL:
INIT_WORK(&ioc->sas_persist_task,
- mptsas_persist_clear_table,
- (void *)ioc);
+ mptsas_persist_clear_table);
schedule_work(&ioc->sas_persist_task);
break;
case MPI_EVENT_SAS_DISCOVERY:
int disk;
};
-static void mpt_work_wrapper(void *data)
+static void mpt_work_wrapper(struct work_struct *work)
{
- struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+ struct work_queue_wrapper *wqw =
+ container_of(work, struct work_queue_wrapper, work);
struct _MPT_SCSI_HOST *hd = wqw->hd;
struct Scsi_Host *shost = hd->ioc->sh;
struct scsi_device *sdev;
disk);
return;
}
- INIT_WORK(&wqw->work, mpt_work_wrapper, wqw);
+ INIT_WORK(&wqw->work, mpt_work_wrapper);
wqw->hd = hd;
wqw->disk = disk;
* renegotiate for a given target
*/
static void
-mptspi_dv_renegotiate_work(void *data)
+mptspi_dv_renegotiate_work(struct work_struct *work)
{
- struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+ struct work_queue_wrapper *wqw =
+ container_of(work, struct work_queue_wrapper, work);
struct _MPT_SCSI_HOST *hd = wqw->hd;
struct scsi_device *sdev;
if (!wqw)
return;
- INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
+ INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
wqw->hd = hd;
schedule_work(&wqw->work);
break;
}
- INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt);
+ INIT_WORK(&evt->work, drv->event);
queue_work(drv->event_queue, &evt->work);
return 1;
}
* new LCT and if the buffer for the LCT was too small sends a LCT NOTIFY
* again, otherwise send LCT NOTIFY to get informed on next LCT change.
*/
-static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work)
+static void i2o_exec_lct_modified(struct work_struct *_work)
{
+ struct i2o_exec_lct_notify_work *work =
+ container_of(_work, struct i2o_exec_lct_notify_work, work);
u32 change_ind = 0;
struct i2o_controller *c = work->c;
work->c = c;
- INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified,
- work);
+ INIT_WORK(&work->work, i2o_exec_lct_modified);
queue_work(i2o_exec_driver.event_queue, &work->work);
return 1;
}
/**
* i2o_exec_event - Event handling function
- * @evt: Event which occurs
+ * @work: Work item of the occurring event
*
* Handles events sent by the Executive device. At the moment does not do
* anything useful.
*/
-static void i2o_exec_event(struct i2o_event *evt)
+static void i2o_exec_event(struct work_struct *work)
{
+ struct i2o_event *evt = container_of(work, struct i2o_event, work);
+
if (likely(evt->i2o_dev))
osm_debug("Event received from device: %d\n",
evt->i2o_dev->lct_data.tid);
/**
* i2o_block_delayed_request_fn - delayed request queue function
- * delayed_request: the delayed request with the queue to start
+ * @work: the delayed request with the queue to start
*
* If the request queue is stopped for a disk, and there is no open
* request, a new event is created, which calls this function to start
* the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
* be started again.
*/
-static void i2o_block_delayed_request_fn(void *delayed_request)
+static void i2o_block_delayed_request_fn(struct work_struct *work)
{
- struct i2o_block_delayed_request *dreq = delayed_request;
+ struct i2o_block_delayed_request *dreq =
+ container_of(work, struct i2o_block_delayed_request,
+ work.work);
struct request_queue *q = dreq->queue;
unsigned long flags;
return 1;
};
-static void i2o_block_event(struct i2o_event *evt)
+static void i2o_block_event(struct work_struct *work)
{
+ struct i2o_event *evt = container_of(work, struct i2o_event, work);
osm_debug("event received\n");
kfree(evt);
};
continue;
dreq->queue = q;
- INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
- dreq);
+ INIT_DELAYED_WORK(&dreq->work,
+ i2o_block_delayed_request_fn);
if (!queue_delayed_work(i2o_block_driver.event_queue,
&dreq->work,
/* I2O Block device delayed request */
struct i2o_block_delayed_request {
- struct work_struct work;
+ struct delayed_work work;
struct request_queue *queue;
};
spin_unlock_irqrestore(&fm->lock, flags);
}
-static void tifm_7xx1_remove_media(void *adapter)
+static void tifm_7xx1_remove_media(struct work_struct *work)
{
- struct tifm_adapter *fm = adapter;
+ struct tifm_adapter *fm =
+ container_of(work, struct tifm_adapter, media_remover);
unsigned long flags;
int cnt;
struct tifm_dev *sock;
return base_addr + ((sock_num + 1) << 10);
}
-static void tifm_7xx1_insert_media(void *adapter)
+static void tifm_7xx1_insert_media(struct work_struct *work)
{
- struct tifm_adapter *fm = adapter;
+ struct tifm_adapter *fm =
+ container_of(work, struct tifm_adapter, media_inserter);
unsigned long flags;
tifm_media_id media_id;
char *card_name = "xx";
spin_unlock_irqrestore(&fm->lock, flags);
flush_workqueue(fm->wq);
- tifm_7xx1_remove_media(fm);
+ tifm_7xx1_remove_media(&fm->media_remover);
pci_set_power_state(dev, PCI_D3hot);
pci_disable_device(dev);
if (!fm->sockets)
goto err_out_free;
- INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm);
- INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm);
+ INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
+ INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
fm->eject = tifm_7xx1_eject;
pci_set_drvdata(dev, fm);
flush_workqueue(fm->wq);
- tifm_7xx1_remove_media(fm);
+ tifm_7xx1_remove_media(&fm->media_remover);
writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
free_irq(dev->irq, fm);
*/
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
- if (delay)
- mmc_schedule_delayed_work(&host->detect, delay);
- else
- mmc_schedule_work(&host->detect);
+ mmc_schedule_delayed_work(&host->detect, delay);
}
EXPORT_SYMBOL(mmc_detect_change);
-static void mmc_rescan(void *data)
+static void mmc_rescan(struct work_struct *work)
{
- struct mmc_host *host = data;
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, detect.work);
struct list_head *l, *n;
unsigned char power_mode;
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_LIST_HEAD(&host->cards);
- INIT_WORK(&host->detect, mmc_rescan, host);
+ INIT_DELAYED_WORK(&host->detect, mmc_rescan);
/*
* By default, hosts do not support SGIO or large requests.
*/
int mmc_resume_host(struct mmc_host *host)
{
- mmc_rescan(host);
+ mmc_rescan(&host->detect.work);
return 0;
}
void mmc_free_host_sysfs(struct mmc_host *host);
-int mmc_schedule_work(struct work_struct *work);
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay);
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay);
void mmc_flush_scheduled_work(void);
#endif
static struct workqueue_struct *workqueue;
/*
- * Internal function. Schedule work in the MMC work queue.
- */
-int mmc_schedule_work(struct work_struct *work)
-{
- return queue_work(workqueue, work);
-}
-
-/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
{
return queue_delayed_work(workqueue, work, delay);
}
struct mmc_request *req;
struct work_struct cmd_handler;
- struct work_struct abort_handler;
+ struct delayed_work abort_handler;
wait_queue_head_t can_eject;
size_t written_blocks;
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_end_cmd(void *data)
+static void tifm_sd_end_cmd(struct work_struct *work)
{
- struct tifm_sd *host = data;
+ struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
struct tifm_dev *sock = host->dev;
struct mmc_host *mmc = tifm_get_drvdata(sock);
struct mmc_request *mrq;
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_end_cmd_nodma(void *data)
+static void tifm_sd_end_cmd_nodma(struct work_struct *work)
{
- struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
struct tifm_dev *sock = host->dev;
struct mmc_host *mmc = tifm_get_drvdata(sock);
struct mmc_request *mrq;
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_abort(void *data)
+static void tifm_sd_abort(struct work_struct *work)
{
+ struct tifm_sd *host =
+ container_of(work, struct tifm_sd, abort_handler.work);
+
printk(KERN_ERR DRIVER_NAME
": card failed to respond for a long period of time");
- tifm_eject(((struct tifm_sd*)data)->dev);
+ tifm_eject(host->dev);
}
static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
.get_ro = tifm_sd_ro
};
-static void tifm_sd_register_host(void *data)
+static void tifm_sd_register_host(struct work_struct *work)
{
- struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
struct tifm_dev *sock = host->dev;
struct mmc_host *mmc = tifm_get_drvdata(sock);
unsigned long flags;
spin_lock_irqsave(&sock->lock, flags);
host->flags |= HOST_REG;
PREPARE_WORK(&host->cmd_handler,
- no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
- data);
+ no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd);
spin_unlock_irqrestore(&sock->lock, flags);
dev_dbg(&sock->dev, "adding host\n");
mmc_add_host(mmc);
host->dev = sock;
host->clk_div = 61;
init_waitqueue_head(&host->can_eject);
- INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host);
- INIT_WORK(&host->abort_handler, tifm_sd_abort, host);
+ INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
+ INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
tifm_set_drvdata(sock, mmc);
sock->signal_irq = tifm_sd_signal_irq;
u32 rx_config;
struct rtl_extra_stats xstats;
- struct work_struct thread;
+ struct delayed_work thread;
struct mii_if_info mii;
unsigned int regs_len;
static void rtl8139_set_rx_mode (struct net_device *dev);
static void __set_rx_mode (struct net_device *dev);
static void rtl8139_hw_start (struct net_device *dev);
-static void rtl8139_thread (void *_data);
-static void rtl8139_tx_timeout_task(void *_data);
+static void rtl8139_thread (struct work_struct *work);
+static void rtl8139_tx_timeout_task(struct work_struct *work);
static const struct ethtool_ops rtl8139_ethtool_ops;
/* write MMIO register, with flush */
(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
spin_lock_init (&tp->lock);
spin_lock_init (&tp->rx_lock);
- INIT_WORK(&tp->thread, rtl8139_thread, dev);
+ INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
tp->mii.dev = dev;
tp->mii.mdio_read = mdio_read;
tp->mii.mdio_write = mdio_write;
RTL_R8 (Config1));
}
-static void rtl8139_thread (void *_data)
+static void rtl8139_thread (struct work_struct *work)
{
- struct net_device *dev = _data;
- struct rtl8139_private *tp = netdev_priv(dev);
+ struct rtl8139_private *tp =
+ container_of(work, struct rtl8139_private, thread.work);
+ struct net_device *dev = tp->mii.dev;
unsigned long thr_delay = next_tick;
if (tp->watchdog_fired) {
tp->watchdog_fired = 0;
- rtl8139_tx_timeout_task(_data);
+ rtl8139_tx_timeout_task(work);
} else if (rtnl_trylock()) {
rtl8139_thread_iter (dev, tp, tp->mmio_addr);
rtnl_unlock ();
/* XXX account for unsent Tx packets in tp->stats.tx_dropped */
}
-static void rtl8139_tx_timeout_task (void *_data)
+static void rtl8139_tx_timeout_task (struct work_struct *work)
{
- struct net_device *dev = _data;
- struct rtl8139_private *tp = netdev_priv(dev);
+ struct rtl8139_private *tp =
+ container_of(work, struct rtl8139_private, thread.work);
+ struct net_device *dev = tp->mii.dev;
void __iomem *ioaddr = tp->mmio_addr;
int i;
u8 tmp8;
struct rtl8139_private *tp = netdev_priv(dev);
if (!tp->have_thread) {
- INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev);
+ INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task);
schedule_delayed_work(&tp->thread, next_tick);
} else
tp->watchdog_fired = 1;
}
static void
-bnx2_reset_task(void *data)
+bnx2_reset_task(struct work_struct *work)
{
- struct bnx2 *bp = data;
+ struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
if (!netif_running(bp->dev))
return;
bp->pdev = pdev;
spin_lock_init(&bp->phy_lock);
- INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
+ INIT_WORK(&bp->reset_task, bnx2_reset_task);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
mem_len = MB_GET_CID_ADDR(17);
return 0;
}
-static void cas_reset_task(void *data)
+static void cas_reset_task(struct work_struct *work)
{
- struct cas *cp = (struct cas *) data;
+ struct cas *cp = container_of(work, struct cas, reset_task);
#if 0
int pending = atomic_read(&cp->reset_task_pending);
#else
atomic_set(&cp->reset_task_pending_spare, 0);
atomic_set(&cp->reset_task_pending_mtu, 0);
#endif
- INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+ INIT_WORK(&cp->reset_task, cas_reset_task);
/* Default link parameters */
if (link_mode >= 0 && link_mode <= 6)
struct peespi *espi;
struct port_info port[MAX_NPORTS];
- struct work_struct stats_update_task;
+ struct delayed_work stats_update_task;
struct timer_list stats_update_timer;
struct semaphore mib_mutex;
* Periodic accumulation of MAC statistics. This is used only if the MAC
* does not have any other way to prevent stats counter overflow.
*/
-static void mac_stats_task(void *data)
+static void mac_stats_task(struct work_struct *work)
{
int i;
- struct adapter *adapter = data;
+ struct adapter *adapter =
+ container_of(work, struct adapter, stats_update_task.work);
for_each_port(adapter, i) {
struct port_info *p = &adapter->port[i];
/*
* Processes elmer0 external interrupts in process context.
*/
-static void ext_intr_task(void *data)
+static void ext_intr_task(struct work_struct *work)
{
- struct adapter *adapter = data;
+ struct adapter *adapter =
+ container_of(work, struct adapter, ext_intr_handler_task);
elmer0_ext_intr_handler(adapter);
spin_lock_init(&adapter->async_lock);
INIT_WORK(&adapter->ext_intr_handler_task,
- ext_intr_task, adapter);
- INIT_WORK(&adapter->stats_update_task, mac_stats_task,
- adapter);
+ ext_intr_task);
+ INIT_DELAYED_WORK(&adapter->stats_update_task,
+ mac_stats_task);
#ifdef work_struct
init_timer(&adapter->stats_update_timer);
adapter->stats_update_timer.function = mac_stats_timer;
schedule_work(&nic->tx_timeout_task);
}
-static void e100_tx_timeout_task(struct net_device *netdev)
+static void e100_tx_timeout_task(struct work_struct *work)
{
- struct nic *nic = netdev_priv(netdev);
+ struct nic *nic = container_of(work, struct nic, tx_timeout_task);
+ struct net_device *netdev = nic->netdev;
DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
readb(&nic->csr->scb.status));
nic->blink_timer.function = e100_blink_led;
nic->blink_timer.data = (unsigned long)nic;
- INIT_WORK(&nic->tx_timeout_task,
- (void (*)(void *))e100_tx_timeout_task, netdev);
+ INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
if((err = e100_alloc(nic))) {
DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
return ret;
}
-static void ehea_reset_port(void *data)
+static void ehea_reset_port(struct work_struct *work)
{
int ret;
- struct net_device *dev = data;
- struct ehea_port *port = netdev_priv(dev);
+ struct ehea_port *port =
+ container_of(work, struct ehea_port, reset_task);
+ struct net_device *dev = port->netdev;
port->resets++;
down(&port->port_lock);
dev->tx_timeout = &ehea_tx_watchdog;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
- INIT_WORK(&port->reset_task, ehea_reset_port, dev);
+ INIT_WORK(&port->reset_task, ehea_reset_port);
ehea_set_ethtool_ops(dev);
int magic;
struct pardevice *pdev;
+ struct net_device *dev;
unsigned int work_running;
- struct work_struct run_work;
+ struct delayed_work run_work;
unsigned int modem;
unsigned int bitrate;
unsigned char stat;
#define GETTICK(x)
#endif /* __i386__ */
-static void epp_bh(struct net_device *dev)
+static void epp_bh(struct work_struct *work)
{
+ struct net_device *dev;
struct baycom_state *bc;
struct parport *pp;
unsigned char stat;
unsigned char tmp[2];
unsigned int time1 = 0, time2 = 0, time3 = 0;
int cnt, cnt2;
-
- bc = netdev_priv(dev);
+
+ bc = container_of(work, struct baycom_state, run_work.work);
+ dev = bc->dev;
if (!bc->work_running)
return;
baycom_int_freq(bc);
return -EBUSY;
}
dev->irq = /*pp->irq*/ 0;
- INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev);
+ INIT_DELAYED_WORK(&bc->run_work, epp_bh);
bc->work_running = 1;
bc->modem = EPP_CONVENTIONAL;
if (eppconfig(bc))
/*
* initialize part of the baycom_state struct
*/
+ bc->dev = dev;
bc->magic = BAYCOM_MAGIC;
bc->cfg.fclk = 19666600;
bc->cfg.bps = 9600;
return ret;
}
-static void mcs_speed_work(void *arg)
+static void mcs_speed_work(struct work_struct *work)
{
- struct mcs_cb *mcs = arg;
+ struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
struct net_device *netdev = mcs->netdev;
mcs_speed_change(mcs);
irda_qos_bits_to_value(&mcs->qos);
/* Speed change work initialisation*/
- INIT_WORK(&mcs->work, mcs_speed_work, mcs);
+ INIT_WORK(&mcs->work, mcs_speed_work);
/* Override the network functions we need to use */
ndev->hard_start_xmit = mcs_hard_xmit;
struct sir_fsm {
struct semaphore sem;
- struct work_struct work;
+ struct delayed_work work;
unsigned state, substate;
int param;
int result;
* Both must be unlocked/restarted on completion - but only on final exit.
*/
-static void sirdev_config_fsm(void *data)
+static void sirdev_config_fsm(struct work_struct *work)
{
- struct sir_dev *dev = data;
+ struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
struct sir_fsm *fsm = &dev->fsm;
int next_state;
int ret = -1;
fsm->param = param;
fsm->result = 0;
- INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
- queue_work(irda_sir_wq, &fsm->work);
+ INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
+ queue_delayed_work(irda_sir_wq, &fsm->work, 0);
return 0;
}
struct veth_lpar_connection {
HvLpIndex remote_lp;
- struct work_struct statemachine_wq;
+ struct delayed_work statemachine_wq;
struct veth_msg *msgs;
int num_events;
struct veth_cap_data local_caps;
static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
{
- schedule_work(&cnx->statemachine_wq);
+ schedule_delayed_work(&cnx->statemachine_wq, 0);
}
static void veth_take_cap(struct veth_lpar_connection *cnx,
}
/* FIXME: The gotos here are a bit dubious */
-static void veth_statemachine(void *p)
+static void veth_statemachine(struct work_struct *work)
{
- struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p;
+ struct veth_lpar_connection *cnx =
+ container_of(work, struct veth_lpar_connection,
+ statemachine_wq.work);
int rlp = cnx->remote_lp;
int rc;
cnx->remote_lp = rlp;
spin_lock_init(&cnx->lock);
- INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx);
+ INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
init_timer(&cnx->ack_timer);
cnx->ack_timer.function = veth_timed_ack;
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
void ixgb_set_ethtool_ops(struct net_device *netdev);
static void ixgb_tx_timeout(struct net_device *dev);
-static void ixgb_tx_timeout_task(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
adapter->watchdog_timer.function = &ixgb_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
- INIT_WORK(&adapter->tx_timeout_task,
- (void (*)(void *))ixgb_tx_timeout_task, netdev);
+ INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev)))
}
static void
-ixgb_tx_timeout_task(struct net_device *netdev)
+ixgb_tx_timeout_task(struct work_struct *work)
{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_adapter *adapter =
+ container_of(work, struct ixgb_adapter, tx_timeout_task);
adapter->tx_timeout_count++;
ixgb_down(adapter, TRUE);
* This watchdog is used to check whether the board has suffered
* from a parity error and needs to be recovered.
*/
-static void myri10ge_watchdog(void *arg)
+static void myri10ge_watchdog(struct work_struct *work)
{
- struct myri10ge_priv *mgp = arg;
+ struct myri10ge_priv *mgp =
+ container_of(work, struct myri10ge_priv, watchdog_work);
u32 reboot;
int status;
u16 cmd, vendor;
(unsigned long)mgp);
SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
- INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+ INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
if (status != 0) {
dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
u8 __iomem *base;
struct pci_dev *pci_dev;
+ struct net_device *ndev;
#ifdef NS83820_VLAN_ACCEL_SUPPORT
struct vlan_group *vlgrp;
}
/* REFILL */
-static inline void queue_refill(void *_dev)
+static inline void queue_refill(struct work_struct *work)
{
- struct net_device *ndev = _dev;
- struct ns83820 *dev = PRIV(ndev);
+ struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
+ struct net_device *ndev = dev->ndev;
rx_refill(ndev, GFP_KERNEL);
if (dev->rx_info.up)
ndev = alloc_etherdev(sizeof(struct ns83820));
dev = PRIV(ndev);
+ dev->ndev = ndev;
err = -ENOMEM;
if (!dev)
goto out;
SET_MODULE_OWNER(ndev);
SET_NETDEV_DEV(ndev, &pci_dev->dev);
- INIT_WORK(&dev->tq_refill, queue_refill, ndev);
+ INIT_WORK(&dev->tq_refill, queue_refill);
tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
err = pci_enable_device(pci_dev);
*/
typedef struct local_info_t {
+ struct net_device *dev;
struct pcmcia_device *p_dev;
dev_node_t node;
struct net_device_stats stats;
*/
static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void do_tx_timeout(struct net_device *dev);
-static void xirc2ps_tx_timeout_task(void *data);
+static void xirc2ps_tx_timeout_task(struct work_struct *work);
static struct net_device_stats *do_get_stats(struct net_device *dev);
static void set_addresses(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
if (!dev)
return -ENOMEM;
local = netdev_priv(dev);
+ local->dev = dev;
local->p_dev = link;
link->priv = dev;
#ifdef HAVE_TX_TIMEOUT
dev->tx_timeout = do_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
- INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
+ INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task);
#endif
return xirc2ps_config(link);
/*====================================================================*/
static void
-xirc2ps_tx_timeout_task(void *data)
+xirc2ps_tx_timeout_task(struct work_struct *work)
{
- struct net_device *dev = data;
+ local_info_t *local =
+ container_of(work, local_info_t, tx_timeout_task);
+ struct net_device *dev = local->dev;
/* reset the card */
do_reset(dev,1);
dev->trans_start = jiffies;
EXPORT_SYMBOL(phy_start_aneg);
-static void phy_change(void *data);
+static void phy_change(struct work_struct *work);
static void phy_timer(unsigned long data);
/* phy_start_machine:
{
int err = 0;
- INIT_WORK(&phydev->phy_queue, phy_change, phydev);
+ INIT_WORK(&phydev->phy_queue, phy_change);
if (request_irq(phydev->irq, phy_interrupt,
IRQF_SHARED,
/* Scheduled by the phy_interrupt/timer to handle PHY changes */
-static void phy_change(void *data)
+static void phy_change(struct work_struct *work)
{
int err;
- struct phy_device *phydev = data;
+ struct phy_device *phydev =
+ container_of(work, struct phy_device, phy_queue);
err = phy_disable_interrupts(phydev);
#define PLIP_NIBBLE_WAIT 3000
/* Bottom halves */
-static void plip_kick_bh(struct net_device *dev);
-static void plip_bh(struct net_device *dev);
-static void plip_timer_bh(struct net_device *dev);
+static void plip_kick_bh(struct work_struct *work);
+static void plip_bh(struct work_struct *work);
+static void plip_timer_bh(struct work_struct *work);
/* Interrupt handler */
static void plip_interrupt(int irq, void *dev_id);
struct net_local {
struct net_device_stats enet_stats;
+ struct net_device *dev;
struct work_struct immediate;
- struct work_struct deferred;
- struct work_struct timer;
+ struct delayed_work deferred;
+ struct delayed_work timer;
struct plip_local snd_data;
struct plip_local rcv_data;
struct pardevice *pardev;
nl->nibble = PLIP_NIBBLE_WAIT;
/* Initialize task queue structures */
- INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
- INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);
+ INIT_WORK(&nl->immediate, plip_bh);
+ INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
if (dev->irq == -1)
- INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);
+ INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
spin_lock_init(&nl->lock);
}
This routine is kicked by do_timer().
Request `plip_bh' to be invoked. */
static void
-plip_kick_bh(struct net_device *dev)
+plip_kick_bh(struct work_struct *work)
{
- struct net_local *nl = netdev_priv(dev);
+ struct net_local *nl =
+ container_of(work, struct net_local, deferred.work);
if (nl->is_deferred)
schedule_work(&nl->immediate);
/* Bottom half handler of PLIP. */
static void
-plip_bh(struct net_device *dev)
+plip_bh(struct work_struct *work)
{
- struct net_local *nl = netdev_priv(dev);
+ struct net_local *nl = container_of(work, struct net_local, immediate);
struct plip_local *snd = &nl->snd_data;
struct plip_local *rcv = &nl->rcv_data;
plip_func f;
nl->is_deferred = 0;
f = connection_state_table[nl->connection];
- if ((r = (*f)(dev, nl, snd, rcv)) != OK
- && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
+ if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
+ && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
nl->is_deferred = 1;
schedule_delayed_work(&nl->deferred, 1);
}
}
static void
-plip_timer_bh(struct net_device *dev)
+plip_timer_bh(struct work_struct *work)
{
- struct net_local *nl = netdev_priv(dev);
+ struct net_local *nl =
+ container_of(work, struct net_local, timer.work);
if (!(atomic_read (&nl->kill_timer))) {
- plip_interrupt (-1, dev);
+ plip_interrupt (-1, nl->dev);
schedule_delayed_work(&nl->timer, 1);
}
}
nl = netdev_priv(dev);
+ nl->dev = dev;
nl->pardev = parport_register_device(port, name, plip_preempt,
plip_wakeup, plip_interrupt,
0, dev);
"%s: Another function issued a reset to the "
"chip. ISR value = %x.\n", ndev->name, value);
}
- queue_work(qdev->workqueue, &qdev->reset_work);
+ queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
spin_unlock(&qdev->adapter_lock);
} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
ql_disable_interrupts(qdev);
/*
* Wake up the worker to process this event.
*/
- queue_work(qdev->workqueue, &qdev->tx_timeout_work);
+ queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}
-static void ql_reset_work(struct ql3_adapter *qdev)
+static void ql_reset_work(struct work_struct *work)
{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, reset_work.work);
struct net_device *ndev = qdev->ndev;
u32 value;
struct ql_tx_buf_cb *tx_cb;
}
}
-static void ql_tx_timeout_work(struct ql3_adapter *qdev)
+static void ql_tx_timeout_work(struct work_struct *work)
{
- ql_cycle_adapter(qdev,QL_DO_RESET);
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, tx_timeout_work.work);
+
+ ql_cycle_adapter(qdev, QL_DO_RESET);
}
static void ql_get_board_info(struct ql3_adapter *qdev)
netif_stop_queue(ndev);
qdev->workqueue = create_singlethread_workqueue(ndev->name);
- INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
- INIT_WORK(&qdev->tx_timeout_work,
- (void (*)(void *))ql_tx_timeout_work, qdev);
+ INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
+ INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
init_timer(&qdev->adapter_timer);
qdev->adapter_timer.function = ql3xxx_timer;
u32 numPorts;
struct net_device_stats stats;
struct workqueue_struct *workqueue;
- struct work_struct reset_work;
- struct work_struct tx_timeout_work;
+ struct delayed_work reset_work;
+ struct delayed_work tx_timeout_work;
u32 max_frame_size;
};
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev; /* Index of PCI device */
+ struct net_device *dev;
struct net_device_stats stats; /* statistics of net device */
spinlock_t lock; /* spin lock flag */
u32 msg_enable;
void (*phy_reset_enable)(void __iomem *);
unsigned int (*phy_reset_pending)(void __iomem *);
unsigned int (*link_ok)(void __iomem *);
- struct work_struct task;
+ struct delayed_work task;
unsigned wol_enabled : 1;
};
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
tp = netdev_priv(dev);
+ tp->dev = dev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
/* enable device (incl. PCI PM wakeup and hotplug setup) */
if (retval < 0)
goto err_free_rx;
- INIT_WORK(&tp->task, NULL, dev);
+ INIT_DELAYED_WORK(&tp->task, NULL);
rtl8169_hw_start(dev);
tp->cur_tx = tp->dirty_tx = 0;
}
-static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
+static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
struct rtl8169_private *tp = netdev_priv(dev);
- PREPARE_WORK(&tp->task, task, dev);
+ PREPARE_DELAYED_WORK(&tp->task, task);
schedule_delayed_work(&tp->task, 4);
}
netif_poll_enable(dev);
}
-static void rtl8169_reinit_task(void *_data)
+static void rtl8169_reinit_task(struct work_struct *work)
{
- struct net_device *dev = _data;
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, task.work);
+ struct net_device *dev = tp->dev;
int ret;
if (netif_running(dev)) {
}
}
-static void rtl8169_reset_task(void *_data)
+static void rtl8169_reset_task(struct work_struct *work)
{
- struct net_device *dev = _data;
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, task.work);
+ struct net_device *dev = tp->dev;
if (!netif_running(dev))
return;
* Description: Sets the link status for the adapter
*/
-static void s2io_set_link(unsigned long data)
+static void s2io_set_link(struct work_struct *work)
{
- nic_t *nic = (nic_t *) data;
+ nic_t *nic = container_of(work, nic_t, set_link_task);
struct net_device *dev = nic->dev;
XENA_dev_config_t __iomem *bar0 = nic->bar0;
register u64 val64;
* spin lock.
*/
-static void s2io_restart_nic(unsigned long data)
+static void s2io_restart_nic(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *) data;
- nic_t *sp = dev->priv;
+ nic_t *sp = container_of(work, nic_t, rst_timer_task);
+ struct net_device *dev = sp->dev;
s2io_card_down(sp);
if (s2io_card_up(sp)) {
dev->tx_timeout = &s2io_tx_watchdog;
dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
- INIT_WORK(&sp->rst_timer_task,
- (void (*)(void *)) s2io_restart_nic, dev);
- INIT_WORK(&sp->set_link_task,
- (void (*)(void *)) s2io_set_link, sp);
+ INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
+ INIT_WORK(&sp->set_link_task, s2io_set_link);
pci_save_state(sp->pdev);
static irqreturn_t s2io_isr(int irq, void *dev_id);
static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
static const struct ethtool_ops netdev_ethtool_ops;
-static void s2io_set_link(unsigned long data);
+static void s2io_set_link(struct work_struct *work);
static int s2io_set_swapper(nic_t * sp);
static void s2io_card_down(nic_t *nic);
static int s2io_card_up(nic_t *nic);
struct sis190_private {
void __iomem *mmio_addr;
struct pci_dev *pci_dev;
+ struct net_device *dev;
struct net_device_stats stats;
spinlock_t lock;
u32 rx_buf_sz;
netif_start_queue(dev);
}
-static void sis190_phy_task(void * data)
+static void sis190_phy_task(struct work_struct *work)
{
- struct net_device *dev = data;
- struct sis190_private *tp = netdev_priv(dev);
+ struct sis190_private *tp =
+ container_of(work, struct sis190_private, phy_task);
+ struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->mmio_addr;
int phy_id = tp->mii_if.phy_id;
u16 val;
if (rc < 0)
goto err_free_rx_1;
- INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+ INIT_WORK(&tp->phy_task, sis190_phy_task);
sis190_request_timer(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
tp = netdev_priv(dev);
+ tp->dev = dev;
tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
rc = pci_enable_device(pdev);
sis190_init_rxfilter(dev);
- INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+ INIT_WORK(&tp->phy_task, sis190_phy_task);
dev->open = sis190_open;
dev->stop = sis190_close;
* Since internal PHY is wired to a level triggered pin, can't
* get an interrupt when carrier is detected.
*/
-static void xm_link_timer(void *arg)
+static void xm_link_timer(struct work_struct *work)
{
- struct net_device *dev = arg;
- struct skge_port *skge = netdev_priv(arg);
+ struct skge_port *skge =
+ container_of(work, struct skge_port, link_thread.work);
+ struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
* because accessing phy registers requires spin wait which might
* cause excess interrupt latency.
*/
-static void skge_extirq(void *arg)
+static void skge_extirq(struct work_struct *work)
{
- struct skge_hw *hw = arg;
+ struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
int port;
mutex_lock(&hw->phy_mutex);
skge->port = port;
/* Only used for Genesis XMAC */
- INIT_WORK(&skge->link_thread, xm_link_timer, dev);
+ INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
if (hw->chip_id != CHIP_ID_GENESIS) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
hw->pdev = pdev;
mutex_init(&hw->phy_mutex);
- INIT_WORK(&hw->phy_work, skge_extirq, hw);
+ INIT_WORK(&hw->phy_work, skge_extirq);
spin_lock_init(&hw->hw_lock);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
struct net_device_stats net_stats;
- struct work_struct link_thread;
+ struct delayed_work link_thread;
enum pause_control flow_control;
enum pause_status flow_status;
u8 rx_csum;
* called as task when tx hangs, resets interface (if interface is up)
*/
static void
-spider_net_tx_timeout_task(void *data)
+spider_net_tx_timeout_task(struct work_struct *work)
{
- struct net_device *netdev = data;
- struct spider_net_card *card = netdev_priv(netdev);
+ struct spider_net_card *card =
+ container_of(work, struct spider_net_card, tx_timeout_task);
+ struct net_device *netdev = card->netdev;
if (!(netdev->flags & IFF_UP))
goto out;
card = netdev_priv(netdev);
card->netdev = netdev;
card->msg_enable = SPIDER_NET_DEFAULT_MSG;
- INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev);
+ INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
init_waitqueue_head(&card->waitq);
atomic_set(&card->tx_timeout_task_counter, 0);
}
}
-static void gem_reset_task(void *data)
+static void gem_reset_task(struct work_struct *work)
{
- struct gem *gp = (struct gem *) data;
+ struct gem *gp = container_of(work, struct gem, reset_task);
mutex_lock(&gp->pm_mutex);
gp->link_timer.function = gem_link_timer;
gp->link_timer.data = (unsigned long) gp;
- INIT_WORK(&gp->reset_task, gem_reset_task, gp);
+ INIT_WORK(&gp->reset_task, gem_reset_task);
gp->lstate = link_down;
gp->timer_ticks = 0;
}
#endif
-static void tg3_reset_task(void *_data)
+static void tg3_reset_task(struct work_struct *work)
{
- struct tg3 *tp = _data;
+ struct tg3 *tp = container_of(work, struct tg3, reset_task);
unsigned int restart_timer;
tg3_full_lock(tp, 0);
#endif
spin_lock_init(&tp->lock);
spin_lock_init(&tp->indirect_lock);
- INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
+ INIT_WORK(&tp->reset_task, tg3_reset_task);
tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
if (tp->regs == 0UL) {
static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent);
static void TLan_tx_timeout( struct net_device *dev);
+static void TLan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
static u32 TLan_HandleInvalid( struct net_device *, u16 );
priv = netdev_priv(dev);
priv->pciDev = pdev;
+ priv->dev = dev;
/* Is this a PCI device? */
if (pdev) {
/* This will be used when we get an adapter error from
* within our irq handler */
- INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev);
+ INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
spin_lock_init(&priv->lock);
}
+ /***************************************************************
+ * TLan_tx_timeout_work
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * work - work item of device which timed out
+ *
+ **************************************************************/
+
+static void TLan_tx_timeout_work(struct work_struct *work)
+{
+ TLanPrivateInfo *priv =
+ container_of(work, TLanPrivateInfo, tlan_tqueue);
+
+ TLan_tx_timeout(priv->dev);
+}
+
+
/***************************************************************
* TLan_StartTx
typedef struct tlan_private_tag {
struct net_device *nextDevice;
struct pci_dev *pciDev;
+ struct net_device *dev;
void *dmaStorage;
dma_addr_t dmaStorageDMA;
unsigned int dmaSize;
/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
of available transceivers. */
-void t21142_media_task(void *data)
+void t21142_media_task(struct work_struct *work)
{
- struct net_device *dev = data;
- struct tulip_private *tp = netdev_priv(dev);
+ struct tulip_private *tp =
+ container_of(work, struct tulip_private, media_work);
+ struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int csr12 = ioread32(ioaddr + CSR12);
int next_tick = 60*HZ;
#include "tulip.h"
-void tulip_media_task(void *data)
+void tulip_media_task(struct work_struct *work)
{
- struct net_device *dev = data;
- struct tulip_private *tp = netdev_priv(dev);
+ struct tulip_private *tp =
+ container_of(work, struct tulip_private, media_work);
+ struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
u32 csr12 = ioread32(ioaddr + CSR12);
int next_tick = 2*HZ;
int valid_intrs; /* CSR7 interrupt enable settings */
int flags;
void (*media_timer) (unsigned long);
- void (*media_task) (void *);
+ work_func_t media_task;
};
int csr12_shadow;
int pad0; /* Used for 8-byte alignment */
struct work_struct media_work;
+ struct net_device *dev;
};
/* 21142.c */
extern u16 t21142_csr14[];
-void t21142_media_task(void *data);
+void t21142_media_task(struct work_struct *work);
void t21142_start_nway(struct net_device *dev);
void t21142_lnk_change(struct net_device *dev, int csr5);
void pnic_timer(unsigned long data);
/* timer.c */
-void tulip_media_task(void *data);
+void tulip_media_task(struct work_struct *work);
void mxic_timer(unsigned long data);
void comet_timer(unsigned long data);
* it is zeroed and aligned in alloc_etherdev
*/
tp = netdev_priv(dev);
+ tp->dev = dev;
tp->rx_ring = pci_alloc_consistent(pdev,
sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
tp->timer.data = (unsigned long)dev;
tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
- INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev);
+ INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
dev->base_addr = (unsigned long)ioaddr;
static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
static void cpc_tty_flush_buffer(struct tty_struct *tty);
static void cpc_tty_hangup(struct tty_struct *tty);
-static void cpc_tty_rx_work(void *data);
-static void cpc_tty_tx_work(void *data);
+static void cpc_tty_rx_work(struct work_struct *work);
+static void cpc_tty_tx_work(struct work_struct *work);
static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
cpc_tty->pc300dev = pc300dev;
- INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty);
- INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port);
+ INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work);
+ INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work);
cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
* o call the line disc. read
* o free memory
*/
-static void cpc_tty_rx_work(void * data)
+static void cpc_tty_rx_work(struct work_struct *work)
{
+ st_cpc_tty_area *cpc_tty;
unsigned long port;
int i, j;
- st_cpc_tty_area *cpc_tty;
volatile st_cpc_rx_buf *buf;
char flags=0,flg_rx=1;
struct tty_ldisc *ld;
if (cpc_tty_cnt == 0) return;
-
for (i=0; (i < 4) && flg_rx ; i++) {
flg_rx = 0;
- port = (unsigned long)data;
+
+ cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work);
+ port = cpc_tty - cpc_tty_area;
+
for (j=0; j < CPC_TTY_NPORTS; j++) {
cpc_tty = &cpc_tty_area[port];
* o if need call line discipline wakeup
* o call wake_up_interruptible
*/
-static void cpc_tty_tx_work(void *data)
+static void cpc_tty_tx_work(struct work_struct *work)
{
- st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data;
+ st_cpc_tty_area *cpc_tty =
+ container_of(work, st_cpc_tty_area, tty_tx_work);
struct tty_struct *tty;
CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
struct tasklet_struct isr_tasklet;
/* Periodic tasks */
- struct work_struct periodic_work;
+ struct delayed_work periodic_work;
unsigned int periodic_state;
struct work_struct restart_work;
return badness;
}
-static void bcm43xx_periodic_work_handler(void *d)
+static void bcm43xx_periodic_work_handler(struct work_struct *work)
{
- struct bcm43xx_private *bcm = d;
+ struct bcm43xx_private *bcm =
+ container_of(work, struct bcm43xx_private, periodic_work.work);
struct net_device *net_dev = bcm->net_dev;
unsigned long flags;
u32 savedirqs = 0;
void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
{
- struct work_struct *work = &(bcm->periodic_work);
+ struct delayed_work *work = &bcm->periodic_work;
assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
- INIT_WORK(work, bcm43xx_periodic_work_handler, bcm);
- schedule_work(work);
+ INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
+ schedule_delayed_work(work, 0);
}
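
Users that relied on one work_struct for both immediate and timed scheduling, such as the skge link poll and the bcm43xx periodic work above, switch to struct delayed_work; their handlers reach the container through the embedded .work member. A sketch under the same caveat, with hypothetical struct bar / bar_poll names:

#include <linux/workqueue.h>

#define BAR_POLL_INTERVAL	(10 * HZ)	/* made-up poll rate */

struct bar {
        struct delayed_work poll_work;  /* timer plus work_struct pair */
};

static void bar_poll(struct work_struct *work)
{
        /* delayed_work embeds its work_struct as .work */
        struct bar *b = container_of(work, struct bar, poll_work.work);

        /* ... periodic work goes here, then re-arm ... */
        schedule_delayed_work(&b->poll_work, BAR_POLL_INTERVAL);
}

static void bar_start(struct bar *b)
{
        INIT_DELAYED_WORK(&b->poll_work, bar_poll);
        schedule_delayed_work(&b->poll_work, 0);
}

Passing a zero delay preserves the old "queue it now" behaviour at the call sites that previously used schedule_work() on the same item.
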
static void bcm43xx_security_init(struct bcm43xx_private *bcm)
bcm43xx_periodic_tasks_setup(bcm);
/*FIXME: This should be handled by softmac instead. */
- schedule_work(&bcm->softmac->associnfo.work);
+ schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
out:
mutex_unlock(&(bcm)->mutex);
/* Hard-reset the chip. Do not call this directly.
* Use bcm43xx_controller_restart()
*/
-static void bcm43xx_chip_reset(void *_bcm)
+static void bcm43xx_chip_reset(struct work_struct *work)
{
- struct bcm43xx_private *bcm = _bcm;
+ struct bcm43xx_private *bcm =
+ container_of(work, struct bcm43xx_private, restart_work);
struct bcm43xx_phyinfo *phy;
int err = -ENODEV;
if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
return;
printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
- INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm);
+ INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
schedule_work(&bcm->restart_work);
}
struct net_device_stats *hostap_get_stats(struct net_device *dev);
void hostap_setup_dev(struct net_device *dev, local_info_t *local,
int main_dev);
-void hostap_set_multicast_list_queue(void *data);
+void hostap_set_multicast_list_queue(struct work_struct *work);
int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
void hostap_cleanup(local_info_t *local);
static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
static void hostap_event_expired_sta(struct net_device *dev,
struct sta_info *sta);
-static void handle_add_proc_queue(void *data);
+static void handle_add_proc_queue(struct work_struct *work);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void handle_wds_oper_queue(void *data);
+static void handle_wds_oper_queue(struct work_struct *work);
static void prism2_send_mgmt(struct net_device *dev,
u16 type_subtype, char *body,
int body_len, u8 *addr, u16 tx_cb_idx);
INIT_LIST_HEAD(&ap->sta_list);
/* Initialize task queue structure for AP management */
- INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap);
+ INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue);
ap->tx_callback_idx =
hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
printk(KERN_WARNING "%s: failed to register TX callback for "
"AP\n", local->dev->name);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local);
+ INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue);
ap->tx_callback_auth =
hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
}
-static void handle_add_proc_queue(void *data)
+static void handle_add_proc_queue(struct work_struct *work)
{
- struct ap_data *ap = (struct ap_data *) data;
+ struct ap_data *ap = container_of(work, struct ap_data,
+ add_sta_proc_queue);
struct sta_info *sta;
char name[20];
struct add_sta_proc_data *entry, *prev;
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void handle_wds_oper_queue(void *data)
+static void handle_wds_oper_queue(struct work_struct *work)
{
- local_info_t *local = data;
+ struct ap_data *ap = container_of(work, struct ap_data,
+ wds_oper_queue);
+ local_info_t *local = ap->local;
struct wds_oper_data *entry, *prev;
spin_lock_bh(&local->lock);
/* Called only as scheduled task after noticing card timeout in interrupt
* context */
-static void handle_reset_queue(void *data)
+static void handle_reset_queue(struct work_struct *work)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = container_of(work, local_info_t, reset_queue);
printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
prism2_hw_reset(local->dev);
/* Called only as a scheduled task when communications quality values should
* be updated. */
-static void handle_comms_qual_update(void *data)
+static void handle_comms_qual_update(struct work_struct *work)
{
- local_info_t *local = data;
+ local_info_t *local =
+ container_of(work, local_info_t, comms_qual_update);
prism2_update_comms_qual(local->dev);
}
}
-static void handle_set_tim_queue(void *data)
+static void handle_set_tim_queue(struct work_struct *work)
{
- local_info_t *