diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2023043ce7c0..8f5565bf34cd 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -961,17 +961,31 @@ static void empty(void)
 {
 }
 
-static DECLARE_WORK(floppy_work, NULL);
+static void (*floppy_work_fn)(void);
+
+static void floppy_work_workfn(struct work_struct *work)
+{
+	floppy_work_fn();
+}
+
+static DECLARE_WORK(floppy_work, floppy_work_workfn);
 
 static void schedule_bh(void (*handler)(void))
 {
 	WARN_ON(work_pending(&floppy_work));
 
-	PREPARE_WORK(&floppy_work, (work_func_t)handler);
+	floppy_work_fn = handler;
 	queue_work(floppy_wq, &floppy_work);
 }
 
-static DECLARE_DELAYED_WORK(fd_timer, NULL);
+static void (*fd_timer_fn)(void) = NULL;
+
+static void fd_timer_workfn(struct work_struct *work)
+{
+	fd_timer_fn();
+}
+
+static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
 
 static void cancel_activity(void)
 {
@@ -982,7 +996,7 @@ static void cancel_activity(void)
 
 /* this function makes sure that the disk stays in the drive during the
  * transfer */
-static void fd_watchdog(struct work_struct *arg)
+static void fd_watchdog(void)
 {
 	debug_dcl(DP->flags, "calling disk change from watchdog\n");
 
@@ -993,7 +1007,7 @@ static void fd_watchdog(struct work_struct *arg)
 		reset_fdc();
 	} else {
 		cancel_delayed_work(&fd_timer);
-		PREPARE_DELAYED_WORK(&fd_timer, fd_watchdog);
+		fd_timer_fn = fd_watchdog;
 		queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
 	}
 }
@@ -1005,7 +1019,8 @@ static void main_command_interrupt(void)
 }
 
 /* waits for a delay (spinup or select) to pass */
-static int fd_wait_for_completion(unsigned long expires, work_func_t function)
+static int fd_wait_for_completion(unsigned long expires,
+				  void (*function)(void))
 {
 	if (FDCS->reset) {
 		reset_fdc(); /* do the reset during sleep to win time
@@ -1016,7 +1031,7 @@ static int fd_wait_for_completion(unsigned long expires, work_func_t function)
 
 	if (time_before(jiffies, expires)) {
 		cancel_delayed_work(&fd_timer);
-		PREPARE_DELAYED_WORK(&fd_timer, function);
+		fd_timer_fn = function;
 		queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
 		return 1;
 	}
@@ -1334,8 +1349,7 @@ static int fdc_dtr(void)
 	 * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
 	 */
 	FDCS->dtr = raw_cmd->rate & 3;
-	return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
-				      (work_func_t)floppy_ready);
+	return fd_wait_for_completion(jiffies + 2UL * HZ / 100, floppy_ready);
 }			/* fdc_dtr */
 
 static void tell_sector(void)
@@ -1440,7 +1454,7 @@ static void setup_rw_floppy(void)
 	int flags;
 	int dflags;
 	unsigned long ready_date;
-	work_func_t function;
+	void (*function)(void);
 
 	flags = raw_cmd->flags;
 	if (flags & (FD_RAW_READ | FD_RAW_WRITE))
@@ -1454,9 +1468,9 @@ static void setup_rw_floppy(void)
 		 */
 		if (time_after(ready_date, jiffies + DP->select_delay)) {
 			ready_date -= DP->select_delay;
-			function = (work_func_t)floppy_start;
+			function = floppy_start;
 		} else
-			function = (work_func_t)setup_rw_floppy;
+			function = setup_rw_floppy;
 
 		/* wait until the floppy is spinning fast enough */
 		if (fd_wait_for_completion(ready_date, function))
@@ -1486,7 +1500,7 @@ static void setup_rw_floppy(void)
 		inr = result();
 		cont->interrupt();
 	} else if (flags & FD_RAW_NEED_DISK)
-		fd_watchdog(NULL);
+		fd_watchdog();
 }
 
 static int blind_seek;
@@ -1863,7 +1877,7 @@ static int start_motor(void (*function)(void))
 
 	/* wait_for_completion also schedules reset if needed. */
 	return fd_wait_for_completion(DRS->select_date + DP->select_delay,
-				      (work_func_t)function);
+				      function);
 }
 
 static void floppy_ready(void)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 51824d1f23ea..8459e4e7c719 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -993,7 +993,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
 		dev_warn(&dev->pci_dev->dev,
 			"I/O %d QID %d timeout, reset controller\n", cmdid,
 								nvmeq->qid);
-		PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
+		dev->reset_workfn = nvme_reset_failed_dev;
 		queue_work(nvme_workq, &dev->reset_work);
 		return;
 	}
@@ -1696,8 +1696,7 @@ static int nvme_kthread(void *data)
 				list_del_init(&dev->node);
 				dev_warn(&dev->pci_dev->dev,
 					"Failed status, reset controller\n");
-				PREPARE_WORK(&dev->reset_work,
-							nvme_reset_failed_dev);
+				dev->reset_workfn = nvme_reset_failed_dev;
 				queue_work(nvme_workq, &dev->reset_work);
 				continue;
 			}
@@ -2406,7 +2405,7 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 		return ret;
 	if (ret == -EBUSY) {
 		spin_lock(&dev_list_lock);
-		PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
+		dev->reset_workfn = nvme_remove_disks;
 		queue_work(nvme_workq, &dev->reset_work);
 		spin_unlock(&dev_list_lock);
 	}
@@ -2435,6 +2434,12 @@ static void nvme_reset_failed_dev(struct work_struct *ws)
 	nvme_dev_reset(dev);
 }
 
+static void nvme_reset_workfn(struct work_struct *work)
+{
+	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
+	dev->reset_workfn(work);
+}
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int result = -ENOMEM;
@@ -2453,7 +2458,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto free;
 
 	INIT_LIST_HEAD(&dev->namespaces);
-	INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
+	dev->reset_workfn = nvme_reset_failed_dev;
+	INIT_WORK(&dev->reset_work, nvme_reset_workfn);
 	dev->pci_dev = pdev;
 	pci_set_drvdata(pdev, dev);
 	result = nvme_set_instance(dev);
@@ -2553,7 +2559,7 @@ static int nvme_resume(struct device *dev)
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
 	if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
-		PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
+		ndev->reset_workfn = nvme_reset_failed_dev;
 		queue_work(nvme_workq, &ndev->reset_work);
 	}
 	return 0;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 55cd110a49c4..c204b7d1532c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2607,7 +2607,7 @@ int dw_mci_probe(struct dw_mci *host)
 
 	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
 	host->card_workqueue = alloc_workqueue("dw-mci-card",
-			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
+			WQ_MEM_RECLAIM, 1);
 	if (!host->card_workqueue) {
 		ret = -ENOMEM;
 		goto err_dmaunmap;
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index caddc1b427a9..42a2e06512f2 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -764,7 +764,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 	/*
 	 * Overwrite TX done handler
 	 */
-	PREPARE_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
+	INIT_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
 
 	return 0;
 }
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index fb7300837fee..bc1e5139ba29 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -699,8 +699,6 @@ int ps3_vuart_read_async(struct ps3_system_bus_device *dev, unsigned int bytes)
 
 	BUG_ON(!bytes);
 
-	PREPARE_WORK(&priv->rx_list.work.work, ps3_vuart_work);
-
 	spin_lock_irqsave(&priv->rx_list.lock, flags);
 	if (priv->rx_list.bytes_held >= bytes) {
 		dev_dbg(&dev->core, "%s:%d: schedule_work %xh bytes\n",
@@ -1052,7 +1050,7 @@ static int ps3_vuart_probe(struct ps3_system_bus_device *dev)
 	INIT_LIST_HEAD(&priv->rx_list.head);
 	spin_lock_init(&priv->rx_list.lock);
 
-	INIT_WORK(&priv->rx_list.work.work, NULL);
+	INIT_WORK(&priv->rx_list.work.work, ps3_vuart_work);
 	priv->rx_list.work.trigger = 0;
 	priv->rx_list.work.dev = dev;
 
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 8af136e9c9dc..b22142ee5262 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -2036,6 +2036,13 @@ static void fwserial_auto_connect(struct work_struct *work)
 		schedule_delayed_work(&peer->connect, CONNECT_RETRY_DELAY);
 }
 
+static void fwserial_peer_workfn(struct work_struct *work)
+{
+	struct fwtty_peer *peer = to_peer(work, work);
+
+	peer->workfn(work);
+}
+
 /**
  * fwserial_add_peer - add a newly probed 'serial' unit device as a 'peer'
  * @serial: aggregate representing the specific fw_card to add the peer to
@@ -2100,7 +2107,7 @@ static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
 	peer->port = NULL;
 
 	init_timer(&peer->timer);
-	INIT_WORK(&peer->work, NULL);
+	INIT_WORK(&peer->work, fwserial_peer_workfn);
 	INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
 
 	/* associate peer with specific fw_card */
@@ -2702,7 +2709,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
 
 		} else {
 			peer->work_params.plug_req = pkt->plug_req;
-			PREPARE_WORK(&peer->work, fwserial_handle_plug_req);
+			peer->workfn = fwserial_handle_plug_req;
 			queue_work(system_unbound_wq, &peer->work);
 		}
 		break;
@@ -2731,7 +2738,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
 			fwtty_err(&peer->unit, "unplug req: busy\n");
 			rcode = RCODE_CONFLICT_ERROR;
 		} else {
-			PREPARE_WORK(&peer->work, fwserial_handle_unplug_req);
+			peer->workfn = fwserial_handle_unplug_req;
 			queue_work(system_unbound_wq, &peer->work);
 		}
 		break;
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 54f7f9b9b212..98b853d4acbc 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -91,6 +91,7 @@ struct fwtty_peer {
 	struct rcu_head rcu;
 
 	spinlock_t lock;
+	work_func_t workfn;
 	struct work_struct work;
 	struct peer_work_params work_params;
 	struct timer_list timer;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 64ea21971be2..5cbf78d0be25 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1040,7 +1040,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 		 */
 		if (type == HUB_INIT) {
 			delay = hub_power_on(hub, false);
-			PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2);
+			INIT_DELAYED_WORK(&hub->init_work, hub_init_func2);
 			schedule_delayed_work(&hub->init_work,
 					msecs_to_jiffies(delay));
 
@@ -1194,7 +1194,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
 	/* Don't do a long sleep inside a workqueue routine */
 	if (type == HUB_INIT2) {
-		PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
+		INIT_DELAYED_WORK(&hub->init_work, hub_init_func3);
 		schedule_delayed_work(&hub->init_work,
 				msecs_to_jiffies(delay));
 		return;		/* Continues at init3: below */
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6621f8008122..be75b500005d 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -75,6 +75,7 @@ struct afs_call {
 	const struct afs_call_type *type;	/* type of call */
 	const struct afs_wait_mode *wait_mode;	/* completion wait mode */
 	wait_queue_head_t	waitq;		/* processes awaiting completion */
+	work_func_t		async_workfn;
 	struct work_struct	async_work;	/* asynchronous work processor */
 	struct work_struct	work;		/* actual work processor */
 	struct sk_buff_head	rx_queue;	/* received packets */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 8ad8c2a0703a..ef943df73b8c 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -644,7 +644,7 @@ static void afs_process_async_call(struct work_struct *work)
 
 		/* we can't just delete the call because the work item may be
 		 * queued */
-		PREPARE_WORK(&call->async_work, afs_delete_async_call);
+		call->async_workfn = afs_delete_async_call;
 		queue_work(afs_async_calls, &call->async_work);
 	}
 
@@ -663,6 +663,13 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
 	call->reply_size += len;
 }
 
+static void afs_async_workfn(struct work_struct *work)
+{
+	struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+	call->async_workfn(work);
+}
+
 /*
  * accept the backlog of incoming calls
  */
@@ -685,7 +692,8 @@ static void afs_collect_incoming_call(struct work_struct *work)
 		return;
 	}
 
-	INIT_WORK(&call->async_work, afs_process_async_call);
+	call->async_workfn = afs_process_async_call;
+	INIT_WORK(&call->async_work, afs_async_workfn);
 	call->wait_mode = &afs_async_incoming_call;
 	call->type = &afs_RXCMxxxx;
 	init_waitqueue_head(&call->waitq);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 69ae03f6eb15..6b9aafed225f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -87,6 +87,7 @@ struct nvme_dev {
 	struct list_head namespaces;
 	struct kref kref;
 	struct miscdevice miscdev;
+	work_func_t reset_workfn;
 	struct work_struct reset_work;
 	char name[12];
 	char serial[20];
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 704f4f652d0a..532994651684 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -177,17 +177,6 @@ struct execute_work {
 #define DECLARE_DEFERRABLE_WORK(n, f)					\
 	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
 
-/*
- * initialize a work item's function pointer
- */
-#define PREPARE_WORK(_work, _func)					\
-	do {								\
-		(_work)->func = (_func);				\
-	} while (0)
-
-#define PREPARE_DELAYED_WORK(_work, _func)				\
-	PREPARE_WORK(&(_work)->work, (_func))
-
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 extern void __init_work(struct work_struct *work, int onstack);
 extern void destroy_work_on_stack(struct work_struct *work);
@@ -217,7 +206,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
 		INIT_LIST_HEAD(&(_work)->entry);			\
-		PREPARE_WORK((_work), (_func));				\
+		(_work)->func = (_func);				\
 	} while (0)
 #else
 #define __INIT_WORK(_work, _func, _onstack)				\
@@ -225,7 +214,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		__init_work((_work), _onstack);				\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 		INIT_LIST_HEAD(&(_work)->entry);			\
-		PREPARE_WORK((_work), (_func));				\
+		(_work)->func = (_func);				\
 	} while (0)
 #endif
 
@@ -295,17 +284,11 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
  * Documentation/workqueue.txt.
  */
 enum {
-	/*
-	 * All wqs are now non-reentrant making the following flag
-	 * meaningless. Will be removed.
-	 */
-	WQ_NON_REENTRANT	= 1 << 0, /* DEPRECATED */
-
 	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
 	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
 	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
-	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
+	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
 	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
 
 	/*
@@ -602,21 +585,6 @@ static inline bool keventd_up(void)
 	return system_wq != NULL;
 }
 
-/*
- * Like above, but uses del_timer() instead of del_timer_sync(). This means,
- * if it returns 0 the timer function may be running and the queueing is in
- * progress.
- */
-static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
-{
-	bool ret;
-
-	ret = del_timer(&work->timer);
-	if (ret)
-		work_clear_pending(&work->work);
-	return ret;
-}
-
 /* used to be different but now identical to flush_work(), deprecated */
 static inline bool __deprecated flush_work_sync(struct work_struct *work)
 {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 85d9d94c0a3c..c83827e7c324 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -2016,7 +2016,7 @@ static int __init l2tp_init(void)
 	if (rc)
 		goto out;
 
-	l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0);
+	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
 	if (!l2tp_wq) {
 		pr_err("alloc_workqueue failed\n");
 		rc = -ENOMEM;
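
Not part of the patch itself: the sketch below illustrates, under invented names (struct my_dev, my_dev_reset, my_dev_remove, my_dev_init, my_dev_queue), the indirection pattern the nvme, afs and fwserial hunks above switch to. Rather than re-pointing work->func with PREPARE_WORK(), the driver keeps its own work_func_t next to the work item, installs a single fixed callback with INIT_WORK(), and dispatches through container_of().

/*
 * Hypothetical illustration of the PREPARE_WORK() replacement pattern;
 * none of these identifiers exist in the tree.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	work_func_t workfn;		/* handler to run on the next execution */
	struct work_struct work;	/* always queued with my_dev_workfn */
};

/* real handlers keep the usual work_func_t signature */
static void my_dev_reset(struct work_struct *work) { /* ... */ }
static void my_dev_remove(struct work_struct *work) { /* ... */ }

/* the only function ever installed in work->func */
static void my_dev_workfn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	dev->workfn(work);
}

static void my_dev_init(struct my_dev *dev)
{
	dev->workfn = my_dev_reset;
	INIT_WORK(&dev->work, my_dev_workfn);
}

/* replaces PREPARE_WORK(&dev->work, fn); queue_work(...); */
static void my_dev_queue(struct my_dev *dev, work_func_t fn)
{
	dev->workfn = fn;		/* e.g. my_dev_reset or my_dev_remove */
	queue_work(system_wq, &dev->work);
}

Keeping the dispatch pointer in the driver's own struct means work->func stays constant after INIT_WORK(), so the workqueue.h hunks above can drop PREPARE_WORK() and PREPARE_DELAYED_WORK() entirely.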