From 6c25d4773f675b6dfbe348e96f88edd4c9c31474 Mon Sep 17 00:00:00 2001
From: Daniel Wagner <daniel.wagner@bmw-carit.de>
Date: Fri, 11 Jul 2014 15:26:11 +0200
Subject: [PATCH 141/366] work-simple: Simple work queue implementation

Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks from
irq context. The callbacks are executed in kthread context.

Based on wait-simple.

Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/work-simple.h |  24 ++++++
 kernel/sched/Makefile       |   2 +-
 kernel/sched/work-simple.c  | 173 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 198 insertions(+), 1 deletion(-)
 create mode 100644 include/linux/work-simple.h
 create mode 100644 kernel/sched/work-simple.c

diff --git a/include/linux/work-simple.h b/include/linux/work-simple.h
new file mode 100644
index 0000000..f175fa9
--- /dev/null
+++ b/include/linux/work-simple.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
+
+#include <linux/list.h>
+
+struct swork_event {
+	struct list_head item;
+	unsigned long flags;
+	void (*func)(struct swork_event *);
+};
+
+static inline void INIT_SWORK(struct swork_event *event,
+			      void (*func)(struct swork_event *))
+{
+	event->flags = 0;
+	event->func = func;
+}
+
+bool swork_queue(struct swork_event *sev);
+
+int swork_get(void);
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 26505c1..6202e8d 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -15,7 +15,7 @@ endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o wait-simple.o completion.o idle.o
+obj-y += wait.o wait-simple.o work-simple.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/work-simple.c b/kernel/sched/work-simple.c
new file mode 100644
index 0000000..e57a052
--- /dev/null
+++ b/kernel/sched/work-simple.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
+ *
+ * Provides a framework for enqueuing callbacks from irq context
+ * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
+ */
+
+#include <linux/wait-simple.h>
+#include <linux/work-simple.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define SWORK_EVENT_PENDING	(1 << 0)
+
+static DEFINE_MUTEX(worker_mutex);
+static struct sworker *glob_worker;
+
+struct sworker {
+	struct list_head events;
+	struct swait_head wq;
+
+	raw_spinlock_t lock;
+
+	struct task_struct *task;
+	int refs;
+};
+
+static bool swork_readable(struct sworker *worker)
+{
+	bool r;
+
+	if (kthread_should_stop())
+		return true;
+
+	raw_spin_lock_irq(&worker->lock);
+	r = !list_empty(&worker->events);
+	raw_spin_unlock_irq(&worker->lock);
+
+	return r;
+}
+
+static int swork_kthread(void *arg)
+{
+	struct sworker *worker = arg;
+
+	for (;;) {
+		swait_event_interruptible(worker->wq,
+					swork_readable(worker));
+		if (kthread_should_stop())
+			break;
+
+		raw_spin_lock_irq(&worker->lock);
+		while (!list_empty(&worker->events)) {
+			struct swork_event *sev;
+
+			sev = list_first_entry(&worker->events,
+					struct swork_event, item);
+			list_del(&sev->item);
+			raw_spin_unlock_irq(&worker->lock);
+
+			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
+							 &sev->flags));
+			sev->func(sev);
+			raw_spin_lock_irq(&worker->lock);
+		}
+		raw_spin_unlock_irq(&worker->lock);
+	}
+	return 0;
+}
+
+static struct sworker *swork_create(void)
+{
+	struct sworker *worker;
+
+	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	if (!worker)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&worker->events);
+	raw_spin_lock_init(&worker->lock);
+	init_swait_head(&worker->wq);
+
+	worker->task = kthread_run(swork_kthread, worker, "kswork");
+	if (IS_ERR(worker->task)) {
+		kfree(worker);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return worker;
+}
+
+static void swork_destroy(struct sworker *worker)
+{
+	kthread_stop(worker->task);
+
+	WARN_ON(!list_empty(&worker->events));
+	kfree(worker);
+}
+
+/**
+ * swork_queue - queue swork
+ *
+ * Returns %false if @sev was already on a queue, %true otherwise.
+ *
+ * The work is queued and processed on a random CPU.
+ */
+bool swork_queue(struct swork_event *sev)
+{
+	unsigned long flags;
+
+	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
+		return false;
+
+	raw_spin_lock_irqsave(&glob_worker->lock, flags);
+	list_add_tail(&sev->item, &glob_worker->events);
+	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
+
+	swait_wake(&glob_worker->wq);
+	return true;
+}
+EXPORT_SYMBOL_GPL(swork_queue);
+
+/**
+ * swork_get - get an instance of the sworker
+ *
+ * Returns a negative error code if the initialization of the worker did not
+ * work, %0 otherwise.
+ *
+ */
+int swork_get(void)
+{
+	struct sworker *worker;
+
+	mutex_lock(&worker_mutex);
+	if (!glob_worker) {
+		worker = swork_create();
+		if (IS_ERR(worker)) {
+			mutex_unlock(&worker_mutex);
+			return -ENOMEM;
+		}
+
+		glob_worker = worker;
+	}
+
+	glob_worker->refs++;
+	mutex_unlock(&worker_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(swork_get);
+
+/**
+ * swork_put - puts an instance of the sworker
+ *
+ * Will destroy the sworker thread. This function must not be called until all
+ * queued events have been completed.
+ */
+void swork_put(void)
+{
+	mutex_lock(&worker_mutex);
+
+	glob_worker->refs--;
+	if (glob_worker->refs > 0)
+		goto out;
+
+	swork_destroy(glob_worker);
+	glob_worker = NULL;
+out:
+	mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
-- 
1.9.1

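Editor's note: for context, a minimal sketch of how a consumer of this API
might look. A module takes a reference on the global "kswork" thread with
swork_get(), initializes an event with INIT_SWORK(), queues it from hard-irq
context with swork_queue(), and drops the reference with swork_put() on
teardown. The module below is illustrative only and is not part of the patch;
every name in it (my_dev, my_ev_handler, my_irq_handler, my_init, my_exit) and
the elided irq setup are assumptions for the example.

/*
 * Hypothetical usage sketch for work-simple (not part of the patch).
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/work-simple.h>

struct my_dev {
	struct swork_event ev;
};

static struct my_dev dev;

/* Runs in kthread context ("kswork"), so it is allowed to sleep. */
static void my_ev_handler(struct swork_event *ev)
{
	struct my_dev *d = container_of(ev, struct my_dev, ev);

	pr_info("deferred work for %p\n", d);
}

/* Hard-irq context: only marks the event pending and wakes the worker. */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *d = data;

	swork_queue(&d->ev);	/* returns false if the event was already queued */
	return IRQ_HANDLED;
}

static int __init my_init(void)
{
	int ret;

	ret = swork_get();	/* create or take a reference on the kswork thread */
	if (ret)
		return ret;

	INIT_SWORK(&dev.ev, my_ev_handler);
	/* request_irq(..., my_irq_handler, ..., &dev) would go here. */
	return 0;
}

static void __exit my_exit(void)
{
	/* free_irq() first and make sure no event is still queued. */
	swork_put();		/* drop the reference; the last put stops the kthread */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");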