/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

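/*
 * Example (purely illustrative) invocation, selecting the lock flavor and
 * thread counts via the module parameters declared above:
 *
 *	modprobe locktorture torture_type=mutex_lock nwriters_stress=4 \
 *		 stat_interval=30
 */
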
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

int torture_runnable = IS_ENABLED(MODULE);
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};
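/*
 * Note that the read-side hooks (readlock, read_delay, readunlock) may be
 * left NULL for exclusive-only primitives; lock_torture_init() checks
 * cur_ops->readlock before creating reader kthreads or reader statistics.
 */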

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

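/*
 * The delay probabilities below are scaled by cxt.nrealwriters_stress so
 * that the aggregate rate of long delays (and of voluntary preemptions)
 * stays roughly constant regardless of how many writer kthreads are running.
 */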
static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
static DEFINE_WW_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

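/*
 * Acquire all three ww_mutexes under a single acquire context.  If
 * ww_mutex_lock() returns -EDEADLK for one of them, every lock already
 * held is dropped, the contended lock is then taken with
 * ww_mutex_lock_slow(), moved to the head of the list, and the remaining
 * locks are re-acquired -- the usual ww_mutex backoff-and-retry pattern.
 */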
static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

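/*
 * Exercise priority inheritance: a writer occasionally promotes itself to
 * SCHED_FIFO at the highest RT priority, and an already-boosted writer
 * eventually drops back to SCHED_NORMAL, so rt_mutex_lock() sees a mix of
 * boosted and unboosted waiters.
 */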
static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then be restored to its original priority, and so forth.
		 *
		 * When @trsp is NULL, we want to force-reset the task's
		 * priority before stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

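/*
 * percpu_init_rwsem() allocates per-CPU state and can therefore fail;
 * there is no sensible way to continue the test without it, hence the
 * BUG_ON() below.
 */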
static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

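/*
 * Adding a new lock flavor means supplying a lock_torture_ops structure
 * like the ones above and listing it in the torture_ops[] array in
 * lock_torture_init().  A minimal write-only sketch (hypothetical names,
 * read-side hooks left NULL) would look like:
 *
 *	static struct lock_torture_ops foo_lock_ops = {
 *		.writelock	= torture_foo_lock,
 *		.write_delay	= torture_foo_delay,
 *		.task_boost	= torture_boost_dummy,
 *		.writeunlock	= torture_foo_unlock,
 *		.name		= "foo_lock"
 *	};
 */
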
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
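/*
 * With per-thread acquisition counts feeding Max/Min, the resulting line
 * looks something like this (numbers purely illustrative):
 *
 *	Writes: Total: 22342017 Max/Min: 2810450/2788644 Fail: 0
 */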
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = 0;
	int i, n_stress;
	long max = 0;
	long min = statp[0].n_lock_acquired;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module. As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	kfree(cxt.lrsa);

end:
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */

	lock_is_write_held = 0;
	cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
	if (cxt.lwsa == NULL) {
		VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < cxt.nrealwriters_stress; i++) {
		cxt.lwsa[i].n_lock_fail = 0;
		cxt.lwsa[i].n_lock_acquired = 0;
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		lock_is_read_held = 0;
		cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
		if (cxt.lrsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
			firsterr = -ENOMEM;
			kfree(cxt.lwsa);
			cxt.lwsa = NULL;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealreaders_stress; i++) {
			cxt.lrsa[i].n_lock_fail = 0;
			cxt.lrsa[i].n_lock_acquired = 0;
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	if (writer_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or the user could even be allowed
	 * to choose the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);