/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
              "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;       /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;    /* # fake writer threads */
static int stat_interval;       /* Interval between stats, in seconds. */
                                /*  Defaults to "only at end of test". */
static int verbose;             /* Print more debug info. */
static int test_no_idle_hz;     /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec) */
static int stutter = 5;         /* Start/stop testing interval (in sec) */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
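
/*
 * Example invocation (all values illustrative):
 *
 *      modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
 *
 * This runs the SRCU flavor with four reader kthreads and a statistics
 * line every 30 seconds; see Documentation/RCU/torture.txt for the full
 * description of each parameter.
 */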

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
        do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
        struct rcu_head rtort_rcu;
        int rtort_pipe_count;
        struct list_head rtort_free;
        int rtort_mbtest;
};

static int fullstop = 0;        /* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
        { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
        { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static struct list_head rcu_torture_removed;

static int stutter_pause_test = 0;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
        struct list_head *p;

        spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
                atomic_inc(&n_rcu_torture_alloc_fail);
                spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
        atomic_inc(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
        spin_unlock_bh(&rcu_torture_lock);
        return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
        atomic_inc(&n_rcu_torture_free);
        spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
        spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
        unsigned long rrs_state;
        long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
        if (--rrsp->rrs_count < 0) {
                rrsp->rrs_state +=
                        (unsigned long)cpu_clock(raw_smp_processor_id());
                rrsp->rrs_count = RCU_RANDOM_REFRESH;
        }
        rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
        return swahw32(rrsp->rrs_state);
}
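
/*
 * Typical usage keeps one rcu_random_state per kthread, so no locking
 * is required.  Minimal sketch (example_thread() is hypothetical, not
 * part of this module) of a thread body spinning with pseudo-random
 * delays of 0..1023 microseconds, mirroring the kthreads below:
 *
 *      static int example_thread(void *unused)
 *      {
 *              DEFINE_RCU_RANDOM(rand);
 *
 *              while (!kthread_should_stop())
 *                      udelay(rcu_random(&rand) & 0x3ff);
 *              return 0;
 *      }
 */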

static void
rcu_stutter_wait(void)
{
        while (stutter_pause_test || !rcutorture_runnable)
                schedule_timeout_interruptible(1);
}

/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_torture_ops {
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readdelay)(struct rcu_random_state *rrsp);
        void (*readunlock)(int idx);
        int (*completed)(void);
        void (*deferredfree)(struct rcu_torture *p);
        void (*sync)(void);
        void (*cb_barrier)(void);
        int (*stats)(char *page);
        char *name;
};
static struct rcu_torture_ops *cur_ops = NULL;
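
/*
 * All test kthreads reach the flavor under test only through cur_ops,
 * so a read-side critical section looks the same for rcu, rcu_bh, srcu,
 * and sched.  The pattern used by the readers below:
 *
 *      idx = cur_ops->readlock();
 *      p = rcu_dereference(rcu_torture_current);
 *      ...examine p, bump the per-CPU pipeline counters...
 *      cur_ops->readunlock(idx);
 *
 * The idx value exists for SRCU, whose unlock primitive must be told
 * which set of per-CPU counters the matching lock incremented; the
 * other flavors ignore it and simply return 0 from readlock.
 */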

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
        long delay;
        const long longdelay = 200;

        /* We want there to be long-running readers, but not all the time. */

        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
        if (!delay)
                udelay(longdelay);
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
        return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
        int i;
        struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

        if (fullstop) {
                /* Test is ending, just drop callbacks on the floor. */
                /* The next initialization will pick up the pieces. */
                return;
        }
        i = rp->rtort_pipe_count;
        if (i > RCU_TORTURE_PIPE_LEN)
                i = RCU_TORTURE_PIPE_LEN;
        atomic_inc(&rcu_torture_wcount[i]);
        if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                rp->rtort_mbtest = 0;
                rcu_torture_free(rp);
        } else
                cur_ops->deferredfree(rp);
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
        .init = NULL,
        .cleanup = NULL,
        .readlock = rcu_torture_read_lock,
        .readdelay = rcu_read_delay,
        .readunlock = rcu_torture_read_unlock,
        .completed = rcu_torture_completed,
        .deferredfree = rcu_torture_deferred_free,
        .sync = synchronize_rcu,
        .cb_barrier = rcu_barrier,
        .stats = NULL,
        .name = "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
        int i;
        struct rcu_torture *rp;
        struct rcu_torture *rp1;

        cur_ops->sync();
        list_add(&p->rtort_free, &rcu_torture_removed);
        list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
                i = rp->rtort_pipe_count;
                if (i > RCU_TORTURE_PIPE_LEN)
                        i = RCU_TORTURE_PIPE_LEN;
                atomic_inc(&rcu_torture_wcount[i]);
                if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                        rp->rtort_mbtest = 0;
                        list_del(&rp->rtort_free);
                        rcu_torture_free(rp);
                }
        }
}

static void rcu_sync_torture_init(void)
{
        INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = rcu_torture_read_lock,
        .readdelay = rcu_read_delay,
        .readunlock = rcu_torture_read_unlock,
        .completed = rcu_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = synchronize_rcu,
        .cb_barrier = NULL,
        .stats = NULL,
        .name = "rcu_sync"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
        rcu_read_lock_bh();
        return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
        rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
        return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

struct rcu_bh_torture_synchronize {
        struct rcu_head head;
        struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
        struct rcu_bh_torture_synchronize *rcu;

        rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
        complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
        struct rcu_bh_torture_synchronize rcu;

        init_completion(&rcu.completion);
        call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
        wait_for_completion(&rcu.completion);
}
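
/*
 * The wakeme-after-callback idiom above converts the asynchronous
 * call_rcu_bh() interface into a synchronous grace-period wait: post a
 * callback whose only job is to fire a completion embedded next to the
 * rcu_head (container_of() recovers the enclosing structure), then
 * block until it fires.  The same on-stack pattern works for any
 * flavor that offers only a call_rcu()-style interface.
 */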

static struct rcu_torture_ops rcu_bh_ops = {
        .init = NULL,
        .cleanup = NULL,
        .readlock = rcu_bh_torture_read_lock,
        .readdelay = rcu_read_delay,    /* just reuse rcu's version. */
        .readunlock = rcu_bh_torture_read_unlock,
        .completed = rcu_bh_torture_completed,
        .deferredfree = rcu_bh_torture_deferred_free,
        .sync = rcu_bh_torture_synchronize,
        .cb_barrier = rcu_barrier_bh,
        .stats = NULL,
        .name = "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = rcu_bh_torture_read_lock,
        .readdelay = rcu_read_delay,    /* just reuse rcu's version. */
        .readunlock = rcu_bh_torture_read_unlock,
        .completed = rcu_bh_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = rcu_bh_torture_synchronize,
        .cb_barrier = NULL,
        .stats = NULL,
        .name = "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
        init_srcu_struct(&srcu_ctl);
        rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
        synchronize_srcu(&srcu_ctl);
        cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
        return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
        long delay;
        const long uspertick = 1000000 / HZ;
        const long longdelay = 10;

        /* We want there to be long-running readers, but not all the time. */

        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
        if (!delay)
                schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
        srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
        return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
        synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
        int cnt = 0;
        int cpu;
        int idx = srcu_ctl.completed & 0x1;

        cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
                       torture_type, TORTURE_FLAG, idx);
        for_each_possible_cpu(cpu) {
                cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
        }
        cnt += sprintf(&page[cnt], "\n");
        return cnt;
}

static struct rcu_torture_ops srcu_ops = {
        .init = srcu_torture_init,
        .cleanup = srcu_torture_cleanup,
        .readlock = srcu_torture_read_lock,
        .readdelay = srcu_read_delay,
        .readunlock = srcu_torture_read_unlock,
        .completed = srcu_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = srcu_torture_synchronize,
        .cb_barrier = NULL,
        .stats = srcu_torture_stats,
        .name = "srcu"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
        preempt_disable();
        return 0;
}

static void sched_torture_read_unlock(int idx)
{
        preempt_enable();
}

static int sched_torture_completed(void)
{
        return 0;
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static void sched_torture_synchronize(void)
{
        synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = sched_torture_read_lock,
        .readdelay = rcu_read_delay,    /* just reuse rcu's version. */
        .readunlock = sched_torture_read_unlock,
        .completed = sched_torture_completed,
        .deferredfree = rcu_sched_torture_deferred_free,
        .sync = sched_torture_synchronize,
        .cb_barrier = rcu_barrier_sched,
        .stats = NULL,
        .name = "sched"
};

static struct rcu_torture_ops sched_ops_sync = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = sched_torture_read_lock,
        .readdelay = rcu_read_delay,    /* just reuse rcu's version. */
        .readunlock = sched_torture_read_unlock,
        .completed = sched_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = sched_torture_synchronize,
        .cb_barrier = NULL,
        .stats = NULL,
        .name = "sched_sync"
};

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
        int i;
        long oldbatch = rcu_batches_completed();
        struct rcu_torture *rp;
        struct rcu_torture *old_rp;
        static DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1);
                if ((rp = rcu_torture_alloc()) == NULL)
                        continue;
                rp->rtort_pipe_count = 0;
                udelay(rcu_random(&rand) & 0x3ff);
                old_rp = rcu_torture_current;
                rp->rtort_mbtest = 1;
                rcu_assign_pointer(rcu_torture_current, rp);
                smp_wmb();
                if (old_rp != NULL) {
                        i = old_rp->rtort_pipe_count;
                        if (i > RCU_TORTURE_PIPE_LEN)
                                i = RCU_TORTURE_PIPE_LEN;
                        atomic_inc(&rcu_torture_wcount[i]);
                        old_rp->rtort_pipe_count++;
                        cur_ops->deferredfree(old_rp);
                }
                rcu_torture_current_version++;
                oldbatch = cur_ops->completed();
                rcu_stutter_wait();
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}
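
/*
 * Worked example of the pipeline (element names hypothetical): the
 * writer publishes element A, then later replaces it with B.  At
 * replacement time A's rtort_pipe_count is bumped to 1 and A is handed
 * to cur_ops->deferredfree(); each grace period thereafter delivers A
 * to rcu_torture_cb(), which advances the count by one, so the count
 * records how many grace periods A has survived since being
 * unpublished.  Readers should only ever observe elements in stage 0
 * (still current) or stage 1 (unpublished less than one grace period
 * ago); an observation in stage 2 or later means a read-side critical
 * section outlived a full grace period, i.e. the flavor under test is
 * broken.  After RCU_TORTURE_PIPE_LEN trips through the callback, A
 * returns to the freelist for reuse.
 */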

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
        DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
                udelay(rcu_random(&rand) & 0x3ff);
                cur_ops->sync();
                rcu_stutter_wait();
        } while (!kthread_should_stop() && !fullstop);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
        int completed;
        int idx;
        DEFINE_RCU_RANDOM(rand);
        struct rcu_torture *p;
        int pipe_count;

        VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
        set_user_nice(current, 19);

        do {
                idx = cur_ops->readlock();
                completed = cur_ops->completed();
                p = rcu_dereference(rcu_torture_current);
                if (p == NULL) {
                        /* Wait for rcu_torture_writer to get underway */
                        cur_ops->readunlock(idx);
                        schedule_timeout_interruptible(HZ);
                        continue;
                }
                if (p->rtort_mbtest == 0)
                        atomic_inc(&n_rcu_torture_mberror);
                cur_ops->readdelay(&rand);
                preempt_disable();
                pipe_count = p->rtort_pipe_count;
                if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
                ++__get_cpu_var(rcu_torture_count)[pipe_count];
                completed = cur_ops->completed() - completed;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
                }
                ++__get_cpu_var(rcu_torture_batch)[completed];
                preempt_enable();
                cur_ops->readunlock(idx);
                schedule();
                rcu_stutter_wait();
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
        int cnt = 0;
        int cpu;
        int i;
        long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
        long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
                        batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
                }
        }
        for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
                if (pipesummary[i] != 0)
                        break;
        }
        cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt],
                       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
                       "rtmbe: %d",
                       rcu_torture_current,
                       rcu_torture_current_version,
                       list_empty(&rcu_torture_freelist),
                       atomic_read(&n_rcu_torture_alloc),
                       atomic_read(&n_rcu_torture_alloc_fail),
                       atomic_read(&n_rcu_torture_free),
                       atomic_read(&n_rcu_torture_mberror));
        if (atomic_read(&n_rcu_torture_mberror) != 0)
                cnt += sprintf(&page[cnt], " !!!");
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        if (i > 1) {
                cnt += sprintf(&page[cnt], "!!! ");
                atomic_inc(&n_rcu_torture_error);
        }
        cnt += sprintf(&page[cnt], "Reader Pipe: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt], "Reader Batch: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                cnt += sprintf(&page[cnt], " %d",
                               atomic_read(&rcu_torture_wcount[i]));
        }
        cnt += sprintf(&page[cnt], "\n");
        if (cur_ops->stats)
                cnt += cur_ops->stats(&page[cnt]);
        return cnt;
}
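
/*
 * Sample of the resulting console output for torture_type "rcu" (all
 * numbers invented for illustration):
 *
 *      rcu-torture: rtc: c0252ea0 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915 rtmbe: 0
 *      rcu-torture: Reader Pipe:  90512472 3456 0 0 0 0 0 0 0 0 0
 *      rcu-torture: Reader Batch:  90515928 0 0 0 0 0 0 0 0 0 0
 *      rcu-torture: Free-Block Circulation:  1915 1915 1914 1913 1912 1911 1910 1909 1908 1907 0
 *
 * A nonzero Reader Pipe entry past the second column, or any line
 * flagged with "!!!", indicates a failure of the flavor under test.
 */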

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
        int cnt;

        cnt = rcu_torture_printk(printk_buf);
        printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                rcu_torture_stats_print();
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
        return 0;
}

static int rcu_idle_cpu;        /* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
        cpumask_t tmp_mask;
        int i;

        cpus_setall(tmp_mask);
        get_online_cpus();

        /* No point in shuffling if there is only one online CPU (ex: UP) */
        if (num_online_cpus() == 1) {
                put_online_cpus();
                return;
        }

        if (rcu_idle_cpu != -1)
                cpu_clear(rcu_idle_cpu, tmp_mask);

        set_cpus_allowed_ptr(current, &tmp_mask);

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        if (reader_tasks[i])
                                set_cpus_allowed_ptr(reader_tasks[i],
                                                     &tmp_mask);
        }

        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++)
                        if (fakewriter_tasks[i])
                                set_cpus_allowed_ptr(fakewriter_tasks[i],
                                                     &tmp_mask);
        }

        if (writer_task)
                set_cpus_allowed_ptr(writer_task, &tmp_mask);

        if (stats_task)
                set_cpus_allowed_ptr(stats_task, &tmp_mask);

        if (rcu_idle_cpu == -1)
                rcu_idle_cpu = num_online_cpus() - 1;
        else
                rcu_idle_cpu--;

        put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
        do {
                schedule_timeout_interruptible(shuffle_interval * HZ);
                rcu_torture_shuffle_tasks();
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
        return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
        do {
                schedule_timeout_interruptible(stutter * HZ);
                stutter_pause_test = 1;
                if (!kthread_should_stop())
                        schedule_timeout_interruptible(stutter * HZ);
                stutter_pause_test = 0;
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
        return 0;
}
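
/*
 * Timeline sketch for stutter=5: the flag alternates every five
 * seconds, and every other kthread polls it in rcu_stutter_wait():
 *
 *      0s          5s           10s          15s
 *      run ------> pause -----> run -------> pause ...
 *
 * The intent is to exercise transitions between heavy load and near
 * idle, which steady-state testing never sees.
 */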

static inline void
rcu_torture_print_module_parms(char *tag)
{
        printk(KERN_ALERT "%s" TORTURE_FLAG
                "--- %s: nreaders=%d nfakewriters=%d "
                "stat_interval=%d verbose=%d test_no_idle_hz=%d "
                "shuffle_interval=%d stutter=%d\n",
                torture_type, tag, nrealreaders, nfakewriters,
                stat_interval, verbose, test_no_idle_hz, shuffle_interval,
                stutter);
}

static void
rcu_torture_cleanup(void)
{
        int i;

        fullstop = 1;
        if (stutter_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
                kthread_stop(stutter_task);
        }
        stutter_task = NULL;
        if (shuffler_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
                kthread_stop(shuffler_task);
        }
        shuffler_task = NULL;

        if (writer_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
                kthread_stop(writer_task);
        }
        writer_task = NULL;

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++) {
                        if (reader_tasks[i]) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_reader task");
                                kthread_stop(reader_tasks[i]);
                        }
                        reader_tasks[i] = NULL;
                }
                kfree(reader_tasks);
                reader_tasks = NULL;
        }
        rcu_torture_current = NULL;

        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++) {
                        if (fakewriter_tasks[i]) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_fakewriter task");
                                kthread_stop(fakewriter_tasks[i]);
                        }
                        fakewriter_tasks[i] = NULL;
                }
                kfree(fakewriter_tasks);
                fakewriter_tasks = NULL;
        }

        if (stats_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
                kthread_stop(stats_task);
        }
        stats_task = NULL;

        /* Wait for all RCU callbacks to fire. */

        if (cur_ops->cb_barrier != NULL)
                cur_ops->cb_barrier();

        rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

        if (cur_ops->cleanup)
                cur_ops->cleanup();
        if (atomic_read(&n_rcu_torture_error))
                rcu_torture_print_module_parms("End of test: FAILURE");
        else
                rcu_torture_print_module_parms("End of test: SUCCESS");
}

static int __init
rcu_torture_init(void)
{
        int i;
        int cpu;
        int firsterr = 0;
        static struct rcu_torture_ops *torture_ops[] =
                { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
                  &srcu_ops, &sched_ops, &sched_ops_sync, };

        /* Process args and tell the world that the torturer is on the job. */
        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
                cur_ops = torture_ops[i];
                if (strcmp(torture_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(torture_ops)) {
                printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
                       torture_type);
                return (-EINVAL);
        }
        if (cur_ops->init)
                cur_ops->init(); /* no "goto unwind" prior to this point!!! */

        if (nreaders >= 0)
                nrealreaders = nreaders;
        else
                nrealreaders = 2 * num_online_cpus();
        rcu_torture_print_module_parms("Start of test");
        fullstop = 0;

        /* Set up the freelist. */

        INIT_LIST_HEAD(&rcu_torture_freelist);
        for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
                rcu_tortures[i].rtort_mbtest = 0;
                list_add_tail(&rcu_tortures[i].rtort_free,
                              &rcu_torture_freelist);
        }

        /* Initialize the statistics so that each run gets its own numbers. */

        rcu_torture_current = NULL;
        rcu_torture_current_version = 0;
        atomic_set(&n_rcu_torture_alloc, 0);
        atomic_set(&n_rcu_torture_alloc_fail, 0);
        atomic_set(&n_rcu_torture_free, 0);
        atomic_set(&n_rcu_torture_mberror, 0);
        atomic_set(&n_rcu_torture_error, 0);
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                atomic_set(&rcu_torture_wcount[i], 0);
        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        per_cpu(rcu_torture_count, cpu)[i] = 0;
                        per_cpu(rcu_torture_batch, cpu)[i] = 0;
                }
        }

        /* Start up the kthreads. */

        VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
        writer_task = kthread_run(rcu_torture_writer, NULL,
                                  "rcu_torture_writer");
        if (IS_ERR(writer_task)) {
                firsterr = PTR_ERR(writer_task);
                VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
                writer_task = NULL;
                goto unwind;
        }
        fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
                                   GFP_KERNEL);
        if (fakewriter_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nfakewriters; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
                fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
                                                  "rcu_torture_fakewriter");
                if (IS_ERR(fakewriter_tasks[i])) {
                        firsterr = PTR_ERR(fakewriter_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
                        fakewriter_tasks[i] = NULL;
                        goto unwind;
                }
        }
        reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
                reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
                                              "rcu_torture_reader");
                if (IS_ERR(reader_tasks[i])) {
                        firsterr = PTR_ERR(reader_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
                        reader_tasks[i] = NULL;
                        goto unwind;
                }
        }
        if (stat_interval > 0) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
                stats_task = kthread_run(rcu_torture_stats, NULL,
                                         "rcu_torture_stats");
                if (IS_ERR(stats_task)) {
                        firsterr = PTR_ERR(stats_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
                        stats_task = NULL;
                        goto unwind;
                }
        }
        if (test_no_idle_hz) {
                rcu_idle_cpu = num_online_cpus() - 1;
                /* Create the shuffler thread */
                shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
                                            "rcu_torture_shuffle");
                if (IS_ERR(shuffler_task)) {
                        firsterr = PTR_ERR(shuffler_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
                        shuffler_task = NULL;
                        goto unwind;
                }
        }
        if (stutter < 0)
                stutter = 0;
        if (stutter) {
                /* Create the stutter thread */
                stutter_task = kthread_run(rcu_torture_stutter, NULL,
                                           "rcu_torture_stutter");
                if (IS_ERR(stutter_task)) {
                        firsterr = PTR_ERR(stutter_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
                        stutter_task = NULL;
                        goto unwind;
                }
        }
        return 0;

unwind:
        rcu_torture_cleanup();
        return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);