Commit | Line | Data |
---|---|---|
e260be67 PM |
1 | /* |
2 | * Read-Copy Update mechanism for mutual exclusion (RT implementation) | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
17 | * | |
18 | * Copyright (C) IBM Corporation, 2006 | |
19 | * | |
20 | * Author: Paul McKenney <paulmck@us.ibm.com> | |
21 | * | |
22 | * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> | |
23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | |
24 | * Papers: | |
25 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf | |
26 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | |
27 | * | |
28 | * For detailed explanation of Read-Copy Update mechanism see - | |
29 | * Documentation/RCU | |
30 | * | |
31 | */ | |
32 | ||
33 | #ifndef __LINUX_RCUPREEMPT_H | |
34 | #define __LINUX_RCUPREEMPT_H | |
35 | ||
e260be67 PM |
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/ratelimit.h>	/* DEFINE_RATELIMIT_STATE, WARN_ON_RATELIMIT */
42 | ||
/*
 * Per-CPU state used by preemptible RCU to track dynticks-idle (NO_HZ)
 * transitions and scheduler quiescent states.
 *
 * dynticks is incremented on every entry to and exit from dynticks-idle
 * mode; rcu_enter_nohz()/rcu_exit_nohz() warn unless the value is even
 * while in dynticks-idle mode and odd while out of it.  The *_snap
 * fields hold snapshots taken by the grace-period machinery so it can
 * later tell that this CPU passed through a quiescent state.
 */
struct rcu_dyntick_sched {
	int dynticks;			/* dynticks-idle transition counter */
	int dynticks_snap;		/* snapshot of dynticks */
	int sched_qs;			/* scheduler quiescent-state count */
	int sched_qs_snap;		/* snapshot of sched_qs */
	int sched_dynticks_snap;	/* dynticks snapshot for sched GP */
};

DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
52 | ||
53 | static inline void rcu_qsctr_inc(int cpu) | |
54 | { | |
55 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | |
56 | ||
57 | rdssp->sched_qs++; | |
58 | } | |
e260be67 | 59 | #define rcu_bh_qsctr_inc(cpu) |
dd0078f4 SR |
60 | |
/*
 * Someone might want to pass call_rcu_bh as a function pointer.
 * So this needs to be a plain rename, not a function-like macro
 * (hence no parentheses after the name).
 */
#define call_rcu_bh call_rcu
e260be67 | 67 | |
4446a36f PM |
68 | /** |
69 | * call_rcu_sched - Queue RCU callback for invocation after sched grace period. | |
70 | * @head: structure to be used for queueing the RCU updates. | |
71 | * @func: actual update function to be invoked after the grace period | |
72 | * | |
73 | * The update function will be invoked some time after a full | |
74 | * synchronize_sched()-style grace period elapses, in other words after | |
75 | * all currently executing preempt-disabled sections of code (including | |
76 | * hardirq handlers, NMI handlers, and local_irq_save() blocks) have | |
77 | * completed. | |
78 | */ | |
79 | extern void call_rcu_sched(struct rcu_head *head, | |
80 | void (*func)(struct rcu_head *head)); | |
81 | ||
b55ab616 PM |
82 | extern void __rcu_read_lock(void) __acquires(RCU); |
83 | extern void __rcu_read_unlock(void) __releases(RCU); | |
e260be67 PM |
84 | extern int rcu_pending(int cpu); |
85 | extern int rcu_needs_cpu(int cpu); | |
86 | ||
/*
 * BH-flavored read-side critical sections: a plain RCU read-side
 * critical section with bottom halves disabled.
 *
 * Use do { } while (0) rather than bare { }: with bare braces,
 * "if (c) __rcu_read_lock_bh(); else ..." expands to a compound
 * statement followed by an empty statement, which detaches the else
 * and is a compile error / logic hazard.
 */
#define __rcu_read_lock_bh() \
	do { rcu_read_lock(); local_bh_disable(); } while (0)
#define __rcu_read_unlock_bh() \
	do { local_bh_enable(); rcu_read_unlock(); } while (0)
89 | ||
90 | extern void __synchronize_sched(void); | |
91 | ||
92 | extern void __rcu_init(void); | |
4446a36f | 93 | extern void rcu_init_sched(void); |
e260be67 PM |
94 | extern void rcu_check_callbacks(int cpu, int user); |
95 | extern void rcu_restart_cpu(int cpu); | |
96 | extern long rcu_batches_completed(void); | |
97 | ||
/*
 * Return the number of RCU batches processed thus far.  Useful for
 * debugging and statistics.  The _bh variant is identical to straight
 * RCU in this implementation, so just forward to it.
 */
static inline long rcu_batches_completed_bh(void)
{
	return rcu_batches_completed();
}
106 | ||
107 | #ifdef CONFIG_RCU_TRACE | |
108 | struct rcupreempt_trace; | |
109 | extern long *rcupreempt_flipctr(int cpu); | |
110 | extern long rcupreempt_data_completed(void); | |
111 | extern int rcupreempt_flip_flag(int cpu); | |
112 | extern int rcupreempt_mb_flag(int cpu); | |
113 | extern char *rcupreempt_try_flip_state_name(void); | |
114 | extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); | |
115 | #endif | |
116 | ||
117 | struct softirq_action; | |
118 | ||
#ifdef CONFIG_NO_HZ

/*
 * Note entry of this CPU into dynticks-idle (NO_HZ) mode.
 *
 * The memory barrier comes BEFORE the increment so that any CPU seeing
 * the new counter value also sees all of this CPU's prior RCU read-side
 * critical sections.  dynticks must be even while in dynticks-idle
 * mode, so warn (ratelimited) if the increment left it odd.
 *
 * NOTE(review): this header uses DEFINE_RATELIMIT_STATE and
 * WARN_ON_RATELIMIT but did not include <linux/ratelimit.h>, relying on
 * an indirect include; the include block now pulls it in explicitly.
 */
static inline void rcu_enter_nohz(void)
{
	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);

	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	__get_cpu_var(rcu_dyntick_sched).dynticks++;
	WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
}

/*
 * Note exit of this CPU from dynticks-idle (NO_HZ) mode.
 *
 * The memory barrier comes AFTER the increment so that any CPU seeing
 * the new counter value orders it before this CPU's subsequent RCU
 * read-side critical sections.  dynticks must be odd while out of
 * dynticks-idle mode, so warn (ratelimited) if it is even.
 */
static inline void rcu_exit_nohz(void)
{
	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);

	__get_cpu_var(rcu_dyntick_sched).dynticks++;
	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
			  &rs);
}

#else /* CONFIG_NO_HZ */
#define rcu_enter_nohz()	do { } while (0)
#define rcu_exit_nohz()		do { } while (0)
#endif /* CONFIG_NO_HZ */
144 | ||
e260be67 | 145 | #endif /* __LINUX_RCUPREEMPT_H */ |