/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}
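
/*
 * Illustrative decomposition (example values, not from the original
 * source): with RCU_SEQ_CTR_SHIFT == 2, the low two bits hold state
 * and the remaining bits hold the counter.  For example, s == 0xb
 * (binary 1011) gives rcu_seq_ctr(0xb) == 2 and rcu_seq_state(0xb) == 3.
 */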

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
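
/*
 * Illustrative lifecycle, assuming RCU_SEQ_CTR_SHIFT == 2 (example
 * values, not from the original source): starting from *sp == 0x8
 * (counter 2, idle), rcu_seq_start() advances it to 0x9 (counter 2,
 * state 1: grace period in progress), and rcu_seq_end() then stores
 * (0x9 | 0x3) + 1 == 0xc (counter 3, idle), so the counter advances
 * and the state bits return to zero.
 */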

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
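
/*
 * Worked example (illustrative values, not from the original source):
 * with RCU_SEQ_STATE_MASK == 0x3, if *sp == 0x9 (counter 2, grace
 * period in progress), then
 *
 *	s = (0x9 + 2 * 0x3 + 1) & ~0x3 = 0x10
 *
 * i.e. counter 4: because the in-progress grace period may predate the
 * caller's updates, the snapshot also waits out the *following* full
 * grace period.  If instead *sp == 0x8 (idle), s == 0xc (counter 3),
 * so only one full grace period is needed.
 */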

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
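
/*
 * Minimal usage sketch (hypothetical helper, not part of this header):
 * snapshot the sequence number, then poll until a full grace period
 * has elapsed.  Real callers would sleep or defer rather than spin.
 */
static inline void example_wait_for_gp(unsigned long *sp)
{
	unsigned long s = rcu_seq_snap(sp);	/* Value that means "done". */

	while (!rcu_seq_done(sp, s))
		cpu_relax();			/* Placeholder for real waiting. */
}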

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}
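
/*
 * Worked example (illustrative values, not from the original source):
 * with old == 0x8 (counter 2, idle), rcu_seq_completed_gp(0x8, 0xc) is
 * true because counter 3 has been reached, and rcu_seq_new_gp(0x8, 0x9)
 * is true because the set state bit shows a grace period started after
 * the old value was collected.
 */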

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either new or old is not an exact grace-period boundary.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
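
/*
 * Worked example (illustrative values, not from the original source):
 * with old == 0x8 and new == 0x10 (counters 2 and 4, both idle),
 * rnd_diff == 0x8, and the function returns ((8 - 3 - 1) >> 2) + 2 == 3
 * even though the counter advanced by just two grace periods; as the
 * comment above says, the result is only a rough figure.
 */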

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  These are in rcupdate.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
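
/*
 * Illustrative pairing (hypothetical dispatch path, not part of this
 * header): the queue side runs when a callback is posted; the unqueue
 * side runs just before the callback is invoked.  A nonzero return
 * from debug_rcu_head_queue() suggests the rcu_head is already
 * pending, e.g. a probable double call_rcu().
 */
static inline void example_post_and_run(struct rcu_head *rhp,
					void (*func)(struct rcu_head *))
{
	WARN_ON_ONCE(debug_rcu_head_queue(rhp)); /* Detect double-queueing. */
	/* ... a real caller would wait for a grace period here ... */
	debug_rcu_head_unqueue(rhp);		 /* About to invoke. */
	func(rhp);
}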

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)	tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
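
/*
 * Usage sketch (hypothetical diagnostic site, not part of this
 * header): dump the trace buffer at most once for this callsite,
 * passing one of the enum ftrace_dump_mode values:
 *
 *	if (unlikely(gp_appears_stalled))
 *		rcu_ftrace_dump(DUMP_ALL);
 */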

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
extern bool rcu_fanout_exact;
extern int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
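
/*
 * Worked example for the balanced branch (illustrative values, not
 * from the original source): with nr_cpu_ids == 64, rcu_num_lvls == 2,
 * and levelcnt == {1, 4}, the bottom-up pass yields
 * levelspread[1] == (64 + 4 - 1) / 4 == 16 (CPUs per leaf) and
 * levelspread[0] == (4 + 1 - 1) / 1 == 4 (leaves under the root).
 */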

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))
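
/*
 * Usage sketch (hypothetical, not part of this header): visit every
 * possible CPU covered by one leaf, e.g. from within
 * rcu_for_each_leaf_node():
 *
 *	int cpu;
 *
 *	for_each_leaf_node_possible_cpu(rnp, cpu)
 *		pr_info("leaf covers CPU %d\n", cpu);
 */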

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \
} while (0)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
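
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * wrappers mirror the plain raw_spinlock API, e.g.:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	raw_lockdep_assert_held_rcu_node(rnp);
 *	... update rnp fields, fully ordered against critical sections ...
 *	... previously completed on other levels of the tree ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */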

#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */

#ifdef CONFIG_SRCU
void srcu_init(void);
#else /* #ifdef CONFIG_SRCU */
static inline void srcu_init(void) { }
#endif /* #else #ifdef CONFIG_SRCU */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void show_rcu_tasks_gp_kthreads(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#endif /* __LINUX_RCU_H */