Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef __LINUX_SEQLOCK_H
3 : #define __LINUX_SEQLOCK_H
4 :
5 : /*
6 : * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
7 : * lockless readers (read-only retry loops), and no writer starvation.
8 : *
9 : * See Documentation/locking/seqlock.rst
10 : *
11 : * Copyrights:
12 : * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
13 : * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
14 : */
15 :
16 : #include <linux/compiler.h>
17 : #include <linux/kcsan-checks.h>
18 : #include <linux/lockdep.h>
19 : #include <linux/mutex.h>
20 : #include <linux/ww_mutex.h>
21 : #include <linux/preempt.h>
22 : #include <linux/spinlock.h>
23 :
24 : #include <asm/processor.h>
25 :
26 : /*
27 : * The seqlock seqcount_t interface does not prescribe a precise sequence of
28 : * read begin/retry/end. For readers, typically there is a call to
29 : * read_seqcount_begin() and read_seqcount_retry(); however, there are more
30 : * esoteric cases which do not follow this pattern.
31 : *
32 : * As a consequence, we take the following best-effort approach for raw usage
33 : * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
34 : * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
35 : * atomics; if there is a matching read_seqcount_retry() call, no following
36 : * memory operations are considered atomic. Usage of the seqlock_t interface
37 : * is not affected.
38 : */
39 : #define KCSAN_SEQLOCK_REGION_MAX 1000
40 :
41 : /*
42 : * Sequence counters (seqcount_t)
43 : *
44 : * This is the raw counting mechanism, without any writer protection.
45 : *
46 : * Write side critical sections must be serialized and non-preemptible.
47 : *
48 : * If readers can be invoked from hardirq or softirq contexts,
49 : * interrupts or bottom halves must also be respectively disabled before
50 : * entering the write section.
51 : *
52 : * This mechanism can't be used if the protected data contains pointers,
53 : * as the writer can invalidate a pointer that a reader is following.
54 : *
55 : * If the write serialization mechanism is one of the common kernel
56 : * locking primitives, use a sequence counter with associated lock
57 : * (seqcount_LOCKNAME_t) instead.
58 : *
59 : * If it's desired to automatically handle the sequence counter writer
60 : * serialization and non-preemptibility requirements, use a sequential
61 : * lock (seqlock_t) instead.
62 : *
63 : * See Documentation/locking/seqlock.rst
64 : */
65 : typedef struct seqcount {
66 : unsigned sequence;
67 : #ifdef CONFIG_DEBUG_LOCK_ALLOC
68 : struct lockdep_map dep_map;
69 : #endif
70 : } seqcount_t;
71 :
72 32762 : static inline void __seqcount_init(seqcount_t *s, const char *name,
73 : struct lock_class_key *key)
74 : {
75 : /*
76 : * Make sure we are not reinitializing a held lock:
77 : */
78 32762 : lockdep_init_map(&s->dep_map, name, key, 0);
79 32762 : s->sequence = 0;
80 32762 : }
81 :
82 : #ifdef CONFIG_DEBUG_LOCK_ALLOC
83 :
84 : # define SEQCOUNT_DEP_MAP_INIT(lockname) \
85 : .dep_map = { .name = #lockname }
86 :
87 : /**
88 : * seqcount_init() - runtime initializer for seqcount_t
89 : * @s: Pointer to the seqcount_t instance
90 : */
91 : # define seqcount_init(s) \
92 : do { \
93 : static struct lock_class_key __key; \
94 : __seqcount_init((s), #s, &__key); \
95 : } while (0)
96 :
97 419979 : static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
98 : {
99 419979 : seqcount_t *l = (seqcount_t *)s;
100 419979 : unsigned long flags;
101 :
102 841778 : local_irq_save(flags);
103 421779 : seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
104 423413 : seqcount_release(&l->dep_map, _RET_IP_);
105 423123 : local_irq_restore(flags);
106 423175 : }
107 :
108 : #else
109 : # define SEQCOUNT_DEP_MAP_INIT(lockname)
110 : # define seqcount_init(s) __seqcount_init(s, NULL, NULL)
111 : # define seqcount_lockdep_reader_access(x)
112 : #endif
113 :
114 : /**
115 : * SEQCNT_ZERO() - static initializer for seqcount_t
116 : * @name: Name of the seqcount_t instance
117 : */
118 : #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
119 :
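/*
 * Illustrative sketch (not part of the original header): minimal
 * seqcount_t initialization and write side. All "foo" names are
 * hypothetical, and the caller is assumed to provide the required
 * serialization and non-preemptibility (e.g. via a held raw spinlock)::
 *
 *	static struct { int a, b; } foo;
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *	void foo_update(int a, int b)
 *	{
 *		// Caller holds the writer lock, preemption disabled.
 *		write_seqcount_begin(&foo_seq);
 *		foo.a = a;
 *		foo.b = b;
 *		write_seqcount_end(&foo_seq);
 *	}
 */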
120 : /*
121 : * Sequence counters with associated locks (seqcount_LOCKNAME_t)
122 : *
123 : * A sequence counter which associates the lock used for writer
124 : * serialization at initialization time. This enables lockdep to validate
125 : * that the write side critical section is properly serialized.
126 : *
127 : * For associated locks which do not implicitly disable preemption,
128 : * preemption protection is enforced in the write side function.
129 : *
130 : * Lockdep is never used in any of the raw write variants.
131 : *
132 : * See Documentation/locking/seqlock.rst
133 : */
134 :
135 : /*
136 : * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
137 : * disable preemption. Doing so can lead to higher latencies, and the write side
138 : * sections will not be able to acquire locks which become sleeping locks
139 : * (e.g. spinlock_t).
140 : *
141 : * To remain preemptible while avoiding a possible livelock caused by the
142 : * reader preempting the writer, use a different technique: let the reader
143 : * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
144 : * case, acquire then release the associated LOCKNAME writer serialization
145 : * lock. This will allow any possibly-preempted writer to make progress
146 : * until the end of its writer serialization lock critical section.
147 : *
148 : * This lock-unlock technique must be implemented for all PREEMPT_RT
149 : * sleeping locks. See Documentation/locking/locktypes.rst
150 : */
151 : #if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
152 : #define __SEQ_LOCK(expr) expr
153 : #else
154 : #define __SEQ_LOCK(expr)
155 : #endif
156 :
157 : /*
158 : * typedef seqcount_LOCKNAME_t - sequence counter with an associated LOCKNAME lock
159 : * @seqcount: The real sequence counter
160 : * @lock: Pointer to the associated lock
161 : *
162 : * A plain sequence counter with external writer synchronization by
163 : * LOCKNAME @lock. The lock is associated to the sequence counter in the
164 : * static initializer or init function. This enables lockdep to validate
165 : * that the write side critical section is properly serialized.
166 : *
167 : * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
168 : */
169 :
170 : /*
171 : * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
172 : * @s: Pointer to the seqcount_LOCKNAME_t instance
173 : * @lock: Pointer to the associated lock
174 : */
175 :
176 : #define seqcount_LOCKNAME_init(s, _lock, lockname) \
177 : do { \
178 : seqcount_##lockname##_t *____s = (s); \
179 : seqcount_init(&____s->seqcount); \
180 : __SEQ_LOCK(____s->lock = (_lock)); \
181 : } while (0)
182 :
183 : #define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
184 : #define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
185 : #define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
186 : #define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
187 : #define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex)
188 :
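/*
 * Illustrative sketch (not part of the original header): associating a
 * spinlock at init time; names are hypothetical::
 *
 *	static spinlock_t foo_lock;
 *	static seqcount_spinlock_t foo_seq;
 *
 *	void foo_init(void)
 *	{
 *		spin_lock_init(&foo_lock);
 *		seqcount_spinlock_init(&foo_seq, &foo_lock);
 *	}
 *
 * The association lets lockdep verify that foo_lock is held across write
 * sections, and lets PREEMPT_RT readers acquire/release it to boost a
 * preempted writer.
 */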
189 : /*
190 : * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
191 : * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
192 : *
193 : * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
194 : * @locktype: LOCKNAME canonical C data type
195 : * @preemptible: preemptibility of above locktype
196 : * @lockmember: argument for lockdep_assert_held()
197 : * @lockbase: associated lock release function (prefix only)
198 : * @lock_acquire: associated lock acquisition function (full call)
199 : */
200 : #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
201 : typedef struct seqcount_##lockname { \
202 : seqcount_t seqcount; \
203 : __SEQ_LOCK(locktype *lock); \
204 : } seqcount_##lockname##_t; \
205 : \
206 : static __always_inline seqcount_t * \
207 : __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
208 : { \
209 : return &s->seqcount; \
210 : } \
211 : \
212 : static __always_inline unsigned \
213 : __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
214 : { \
215 : unsigned seq = READ_ONCE(s->seqcount.sequence); \
216 : \
217 : if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
218 : return seq; \
219 : \
220 : if (preemptible && unlikely(seq & 1)) { \
221 : __SEQ_LOCK(lock_acquire); \
222 : __SEQ_LOCK(lockbase##_unlock(s->lock)); \
223 : \
224 : /* \
225 : * Re-read the sequence counter since the (possibly \
226 : * preempted) writer made progress. \
227 : */ \
228 : seq = READ_ONCE(s->seqcount.sequence); \
229 : } \
230 : \
231 : return seq; \
232 : } \
233 : \
234 : static __always_inline bool \
235 : __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
236 : { \
237 : if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
238 : return preemptible; \
239 : \
240 : /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \
241 : return false; \
242 : } \
243 : \
244 : static __always_inline void \
245 : __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
246 : { \
247 : __SEQ_LOCK(lockdep_assert_held(lockmember)); \
248 : }
249 :
250 : /*
251 : * __seqprop() for seqcount_t
252 : */
253 :
254 54683 : static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
255 : {
256 54196 : return s;
257 : }
258 :
259 15 : static inline unsigned __seqprop_sequence(const seqcount_t *s)
260 : {
261 15 : return READ_ONCE(s->sequence);
262 : }
263 :
264 54531 : static inline bool __seqprop_preemptible(const seqcount_t *s)
265 : {
266 27266 : return false;
267 : }
268 :
269 24 : static inline void __seqprop_assert(const seqcount_t *s)
270 : {
271 24 : lockdep_assert_preemption_disabled();
272 24 : }
273 :
274 : #define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
275 :
276 724579 : SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
277 1140075 : SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
278 : SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
279 : SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
280 : SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))
281 :
282 : /*
283 : * SEQCOUNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
284 : * @name: Name of the seqcount_LOCKNAME_t instance
285 : * @lock: Pointer to the associated LOCKNAME
286 : */
287 :
288 : #define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \
289 : .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
290 : __SEQ_LOCK(.lock = (assoc_lock)) \
291 : }
292 :
293 : #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
294 : #define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
295 : #define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
296 : #define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
297 : #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
298 :
299 : #define __seqprop_case(s, lockname, prop) \
300 : seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
301 :
302 : #define __seqprop(s, prop) _Generic(*(s), \
303 : seqcount_t: __seqprop_##prop((void *)(s)), \
304 : __seqprop_case((s), raw_spinlock, prop), \
305 : __seqprop_case((s), spinlock, prop), \
306 : __seqprop_case((s), rwlock, prop), \
307 : __seqprop_case((s), mutex, prop), \
308 : __seqprop_case((s), ww_mutex, prop))
309 :
310 : #define seqprop_ptr(s) __seqprop(s, ptr)
311 : #define seqprop_sequence(s) __seqprop(s, sequence)
312 : #define seqprop_preemptible(s) __seqprop(s, preemptible)
313 : #define seqprop_assert(s) __seqprop(s, assert)
314 :
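/*
 * Illustrative note (not part of the original header): the _Generic
 * selection above dispatches on the static type of (s). For a
 * hypothetical "seqcount_spinlock_t foo_seq"::
 *
 *	seqprop_sequence(&foo_seq);	// __seqprop_spinlock_sequence()
 *
 * while for a plain seqcount_t it resolves to __seqprop_sequence().
 */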
315 : /**
316 : * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
317 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
318 : *
319 : * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
320 : * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
321 : * provided before actually loading any of the variables that are to be
322 : * protected in this critical section.
323 : *
324 : * Use carefully, only in critical code, and comment how the barrier is
325 : * provided.
326 : *
327 : * Return: count to be passed to read_seqcount_retry()
328 : */
329 : #define __read_seqcount_begin(s) \
330 : ({ \
331 : unsigned __seq; \
332 : \
333 : while ((__seq = seqprop_sequence(s)) & 1) \
334 : cpu_relax(); \
335 : \
336 : kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
337 : __seq; \
338 : })
339 :
340 : /**
341 : * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
342 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
343 : *
344 : * Return: count to be passed to read_seqcount_retry()
345 : */
346 : #define raw_read_seqcount_begin(s) \
347 : ({ \
348 : unsigned _seq = __read_seqcount_begin(s); \
349 : \
350 : smp_rmb(); \
351 : _seq; \
352 : })
353 :
354 : /**
355 : * read_seqcount_begin() - begin a seqcount_t read critical section
356 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
357 : *
358 : * Return: count to be passed to read_seqcount_retry()
359 : */
360 : #define read_seqcount_begin(s) \
361 : ({ \
362 : seqcount_lockdep_reader_access(seqprop_ptr(s)); \
363 : raw_read_seqcount_begin(s); \
364 : })
365 :
366 : /**
367 : * raw_read_seqcount() - read the raw seqcount_t counter value
368 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
369 : *
370 : * raw_read_seqcount opens a read critical section of the given
371 : * seqcount_t, without any lockdep checking, and without checking or
372 : * masking the sequence counter LSB. Calling code is responsible for
373 : * handling that.
374 : *
375 : * Return: count to be passed to read_seqcount_retry()
376 : */
377 : #define raw_read_seqcount(s) \
378 : ({ \
379 : unsigned __seq = seqprop_sequence(s); \
380 : \
381 : smp_rmb(); \
382 : kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
383 : __seq; \
384 : })
385 :
386 : /**
387 : * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
388 : * lockdep and w/o counter stabilization
389 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
390 : *
391 : * raw_seqcount_begin opens a read critical section of the given
392 : * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
393 : * for the count to stabilize. If a writer is active when the section
394 : * begins, the read_seqcount_retry() at the end of the read critical
395 : * section will fail, instead of the count stabilizing at the beginning.
396 : *
397 : * Use this only in special kernel hot paths where the read section is
398 : * small and has a high probability of success through other external
399 : * means. It will save a single branching instruction.
400 : *
401 : * Return: count to be passed to read_seqcount_retry()
402 : */
403 : #define raw_seqcount_begin(s) \
404 : ({ \
405 : /* \
406 : * If the counter is odd, let read_seqcount_retry() fail \
407 : * by decrementing the counter. \
408 : */ \
409 : raw_read_seqcount(s) & ~1; \
410 : })
411 :
412 : /**
413 : * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
414 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
415 : * @start: count, from read_seqcount_begin()
416 : *
417 : * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
418 : * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
419 : * provided after the loads of the variables protected in this critical
420 : * section, and before the sequence counter is re-read by this call.
421 : *
422 : * Use carefully, only in critical code, and comment how the barrier is
423 : * provided.
424 : *
425 : * Return: true if a read section retry is required, else false
426 : */
427 : #define __read_seqcount_retry(s, start) \
428 : do___read_seqcount_retry(seqprop_ptr(s), start)
429 :
430 1066088 : static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
431 : {
432 670977 : kcsan_atomic_next(0);
433 830602 : return unlikely(READ_ONCE(s->sequence) != start);
434 : }
435 :
436 : /**
437 : * read_seqcount_retry() - end a seqcount_t read critical section
438 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
439 : * @start: count, from read_seqcount_begin()
440 : *
441 : * read_seqcount_retry closes the read critical section of given
442 : * seqcount_t. If the critical section was invalid, it must be ignored
443 : * (and typically retried).
444 : *
445 : * Return: true if a read section retry is required, else false
446 : */
447 : #define read_seqcount_retry(s, start) \
448 : do_read_seqcount_retry(seqprop_ptr(s), start)
449 :
450 891091 : static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
451 : {
452 830473 : smp_rmb();
453 891221 : return do___read_seqcount_retry(s, start);
454 : }
455 :
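/*
 * Illustrative sketch (not part of the original header): the canonical
 * lockless read-side retry loop; foo_seq and the copied fields are
 * hypothetical::
 *
 *	unsigned seq;
 *	int a, b;
 *
 *	do {
 *		seq = read_seqcount_begin(&foo_seq);
 *		a = foo.a;	// loads of the protected data
 *		b = foo.b;
 *	} while (read_seqcount_retry(&foo_seq, seq));
 */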
456 : /**
457 : * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
458 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
459 : *
460 : * Context: check write_seqcount_begin()
461 : */
462 : #define raw_write_seqcount_begin(s) \
463 : do { \
464 : if (seqprop_preemptible(s)) \
465 : preempt_disable(); \
466 : \
467 : do_raw_write_seqcount_begin(seqprop_ptr(s)); \
468 : } while (0)
469 :
470 74942 : static inline void do_raw_write_seqcount_begin(seqcount_t *s)
471 : {
472 73475 : kcsan_nestable_atomic_begin();
473 74942 : s->sequence++;
474 74942 : smp_wmb();
475 26793 : }
476 :
477 : /**
478 : * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
479 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
480 : *
481 : * Context: check write_seqcount_end()
482 : */
483 : #define raw_write_seqcount_end(s) \
484 : do { \
485 : do_raw_write_seqcount_end(seqprop_ptr(s)); \
486 : \
487 : if (seqprop_preemptible(s)) \
488 : preempt_enable(); \
489 : } while (0)
490 :
491 74942 : static inline void do_raw_write_seqcount_end(seqcount_t *s)
492 : {
493 73475 : smp_wmb();
494 74942 : s->sequence++;
495 73475 : kcsan_nestable_atomic_end();
496 : }
497 :
498 : /**
499 : * write_seqcount_begin_nested() - start a seqcount_t write section with
500 : * custom lockdep nesting level
501 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
502 : * @subclass: lockdep nesting level
503 : *
504 : * See Documentation/locking/lockdep-design.rst
505 : * Context: check write_seqcount_begin()
506 : */
507 : #define write_seqcount_begin_nested(s, subclass) \
508 : do { \
509 : seqprop_assert(s); \
510 : \
511 : if (seqprop_preemptible(s)) \
512 : preempt_disable(); \
513 : \
514 : do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \
515 : } while (0)
516 :
517 23079 : static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
518 : {
519 23079 : do_raw_write_seqcount_begin(s);
520 23079 : seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
521 23079 : }
522 :
523 : /**
524 : * write_seqcount_begin() - start a seqcount_t write side critical section
525 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
526 : *
527 : * Context: sequence counter write side sections must be serialized and
528 : * non-preemptible. Preemption will be automatically disabled if and
529 : * only if the seqcount write serialization lock is associated, and
530 : * preemptible. If readers can be invoked from hardirq or softirq
531 : * context, interrupts or bottom halves must be respectively disabled.
532 : */
533 : #define write_seqcount_begin(s) \
534 : do { \
535 : seqprop_assert(s); \
536 : \
537 : if (seqprop_preemptible(s)) \
538 : preempt_disable(); \
539 : \
540 : do_write_seqcount_begin(seqprop_ptr(s)); \
541 : } while (0)
542 :
543 22685 : static inline void do_write_seqcount_begin(seqcount_t *s)
544 : {
545 22002 : do_write_seqcount_begin_nested(s, 0);
546 : }
547 :
548 : /**
549 : * write_seqcount_end() - end a seqcount_t write side critical section
550 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
551 : *
552 : * Context: Preemption will be automatically re-enabled if and only if
553 : * the seqcount write serialization lock is associated, and preemptible.
554 : */
555 : #define write_seqcount_end(s) \
556 : do { \
557 : do_write_seqcount_end(seqprop_ptr(s)); \
558 : \
559 : if (seqprop_preemptible(s)) \
560 : preempt_enable(); \
561 : } while (0)
562 :
563 23527 : static inline void do_write_seqcount_end(seqcount_t *s)
564 : {
565 23527 : seqcount_release(&s->dep_map, _RET_IP_);
566 23527 : do_raw_write_seqcount_end(s);
567 23527 : }
568 :
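/*
 * Illustrative sketch (not part of the original header): a write section
 * over a seqcount_spinlock_t; names are hypothetical. The associated
 * lock serializes writers, and seqprop_assert() verifies it is held::
 *
 *	void foo_update(int a, int b)
 *	{
 *		spin_lock(&foo_lock);
 *		write_seqcount_begin(&foo_seq);
 *		foo.a = a;
 *		foo.b = b;
 *		write_seqcount_end(&foo_seq);
 *		spin_unlock(&foo_lock);
 *	}
 */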
569 : /**
570 : * raw_write_seqcount_barrier() - do a seqcount_t write barrier
571 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
572 : *
573 : * This can be used to provide an ordering guarantee instead of the usual
574 : * consistency guarantee. It is one wmb cheaper, because it can collapse
575 : * the two back-to-back wmb()s.
576 : *
577 : * Note that writes surrounding the barrier should be declared atomic (e.g.
578 : * via WRITE_ONCE): a) to ensure the writes become visible to other threads
579 : * atomically, avoiding compiler optimizations; b) to document which writes are
580 : * meant to propagate to the reader critical section. This is necessary because
581 : * neither the writes before nor the writes after the barrier are enclosed
582 : * in a seq-writer critical section that would make readers aware of ongoing writes::
583 : *
584 : * seqcount_t seq;
585 : * bool X = true, Y = false;
586 : *
587 : * void read(void)
588 : * {
589 : * bool x, y;
590 : *
591 : * do {
592 : * int s = read_seqcount_begin(&seq);
593 : *
594 : * x = X; y = Y;
595 : *
596 : * } while (read_seqcount_retry(&seq, s));
597 : *
598 : * BUG_ON(!x && !y);
599 : * }
600 : *
601 : * void write(void)
602 : * {
603 : * WRITE_ONCE(Y, true);
604 : *
605 : * raw_write_seqcount_barrier(&seq);
606 : *
607 : * WRITE_ONCE(X, false);
608 : * }
609 : */
610 : #define raw_write_seqcount_barrier(s) \
611 : do_raw_write_seqcount_barrier(seqprop_ptr(s))
612 :
613 220 : static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
614 : {
615 220 : kcsan_nestable_atomic_begin();
616 220 : s->sequence++;
617 220 : smp_wmb();
618 220 : s->sequence++;
619 220 : kcsan_nestable_atomic_end();
620 : }
621 :
622 : /**
623 : * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
624 : * side operations
625 : * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
626 : *
627 : * After write_seqcount_invalidate, no seqcount_t read side operations
628 : * will complete successfully and see data older than this.
629 : */
630 : #define write_seqcount_invalidate(s) \
631 : do_write_seqcount_invalidate(seqprop_ptr(s))
632 :
633 10648 : static inline void do_write_seqcount_invalidate(seqcount_t *s)
634 : {
635 10648 : smp_wmb();
636 10648 : kcsan_nestable_atomic_begin();
637 10648 : s->sequence += 2;
638 10648 : kcsan_nestable_atomic_end();
639 10648 : }
640 :
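/*
 * Illustrative sketch (not part of the original header): invalidating
 * in-flight lockless readers after tearing down data they may have
 * sampled; all names, including remove_from_lists(), are hypothetical::
 *
 *	void foo_teardown(struct foo *f)
 *	{
 *		// Caller holds the writer serialization lock.
 *		remove_from_lists(f);
 *		write_seqcount_invalidate(&foo_seq);
 *	}
 */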
641 : /*
642 : * Latch sequence counters (seqcount_latch_t)
643 : *
644 : * A sequence counter variant where the counter even/odd value is used to
645 : * switch between two copies of protected data. This allows the read path,
646 : * typically NMIs, to safely interrupt the write side critical section.
647 : *
648 : * As the write sections are fully preemptible, no special handling for
649 : * PREEMPT_RT is needed.
650 : */
651 : typedef struct {
652 : seqcount_t seqcount;
653 : } seqcount_latch_t;
654 :
655 : /**
656 : * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
657 : * @seq_name: Name of the seqcount_latch_t instance
658 : */
659 : #define SEQCNT_LATCH_ZERO(seq_name) { \
660 : .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
661 : }
662 :
663 : /**
664 : * seqcount_latch_init() - runtime initializer for seqcount_latch_t
665 : * @s: Pointer to the seqcount_latch_t instance
666 : */
667 : #define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
668 :
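/*
 * Illustrative sketch (not part of the original header): declaring and
 * statically initializing a latched structure; names are hypothetical::
 *
 *	static struct {
 *		seqcount_latch_t	seq;
 *		struct data_struct	data[2];
 *	} foo_latch = {
 *		.seq = SEQCNT_LATCH_ZERO(foo_latch.seq),
 *	};
 *
 * Alternatively, call seqcount_latch_init(&foo_latch.seq) at runtime.
 */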
669 : /**
670 : * raw_read_seqcount_latch() - pick even/odd latch data copy
671 : * @s: Pointer to seqcount_latch_t
672 : *
673 : * See raw_write_seqcount_latch() for details and a full reader/writer
674 : * usage example.
675 : *
676 : * Return: sequence counter raw value. Use the lowest bit as an index for
677 : * picking which data copy to read. The full counter must then be checked
678 : * with read_seqcount_latch_retry().
679 : */
680 122 : static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
681 : {
682 : /*
683 : * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
684 : * Due to the dependent load, a full smp_rmb() is not needed.
685 : */
686 122 : return READ_ONCE(s->seqcount.sequence);
687 : }
688 :
689 : /**
690 : * read_seqcount_latch_retry() - end a seqcount_latch_t read section
691 : * @s: Pointer to seqcount_latch_t
692 : * @start: count, from raw_read_seqcount_latch()
693 : *
694 : * Return: true if a read section retry is required, else false
695 : */
696 : static inline int
697 122 : read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
698 : {
699 122 : return read_seqcount_retry(&s->seqcount, start);
700 : }
701 :
702 : /**
703 : * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
704 : * @s: Pointer to seqcount_latch_t
705 : *
706 : * The latch technique is a multiversion concurrency control method that allows
707 : * queries during non-atomic modifications. If you can guarantee queries never
708 : * interrupt the modification -- e.g. the concurrency is strictly between CPUs
709 : * -- you most likely do not need this.
710 : *
711 : * Where the traditional RCU/lockless data structures rely on atomic
712 : * modifications to ensure queries observe either the old or the new state the
713 : * latch allows the same for non-atomic updates. The trade-off is doubling the
714 : * cost of storage; we have to maintain two copies of the entire data
715 : * structure.
716 : *
717 : * Very simply put: we first modify one copy and then the other. This ensures
718 : * there is always one copy in a stable state, ready to give us an answer.
719 : *
720 : * The basic form is a data structure like::
721 : *
722 : * struct latch_struct {
723 : * seqcount_latch_t seq;
724 : * struct data_struct data[2];
725 : * };
726 : *
727 : * Where a modification, which is assumed to be externally serialized, does the
728 : * following::
729 : *
730 : * void latch_modify(struct latch_struct *latch, ...)
731 : * {
732 : * smp_wmb(); // Ensure that the last data[1] update is visible
733 : * latch->seq.sequence++;
734 : * smp_wmb(); // Ensure that the seqcount update is visible
735 : *
736 : * modify(latch->data[0], ...);
737 : *
738 : * smp_wmb(); // Ensure that the data[0] update is visible
739 : * latch->seq.sequence++;
740 : * smp_wmb(); // Ensure that the seqcount update is visible
741 : *
742 : * modify(latch->data[1], ...);
743 : * }
744 : *
745 : * The query will have a form like::
746 : *
747 : * struct entry *latch_query(struct latch_struct *latch, ...)
748 : * {
749 : * struct entry *entry;
750 : * unsigned seq, idx;
751 : *
752 : * do {
753 : * seq = raw_read_seqcount_latch(&latch->seq);
754 : *
755 : * idx = seq & 0x01;
756 : * entry = data_query(latch->data[idx], ...);
757 : *
758 : * // This includes needed smp_rmb()
759 : * } while (read_seqcount_latch_retry(&latch->seq, seq));
760 : *
761 : * return entry;
762 : * }
763 : *
764 : * So during the modification, queries are first redirected to data[1]. Then we
765 : * modify data[0]. When that is complete, we redirect queries back to data[0]
766 : * and we can modify data[1].
767 : *
768 : * NOTE:
769 : *
770 : * The non-requirement for atomic modifications does _NOT_ include
771 : * the publishing of new entries in the case where data is a dynamic
772 : * data structure.
773 : *
774 : * An iteration might start in data[0] and get suspended long enough
775 : * to miss an entire modification sequence, once it resumes it might
776 : * observe the new entry.
777 : *
778 : * NOTE2:
779 : *
780 : * When data is a dynamic data structure, one should use regular RCU
781 : * patterns to manage the lifetimes of the objects within.
782 : */
783 31790 : static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
784 : {
785 31790 : smp_wmb(); /* prior stores before incrementing "sequence" */
786 31790 : s->seqcount.sequence++;
787 31790 : smp_wmb(); /* increment "sequence" before following stores */
788 : }
789 :
790 : /*
791 : * Sequential locks (seqlock_t)
792 : *
793 : * Sequence counters with an embedded spinlock for writer serialization
794 : * and non-preemptibility.
795 : *
796 : * For more info, see:
797 : * - Comments on top of seqcount_t
798 : * - Documentation/locking/seqlock.rst
799 : */
800 : typedef struct {
801 : /*
802 : * Make sure that readers don't starve writers on PREEMPT_RT: use
803 : * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
804 : */
805 : seqcount_spinlock_t seqcount;
806 : spinlock_t lock;
807 : } seqlock_t;
808 :
809 : #define __SEQLOCK_UNLOCKED(lockname) \
810 : { \
811 : .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
812 : .lock = __SPIN_LOCK_UNLOCKED(lockname) \
813 : }
814 :
815 : /**
816 : * seqlock_init() - dynamic initializer for seqlock_t
817 : * @sl: Pointer to the seqlock_t instance
818 : */
819 : #define seqlock_init(sl) \
820 : do { \
821 : spin_lock_init(&(sl)->lock); \
822 : seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
823 : } while (0)
824 :
825 : /**
826 : * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
827 : * @sl: Name of the seqlock_t instance
828 : */
829 : #define DEFINE_SEQLOCK(sl) \
830 : seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
831 :
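/*
 * Illustrative sketch (not part of the original header): typical
 * seqlock_t usage. The embedded spinlock makes the write side serialized
 * and non-preemptible automatically; foo_seqlock and foo_value are
 * hypothetical::
 *
 *	static DEFINE_SEQLOCK(foo_seqlock);
 *	static u64 foo_value;
 *
 *	void foo_set(u64 val)
 *	{
 *		write_seqlock(&foo_seqlock);
 *		foo_value = val;
 *		write_sequnlock(&foo_seqlock);
 *	}
 *
 *	u64 foo_get(void)
 *	{
 *		unsigned seq;
 *		u64 val;
 *
 *		do {
 *			seq = read_seqbegin(&foo_seqlock);
 *			val = foo_value;
 *		} while (read_seqretry(&foo_seqlock, seq));
 *
 *		return val;
 *	}
 */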
832 : /**
833 : * read_seqbegin() - start a seqlock_t read side critical section
834 : * @sl: Pointer to seqlock_t
835 : *
836 : * Return: count, to be passed to read_seqretry()
837 : */
838 65239 : static inline unsigned read_seqbegin(const seqlock_t *sl)
839 : {
840 86308 : unsigned ret = read_seqcount_begin(&sl->seqcount);
841 :
842 65232 : kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
843 65232 : kcsan_flat_atomic_begin();
844 65232 : return ret;
845 : }
846 :
847 : /**
848 : * read_seqretry() - end a seqlock_t read side section
849 : * @sl: Pointer to seqlock_t
850 : * @start: count, from read_seqbegin()
851 : *
852 : * read_seqretry closes the read side critical section of given seqlock_t.
853 : * If the critical section was invalid, it must be ignored (and typically
854 : * retried).
855 : *
856 : * Return: true if a read section retry is required, else false
857 : */
858 254594 : static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
859 : {
860 : /*
861 : * Assume not nested: read_seqretry() may be called multiple times when
862 : * completing a read critical section.
863 : */
864 252970 : kcsan_flat_atomic_end();
865 :
866 254594 : return read_seqcount_retry(&sl->seqcount, start);
867 : }
868 :
869 : /*
870 : * For all seqlock_t write side functions, use the internal
871 : * do_write_seqcount_begin() instead of generic write_seqcount_begin().
872 : * This way, no redundant lockdep_assert_held() checks are added.
873 : */
874 :
875 : /**
876 : * write_seqlock() - start a seqlock_t write side critical section
877 : * @sl: Pointer to seqlock_t
878 : *
879 : * write_seqlock opens a write side critical section for the given
880 : * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
881 : * that sequential lock. All seqlock_t write side sections are thus
882 : * automatically serialized and non-preemptible.
883 : *
884 : * Context: if the seqlock_t read section, or other write side critical
885 : * sections, can be invoked from hardirq or softirq contexts, use the
886 : * _irqsave or _bh variants of this function instead.
887 : */
888 4184 : static inline void write_seqlock(seqlock_t *sl)
889 : {
890 4184 : spin_lock(&sl->lock);
891 4184 : do_write_seqcount_begin(&sl->seqcount.seqcount);
892 4184 : }
893 :
894 : /**
895 : * write_sequnlock() - end a seqlock_t write side critical section
896 : * @sl: Pointer to seqlock_t
897 : *
898 : * write_sequnlock closes the (serialized and non-preemptible) write side
899 : * critical section of given seqlock_t.
900 : */
901 4184 : static inline void write_sequnlock(seqlock_t *sl)
902 : {
903 4184 : do_write_seqcount_end(&sl->seqcount.seqcount);
904 4184 : spin_unlock(&sl->lock);
905 0 : }
906 :
907 : /**
908 : * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
909 : * @sl: Pointer to seqlock_t
910 : *
911 : * _bh variant of write_seqlock(). Use only if the read side section, or
912 : * other write side sections, can be invoked from softirq contexts.
913 : */
914 0 : static inline void write_seqlock_bh(seqlock_t *sl)
915 : {
916 0 : spin_lock_bh(&sl->lock);
917 0 : do_write_seqcount_begin(&sl->seqcount.seqcount);
918 0 : }
919 :
920 : /**
921 : * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
922 : * @sl: Pointer to seqlock_t
923 : *
924 : * write_sequnlock_bh closes the serialized, non-preemptible, and
925 : * softirqs-disabled, seqlock_t write side critical section opened with
926 : * write_seqlock_bh().
927 : */
928 0 : static inline void write_sequnlock_bh(seqlock_t *sl)
929 : {
930 0 : do_write_seqcount_end(&sl->seqcount.seqcount);
931 0 : spin_unlock_bh(&sl->lock);
932 0 : }
933 :
934 : /**
935 : * write_seqlock_irq() - start a non-interruptible seqlock_t write section
936 : * @sl: Pointer to seqlock_t
937 : *
938 : * _irq variant of write_seqlock(). Use only if the read side section, or
939 : * other write sections, can be invoked from hardirq contexts.
940 : */
941 0 : static inline void write_seqlock_irq(seqlock_t *sl)
942 : {
943 0 : spin_lock_irq(&sl->lock);
944 0 : do_write_seqcount_begin(&sl->seqcount.seqcount);
945 0 : }
946 :
947 : /**
948 : * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
949 : * @sl: Pointer to seqlock_t
950 : *
951 : * write_sequnlock_irq closes the serialized and non-interruptible
952 : * seqlock_t write side section opened with write_seqlock_irq().
953 : */
954 0 : static inline void write_sequnlock_irq(seqlock_t *sl)
955 : {
956 0 : do_write_seqcount_end(&sl->seqcount.seqcount);
957 0 : spin_unlock_irq(&sl->lock);
958 0 : }
959 :
960 0 : static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
961 : {
962 0 : unsigned long flags;
963 :
964 0 : spin_lock_irqsave(&sl->lock, flags);
965 0 : do_write_seqcount_begin(&sl->seqcount.seqcount);
966 0 : return flags;
967 : }
968 :
969 : /**
970 : * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
971 : * section
972 : * @lock: Pointer to seqlock_t
973 : * @flags: Stack-allocated storage for saving caller's local interrupt
974 : * state, to be passed to write_sequnlock_irqrestore().
975 : *
976 : * _irqsave variant of write_seqlock(). Use it only if the read side
977 : * section, or other write sections, can be invoked from hardirq context.
978 : */
979 : #define write_seqlock_irqsave(lock, flags) \
980 : do { flags = __write_seqlock_irqsave(lock); } while (0)
981 :
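/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * write section whose readers may run in hardirq context::
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo_seqlock, flags);
 *	foo_value = new_val;
 *	write_sequnlock_irqrestore(&foo_seqlock, flags);
 */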
982 : /**
983 : * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
984 : * section
985 : * @sl: Pointer to seqlock_t
986 : * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
987 : *
988 : * write_sequnlock_irqrestore closes the serialized and non-interruptible
989 : * seqlock_t write section previously opened with write_seqlock_irqsave().
990 : */
991 : static inline void
992 0 : write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
993 : {
994 0 : do_write_seqcount_end(&sl->seqcount.seqcount);
995 0 : spin_unlock_irqrestore(&sl->lock, flags);
996 : }
997 :
998 : /**
999 : * read_seqlock_excl() - begin a seqlock_t locking reader section
1000 : * @sl: Pointer to seqlock_t
1001 : *
1002 : * read_seqlock_excl opens a seqlock_t locking reader critical section. A
1003 : * locking reader exclusively locks out *both* other writers *and* other
1004 : * locking readers, but it does not update the embedded sequence number.
1005 : *
1006 : * Locking readers act like a normal spin_lock()/spin_unlock().
1007 : *
1008 : * Context: if the seqlock_t write section, *or other read sections*, can
1009 : * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
1010 : * variant of this function instead.
1011 : *
1012 : * The opened read section must be closed with read_sequnlock_excl().
1013 : */
1014 1428 : static inline void read_seqlock_excl(seqlock_t *sl)
1015 : {
1016 1428 : spin_lock(&sl->lock);
1017 32 : }
1018 :
1019 : /**
1020 : * read_sequnlock_excl() - end a seqlock_t locking reader critical section
1021 : * @sl: Pointer to seqlock_t
1022 : */
1023 1428 : static inline void read_sequnlock_excl(seqlock_t *sl)
1024 : {
1025 1428 : spin_unlock(&sl->lock);
1026 32 : }
1027 :
1028 : /**
1029 : * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
1030 : * softirqs disabled
1031 : * @sl: Pointer to seqlock_t
1032 : *
1033 : * _bh variant of read_seqlock_excl(). Use this variant only if the
1034 : * seqlock_t write side section, *or other read sections*, can be invoked
1035 : * from softirq contexts.
1036 : */
1037 : static inline void read_seqlock_excl_bh(seqlock_t *sl)
1038 : {
1039 : spin_lock_bh(&sl->lock);
1040 : }
1041 :
1042 : /**
1043 : * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
1044 : * reader section
1045 : * @sl: Pointer to seqlock_t
1046 : */
1047 : static inline void read_sequnlock_excl_bh(seqlock_t *sl)
1048 : {
1049 : spin_unlock_bh(&sl->lock);
1050 : }
1051 :
1052 : /**
1053 : * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
1054 : * reader section
1055 : * @sl: Pointer to seqlock_t
1056 : *
1057 : * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
1058 : * write side section, *or other read sections*, can be invoked from a
1059 : * hardirq context.
1060 : */
1061 : static inline void read_seqlock_excl_irq(seqlock_t *sl)
1062 : {
1063 : spin_lock_irq(&sl->lock);
1064 : }
1065 :
1066 : /**
1067 : * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
1068 : * locking reader section
1069 : * @sl: Pointer to seqlock_t
1070 : */
1071 : static inline void read_sequnlock_excl_irq(seqlock_t *sl)
1072 : {
1073 : spin_unlock_irq(&sl->lock);
1074 : }
1075 :
1076 0 : static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
1077 : {
1078 0 : unsigned long flags;
1079 :
1080 0 : spin_lock_irqsave(&sl->lock, flags);
1081 0 : return flags;
1082 : }
1083 :
1084 : /**
1085 : * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
1086 : * locking reader section
1087 : * @lock: Pointer to seqlock_t
1088 : * @flags: Stack-allocated storage for saving caller's local interrupt
1089 : * state, to be passed to read_sequnlock_excl_irqrestore().
1090 : *
1091 : * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
1092 : * write side section, *or other read sections*, can be invoked from a
1093 : * hardirq context.
1094 : */
1095 : #define read_seqlock_excl_irqsave(lock, flags) \
1096 : do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
1097 :
1098 : /**
1099 : * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
1100 : * locking reader section
1101 : * @sl: Pointer to seqlock_t
1102 : * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
1103 : */
1104 : static inline void
1105 0 : read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
1106 : {
1107 0 : spin_unlock_irqrestore(&sl->lock, flags);
1108 0 : }
1109 :
1110 : /**
1111 : * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
1112 : * @lock: Pointer to seqlock_t
1113 : * @seq : Marker and return parameter. If the passed value is even, the
1114 : * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
1115 : * If the passed value is odd, the reader will become a *locking* reader
1116 : * as in read_seqlock_excl(). In the first call to this function, the
1117 : * caller *must* initialize and pass an even value to @seq; this way, a
1118 : * lockless read can be optimistically tried first.
1119 : *
1120 : * read_seqbegin_or_lock is an API designed to optimistically try a normal
1121 : * lockless seqlock_t read section first. If an odd counter is found, the
1122 : * lockless read trial has failed, and the next read iteration transforms
1123 : * itself into a full seqlock_t locking reader.
1124 : *
1125 : * This is typically used to avoid seqlock_t lockless readers starvation
1126 : * (too many retry loops) in the case of a sharp spike in write side
1127 : * activity.
1128 : *
1129 : * Context: if the seqlock_t write section, *or other read sections*, can
1130 : * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
1131 : * variant of this function instead.
1132 : *
1133 : * Check Documentation/locking/seqlock.rst for template example code.
1134 : *
1135 : * Return: the encountered sequence counter value, through the @seq
1136 : * parameter, which is overloaded as a return parameter. This returned
1137 : * value must be checked with need_seqretry(). If the read section needs to
1138 : * be retried, this returned value must also be passed as the @seq
1139 : * parameter of the next read_seqbegin_or_lock() iteration.
1140 : */
1141 16401 : static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
1142 : {
1143 16401 : if (!(*seq & 1)) /* Even */
1144 16369 : *seq = read_seqbegin(lock);
1145 : else /* Odd */
1146 32 : read_seqlock_excl(lock);
1147 16399 : }
1148 :
1149 : /**
1150 : * need_seqretry() - validate seqlock_t "locking or lockless" read section
1151 : * @lock: Pointer to seqlock_t
1152 : * @seq: sequence count, from read_seqbegin_or_lock()
1153 : *
1154 : * Return: true if a read section retry is required, false otherwise
1155 : */
1156 17460 : static inline int need_seqretry(seqlock_t *lock, int seq)
1157 : {
1158 17460 : return !(seq & 1) && read_seqretry(lock, seq);
1159 : }
1160 :
1161 : /**
1162 : * done_seqretry() - end seqlock_t "locking or lockless" reader section
1163 : * @lock: Pointer to seqlock_t
1164 : * @seq: count, from read_seqbegin_or_lock()
1165 : *
1166 : * done_seqretry finishes the seqlock_t read side critical section started
1167 : * with read_seqbegin_or_lock() and validated by need_seqretry().
1168 : */
1169 16370 : static inline void done_seqretry(seqlock_t *lock, int seq)
1170 : {
1171 16370 : if (seq & 1)
1172 14791 : read_sequnlock_excl(lock);
1173 : }
1174 :
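/*
 * Template adapted from Documentation/locking/seqlock.rst; foo_seqlock
 * is hypothetical. Note that some in-tree callers additionally force the
 * marker odd (e.g. "seq = 1") before restarting, so that the next pass
 * becomes a locking reader::
 *
 *	int seq = 0;	// lockless trial first: even marker
 *
 *	do {
 *		read_seqbegin_or_lock(&foo_seqlock, &seq);
 *
 *		// ... [[read-side critical section]] ...
 *
 *	} while (need_seqretry(&foo_seqlock, seq));
 *	done_seqretry(&foo_seqlock, seq);
 */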
1175 : /**
1176 : * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
1177 : * a non-interruptible locking reader
1178 : * @lock: Pointer to seqlock_t
1179 : * @seq: Marker and return parameter. Check read_seqbegin_or_lock().
1180 : *
1181 : * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
1182 : * the seqlock_t write section, *or other read sections*, can be invoked
1183 : * from hardirq context.
1184 : *
1185 : * Note: Interrupts will be disabled only for "locking reader" mode.
1186 : *
1187 : * Return:
1188 : *
1189 : * 1. The saved local interrupts state in case of a locking reader, to
1190 : * be passed to done_seqretry_irqrestore().
1191 : *
1192 : * 2. The encountered sequence counter value, returned through @seq
1193 : * overloaded as a return parameter. Check read_seqbegin_or_lock().
1194 : */
1195 : static inline unsigned long
1196 1045 : read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
1197 : {
1198 1045 : unsigned long flags = 0;
1199 :
1200 1045 : if (!(*seq & 1)) /* Even */
1201 1045 : *seq = read_seqbegin(lock);
1202 : else /* Odd */
1203 0 : read_seqlock_excl_irqsave(lock, flags);
1204 :
1205 1045 : return flags;
1206 : }
1207 :
1208 : /**
1209 : * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
1210 : * non-interruptible locking reader section
1211 : * @lock: Pointer to seqlock_t
1212 : * @seq: Count, from read_seqbegin_or_lock_irqsave()
1213 : * @flags: Caller's saved local interrupt state in case of a locking
1214 : * reader, also from read_seqbegin_or_lock_irqsave()
1215 : *
1216 : * This is the _irqrestore variant of done_seqretry(). The read section
1217 : * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
1218 : * by need_seqretry().
1219 : */
1220 : static inline void
1221 1045 : done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
1222 : {
1223 1045 : if (seq & 1)
1224 1045 : read_sequnlock_excl_irqrestore(lock, flags);
1225 : }
1226 : #endif /* __LINUX_SEQLOCK_H */
|