Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 :
3 : #ifndef _LINUX_KCSAN_CHECKS_H
4 : #define _LINUX_KCSAN_CHECKS_H
5 :
6 : /* Note: Only include what is already included by compiler.h. */
7 : #include <linux/compiler_attributes.h>
8 : #include <linux/types.h>
9 :
10 : /* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
11 : #define KCSAN_ACCESS_WRITE (1 << 0) /* Access is a write. */
12 : #define KCSAN_ACCESS_COMPOUND (1 << 1) /* Compounded read-write instrumentation. */
13 : #define KCSAN_ACCESS_ATOMIC (1 << 2) /* Access is atomic. */
14 : /* The following are special, and never due to compiler instrumentation. */
15 : #define KCSAN_ACCESS_ASSERT (1 << 3) /* Access is an assertion. */
16 : #define KCSAN_ACCESS_SCOPED (1 << 4) /* Access is a scoped access. */
17 :
18 : /*
19 : * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
20 : * even in compilation units that selectively disable KCSAN, but must use KCSAN
21 : * to validate access to an address. Never use these in header files!
22 : */
23 : #ifdef CONFIG_KCSAN
24 : /**
25 : * __kcsan_check_access - check generic access for races
26 : *
27 : * @ptr: address of access
28 : * @size: size of access
29 : * @type: access type modifier
30 : */
31 : void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
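A minimal usage sketch (bump_counter() and shared_counter are illustrative, not from this header): in a .c file whose KCSAN instrumentation is disabled, the explicit call tells the runtime about an access it would otherwise not see.

.. code-block:: c

    /* In a compilation unit built with KCSAN instrumentation disabled. */
    extern unsigned long shared_counter;

    void bump_counter(void)
    {
            /* Report this plain read-modify-write to KCSAN explicitly. */
            __kcsan_check_access(&shared_counter, sizeof(shared_counter),
                                 KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE);
            shared_counter++;
    }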
32 :
33 : /**
34 : * kcsan_disable_current - disable KCSAN for the current context
35 : *
36 : * Supports nesting.
37 : */
38 : void kcsan_disable_current(void);
39 :
40 : /**
41 : * kcsan_enable_current - re-enable KCSAN for the current context
42 : *
43 : * Supports nesting.
44 : */
45 : void kcsan_enable_current(void);
46 : void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
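The enable/disable calls nest, so a helper can locally suppress reports around an access that is known to race benignly, as in this sketch (read_stat_racy() and its argument are illustrative assumptions):

.. code-block:: c

    static unsigned long read_stat_racy(const unsigned long *stat)
    {
            unsigned long val;

            kcsan_disable_current();        /* suppress reports for this context */
            val = *stat;                    /* racy read tolerated by design     */
            kcsan_enable_current();         /* must balance the disable above    */
            return val;
    }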
47 :
48 : /**
49 : * kcsan_nestable_atomic_begin - begin nestable atomic region
50 : *
51 : * Accesses within the atomic region may appear to race with other accesses but
52 : * should be considered atomic.
53 : */
54 : void kcsan_nestable_atomic_begin(void);
55 :
56 : /**
57 : * kcsan_nestable_atomic_end - end nestable atomic region
58 : */
59 : void kcsan_nestable_atomic_end(void);
60 :
61 : /**
62 : * kcsan_flat_atomic_begin - begin flat atomic region
63 : *
64 : * Accesses within the atomic region may appear to race with other accesses but
65 : * should be considered atomic.
66 : */
67 : void kcsan_flat_atomic_begin(void);
68 :
69 : /**
70 : * kcsan_flat_atomic_end - end flat atomic region
71 : */
72 : void kcsan_flat_atomic_end(void);
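Per the names, the nestable variant may be entered recursively (each begin paired with an end), while the flat variant is for regions that do not nest. A sketch with an illustrative helper:

.. code-block:: c

    static void publish_pair(unsigned long *a, unsigned long *b)
    {
            kcsan_nestable_atomic_begin();  /* may nest inside another such region    */
            *a = 1;                         /* plain accesses treated as atomic here  */
            *b = 2;
            kcsan_nestable_atomic_end();    /* every begin needs a matching end       */
    }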
73 :
74 : /**
75 : * kcsan_atomic_next - consider following accesses as atomic
76 : *
77 : * Force treating the next n memory accesses for the current context as atomic
78 : * operations.
79 : *
80 : * @n: number of following memory accesses to treat as atomic.
81 : */
82 : void kcsan_atomic_next(int n);
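This can mark a short, fixed sequence of plain accesses as atomic without bracketing them in a region; peek_two() and its arguments are illustrative:

.. code-block:: c

    static unsigned long peek_two(const unsigned long *a, const unsigned long *b)
    {
            kcsan_atomic_next(2);   /* the next two accesses count as atomic */
            return *a + *b;         /* two plain reads                       */
    }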
83 :
84 : /**
85 : * kcsan_set_access_mask - set access mask
86 : *
87 : * Set the access mask for all accesses for the current context if non-zero.
88 : * Only value changes to bits set in the mask will be reported.
89 : *
90 : * @mask: bitmask
91 : */
92 : void kcsan_set_access_mask(unsigned long mask);
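A hand-rolled sketch of the pattern (FLAG_BUSY and assert_busy_bit_stable() are illustrative); ASSERT_EXCLUSIVE_BITS() at the end of this file packages the same sequence, additionally followed by kcsan_atomic_next(1):

.. code-block:: c

    #define FLAG_BUSY 0x1UL         /* illustrative bit */

    static void assert_busy_bit_stable(unsigned long *flags)
    {
            kcsan_set_access_mask(FLAG_BUSY);       /* only changes to this bit are reported */
            __kcsan_check_access(flags, sizeof(*flags), KCSAN_ACCESS_ASSERT);
            kcsan_set_access_mask(0);               /* always clear the mask afterwards      */
    }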
93 :
94 : /* Scoped access information. */
95 : struct kcsan_scoped_access {
96 : struct list_head list;
97 : const volatile void *ptr;
98 : size_t size;
99 : int type;
100 : };
101 : /*
102 : * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
103 : * out of scope; relies on attribute "cleanup", which is supported by all
104 : * compilers that support KCSAN.
105 : */
106 : #define __kcsan_cleanup_scoped \
107 : __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))
108 :
109 : /**
110 : * kcsan_begin_scoped_access - begin scoped access
111 : *
112 : * Begin scoped access and initialize @sa, which will cause KCSAN to
113 : * continuously check the memory range in the current thread until
114 : * kcsan_end_scoped_access() is called for @sa.
115 : *
116 : * Scoped accesses are implemented by appending @sa to an internal list for the
117 : * current execution context, and then checked on every call into the KCSAN
118 : * runtime.
119 : *
120 : * @ptr: address of access
121 : * @size: size of access
122 : * @type: access type modifier
123 : * @sa: struct kcsan_scoped_access to use for the scope of the access
124 : */
125 : struct kcsan_scoped_access *
126 : kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
127 : struct kcsan_scoped_access *sa);
128 :
129 : /**
130 : * kcsan_end_scoped_access - end scoped access
131 : *
132 : * End a scoped access, which will stop KCSAN checking the memory range.
133 : * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
134 : *
135 : * @sa: a previously initialized struct kcsan_scoped_access
136 : */
137 : void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
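Explicit begin/end pairing might look as follows (process_buf() and its arguments are illustrative); the __kcsan_cleanup_scoped attribute above and the ASSERT_EXCLUSIVE_*_SCOPED() macros below do this pairing automatically:

.. code-block:: c

    static void process_buf(char *buf, size_t len)
    {
            struct kcsan_scoped_access sa;

            /* Keep checking @buf for racing writers until the explicit end. */
            kcsan_begin_scoped_access(buf, len,
                                      KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ASSERT, &sa);
            /* ... work on buf ... */
            kcsan_end_scoped_access(&sa);
    }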
138 :
139 :
140 : #else /* CONFIG_KCSAN */
141 :
142 910409 : static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
143 910409 : int type) { }
144 :
145 0 : static inline void kcsan_disable_current(void) { }
146 0 : static inline void kcsan_enable_current(void) { }
147 : static inline void kcsan_enable_current_nowarn(void) { }
148 85810 : static inline void kcsan_nestable_atomic_begin(void) { }
149 84343 : static inline void kcsan_nestable_atomic_end(void) { }
150 65232 : static inline void kcsan_flat_atomic_begin(void) { }
151 254594 : static inline void kcsan_flat_atomic_end(void) { }
152 2521112 : static inline void kcsan_atomic_next(int n) { }
153 839997 : static inline void kcsan_set_access_mask(unsigned long mask) { }
154 :
155 : struct kcsan_scoped_access { };
156 : #define __kcsan_cleanup_scoped __maybe_unused
157 : static inline struct kcsan_scoped_access *
158 : kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
159 : struct kcsan_scoped_access *sa) { return sa; }
160 : static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
161 :
162 : #endif /* CONFIG_KCSAN */
163 :
164 : #ifdef __SANITIZE_THREAD__
165 : /*
166 : * Only calls into the runtime when the particular compilation unit has KCSAN
167 : * instrumentation enabled. May be used in header files.
168 : */
169 : #define kcsan_check_access __kcsan_check_access
170 :
171 : /*
172 : * Only use these to disable KCSAN for accesses in the current compilation unit;
173 : * calls into libraries may still perform KCSAN checks.
174 : */
175 : #define __kcsan_disable_current kcsan_disable_current
176 : #define __kcsan_enable_current kcsan_enable_current_nowarn
177 : #else
178 : static inline void kcsan_check_access(const volatile void *ptr, size_t size,
179 : int type) { }
180 216783 : static inline void __kcsan_enable_current(void) { }
181 216783 : static inline void __kcsan_disable_current(void) { }
182 : #endif
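A sketch of how an instrumented compilation unit might suppress checks around a single access (racy_peek() is illustrative); in units built without KCSAN instrumentation, both calls compile to the no-op stubs above:

.. code-block:: c

    static inline unsigned long racy_peek(const unsigned long *p)
    {
            unsigned long v;

            __kcsan_disable_current();      /* only affects instrumented units        */
            v = *p;
            __kcsan_enable_current();       /* maps to kcsan_enable_current_nowarn()  */
            return v;
    }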
183 :
184 : /**
185 : * __kcsan_check_read - check regular read access for races
186 : *
187 : * @ptr: address of access
188 : * @size: size of access
189 : */
190 : #define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
191 :
192 : /**
193 : * __kcsan_check_write - check regular write access for races
194 : *
195 : * @ptr: address of access
196 : * @size: size of access
197 : */
198 : #define __kcsan_check_write(ptr, size) \
199 : __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
200 :
201 : /**
202 : * __kcsan_check_read_write - check regular read-write access for races
203 : *
204 : * @ptr: address of access
205 : * @size: size of access
206 : */
207 : #define __kcsan_check_read_write(ptr, size) \
208 : __kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
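These wrappers only choose the type bits passed to __kcsan_check_access(). For example, an uninstrumented copy helper might check both ranges it touches (copy_words() is an illustrative assumption):

.. code-block:: c

    static void copy_words(unsigned long *dst, const unsigned long *src, size_t n)
    {
            size_t bytes = n * sizeof(*dst);

            __kcsan_check_read(src, bytes);         /* type 0: plain read  */
            __kcsan_check_write(dst, bytes);        /* KCSAN_ACCESS_WRITE  */
            while (n--)
                    *dst++ = *src++;
    }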
209 :
210 : /**
211 : * kcsan_check_read - check regular read access for races
212 : *
213 : * @ptr: address of access
214 : * @size: size of access
215 : */
216 : #define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
217 :
218 : /**
219 : * kcsan_check_write - check regular write access for races
220 : *
221 : * @ptr: address of access
222 : * @size: size of access
223 : */
224 : #define kcsan_check_write(ptr, size) \
225 : kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
226 :
227 : /**
228 : * kcsan_check_read_write - check regular read-write access for races
229 : *
230 : * @ptr: address of access
231 : * @size: size of access
232 : */
233 : #define kcsan_check_read_write(ptr, size) \
234 : kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
235 :
236 : /*
237 : * Check for atomic accesses: if atomic accesses are not ignored, this simply
238 : * aliases to kcsan_check_access(), otherwise becomes a no-op.
239 : */
240 : #ifdef CONFIG_KCSAN_IGNORE_ATOMICS
241 : #define kcsan_check_atomic_read(...) do { } while (0)
242 : #define kcsan_check_atomic_write(...) do { } while (0)
243 : #define kcsan_check_atomic_read_write(...) do { } while (0)
244 : #else
245 : #define kcsan_check_atomic_read(ptr, size) \
246 : kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
247 : #define kcsan_check_atomic_write(ptr, size) \
248 : kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
249 : #define kcsan_check_atomic_read_write(ptr, size) \
250 : kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
251 : #endif
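A sketch of how a marked-atomic wrapper might use these so KCSAN treats the access as intentionally concurrent; my_atomic_read() is illustrative and not the kernel's real instrumentation wrapper:

.. code-block:: c

    static inline int my_atomic_read(const int *p)
    {
            /* Becomes a no-op under CONFIG_KCSAN_IGNORE_ATOMICS. */
            kcsan_check_atomic_read(p, sizeof(*p));
            return READ_ONCE(*p);
    }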
252 :
253 : /**
254 : * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
255 : *
256 : * Assert that there are no concurrent writes to @var; other readers are
257 : * allowed. This assertion can be used to specify properties of concurrent code,
258 : * where violation cannot be detected as a normal data race.
259 : *
260 : * For example, if we only have a single writer, but multiple concurrent
261 : * readers, to avoid data races, all these accesses must be marked; even
262 : * concurrent marked writes racing with the single writer are bugs.
263 : * Unfortunately, due to being marked, they are no longer data races. For cases
264 : * like these, we can use the macro as follows:
265 : *
266 : * .. code-block:: c
267 : *
268 : * void writer(void) {
269 : * spin_lock(&update_foo_lock);
270 : * ASSERT_EXCLUSIVE_WRITER(shared_foo);
271 : * WRITE_ONCE(shared_foo, ...);
272 : * spin_unlock(&update_foo_lock);
273 : * }
274 : * void reader(void) {
275 : * // update_foo_lock does not need to be held!
276 : * ... = READ_ONCE(shared_foo);
277 : * }
278 : *
279 : * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
280 : * checking if there is a clear scope in which no concurrent writes are expected.
281 : *
282 : * @var: variable to assert on
283 : */
284 : #define ASSERT_EXCLUSIVE_WRITER(var) \
285 : __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
286 :
287 : /*
288 : * Helper macros for the implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
289 : * expected to be unique for the scope in which instances of kcsan_scoped_access
290 : * are declared.
291 : */
292 : #define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
293 : #define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \
294 : struct kcsan_scoped_access __kcsan_scoped_name(id, _) \
295 : __kcsan_cleanup_scoped; \
296 : struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \
297 : __maybe_unused = kcsan_begin_scoped_access( \
298 : &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \
299 : &__kcsan_scoped_name(id, _))
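For reference, ASSERT_EXCLUSIVE_WRITER_SCOPED(foo) (defined below) roughly expands as follows, assuming __COUNTER__ yields 0; the cleanup attribute calls kcsan_end_scoped_access() automatically when the variable leaves scope:

.. code-block:: c

    struct kcsan_scoped_access __kcsan_scoped_0_
            __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)));
    struct kcsan_scoped_access *__kcsan_scoped_0_dummy_p __maybe_unused =
            kcsan_begin_scoped_access(&(foo), sizeof(foo),
                                      KCSAN_ACCESS_SCOPED | (KCSAN_ACCESS_ASSERT),
                                      &__kcsan_scoped_0_);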
300 :
301 : /**
302 : * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
303 : *
304 : * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
305 : *
306 : * Assert that there are no concurrent writes to @var for the duration of the
307 : * scope in which it is introduced. This provides a better way to fully cover
308 : * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
309 : * increases the likelihood for KCSAN to detect racing accesses.
310 : *
311 : * For example, it allows finding race-condition bugs that only occur due to
312 : * state changes within the scope itself:
313 : *
314 : * .. code-block:: c
315 : *
316 : * void writer(void) {
317 : * spin_lock(&update_foo_lock);
318 : * {
319 : * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
320 : * WRITE_ONCE(shared_foo, 42);
321 : * ...
322 : * // shared_foo should still be 42 here!
323 : * }
324 : * spin_unlock(&update_foo_lock);
325 : * }
326 : * void buggy(void) {
327 : * if (READ_ONCE(shared_foo) == 42)
328 : * WRITE_ONCE(shared_foo, 1); // bug!
329 : * }
330 : *
331 : * @var: variable to assert on
332 : */
333 : #define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \
334 : __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)
335 :
336 : /**
337 : * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
338 : *
339 : * Assert that there are no concurrent accesses to @var (no readers nor
340 : * writers). This assertion can be used to specify properties of concurrent
341 : * code, where violation cannot be detected as a normal data race.
342 : *
343 : * For example, where exclusive access is expected after determining no other
344 : * users of an object are left, but the object is not actually freed. We can
345 : * check that this property actually holds as follows:
346 : *
347 : * .. code-block:: c
348 : *
349 : * if (refcount_dec_and_test(&obj->refcnt)) {
350 : * ASSERT_EXCLUSIVE_ACCESS(*obj);
351 : * do_some_cleanup(obj);
352 : * release_for_reuse(obj);
353 : * }
354 : *
355 : * Note:
356 : *
357 : * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
358 : * checking if there is a clear scope in which no concurrent accesses are expected.
359 : *
360 : * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
361 : * fit to detect use-after-free bugs.
362 : *
363 : * @var: variable to assert on
364 : */
365 : #define ASSERT_EXCLUSIVE_ACCESS(var) \
366 : __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
367 :
368 : /**
369 : * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
370 : *
371 : * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
372 : *
373 : * Assert that there are no concurrent accesses to @var (no readers nor writers)
374 : * for the entire duration of the scope in which it is introduced. This provides
375 : * a better way to fully cover the enclosing scope, compared to multiple
376 : * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
377 : * racing accesses.
378 : *
379 : * @var: variable to assert on
380 : */
381 : #define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \
382 : __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
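Mirroring the refcount example for ASSERT_EXCLUSIVE_ACCESS() above, a sketch of the scoped form covering a whole teardown block (obj, its refcount, and the cleanup helpers are illustrative, as in that example):

.. code-block:: c

    if (refcount_dec_and_test(&obj->refcnt)) {
            ASSERT_EXCLUSIVE_ACCESS_SCOPED(*obj);   /* checked until the end of this block */
            do_some_cleanup(obj);
            release_for_reuse(obj);
    }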
383 :
384 : /**
385 : * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
386 : *
387 : * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
388 : *
389 : * Assert that there are no concurrent writes to a subset of bits in @var;
390 : * concurrent readers are permitted. This assertion captures more detailed
391 : * bit-level properties, compared to the other (word granularity) assertions.
392 : * Only the bits set in @mask are checked for concurrent modifications, while
393 : * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
394 : * are ignored.
395 : *
396 : * Use this for variables, where some bits must not be modified concurrently,
397 : * yet other bits are expected to be modified concurrently.
398 : *
399 : * For example, variables where, after initialization, some bits are read-only,
400 : * but other bits may still be modified concurrently. A reader may wish to
401 : * assert that this is true as follows:
402 : *
403 : * .. code-block:: c
404 : *
405 : * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
406 : * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
407 : *
408 : * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
409 : * to access the masked bits only, and KCSAN optimistically assumes it is
410 : * therefore safe, even in the presence of data races, and marking it with
411 : * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
412 : * it may still be advisable to do so, since we cannot reason about all compiler
413 : * optimizations when it comes to bit manipulations (on the reader and writer
414 : * side). If you are sure nothing can go wrong, we can write the above simply
415 : * as:
416 : *
417 : * .. code-block:: c
418 : *
419 : * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
420 : * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
421 : *
422 : * Another example, where this may be used, is when certain bits of @var may
423 : * only be modified when holding the appropriate lock, but other bits may still
424 : * be modified concurrently. Writers, where other bits may change concurrently,
425 : * could use the assertion as follows:
426 : *
427 : * .. code-block:: c
428 : *
429 : * spin_lock(&foo_lock);
430 : * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
431 : * old_flags = flags;
432 : * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
433 : * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
434 : * spin_unlock(&foo_lock);
435 : *
436 : * @var: variable to assert on
437 : * @mask: only check for modifications to bits set in @mask
438 : */
439 : #define ASSERT_EXCLUSIVE_BITS(var, mask) \
440 : do { \
441 : kcsan_set_access_mask(mask); \
442 : __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
443 : kcsan_set_access_mask(0); \
444 : kcsan_atomic_next(1); \
445 : } while (0)
446 :
447 : #endif /* _LINUX_KCSAN_CHECKS_H */