Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /* kernel/rwsem.c: R/W semaphores, public implementation
3 : *
4 : * Written by David Howells (dhowells@redhat.com).
5 : * Derived from asm-i386/semaphore.h
6 : *
7 : * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
8 : * and Michel Lespinasse <walken@google.com>
9 : *
10 : * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
11 : * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
12 : *
13 : * Rwsem count bit fields re-definition and rwsem rearchitecture by
14 : * Waiman Long <longman@redhat.com> and
15 : * Peter Zijlstra <peterz@infradead.org>.
16 : */
17 :
18 : #include <linux/types.h>
19 : #include <linux/kernel.h>
20 : #include <linux/sched.h>
21 : #include <linux/sched/rt.h>
22 : #include <linux/sched/task.h>
23 : #include <linux/sched/debug.h>
24 : #include <linux/sched/wake_q.h>
25 : #include <linux/sched/signal.h>
26 : #include <linux/sched/clock.h>
27 : #include <linux/export.h>
28 : #include <linux/rwsem.h>
29 : #include <linux/atomic.h>
30 :
31 : #include "lock_events.h"
32 :
33 : /*
34 : * The least significant 2 bits of the owner value have the following
35 : * meanings when set.
36 : * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
37 : * - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
38 : *
39 : * When the rwsem is reader-owned and a spinning writer has timed out,
40 : * the nonspinnable bit will be set to disable optimistic spinning.
41 : *
42 : * When a writer acquires a rwsem, it puts its task_struct pointer
43 : * into the owner field. It is cleared after an unlock.
44 : *
45 : * When a reader acquires a rwsem, it will also put its task_struct
46 : * pointer into the owner field with the RWSEM_READER_OWNED bit set.
47 : * On unlock, the owner field will largely be left untouched. So
48 : * for a free or reader-owned rwsem, the owner value may contain
49 : * information about the last reader that acquired the rwsem.
50 : *
51 : * That information may be helpful in debugging cases where the system
52 : * seems to hang on a reader-owned rwsem, especially if only one reader
53 : * is involved. Ideally we would like to track all the readers that own
54 : * a rwsem, but the overhead is simply too big.
55 : *
56 : * A fast path reader optimistic lock stealing is supported when the rwsem
57 : * is previously owned by a writer and the following conditions are met:
58 : * - OSQ is empty
59 : * - rwsem is not currently writer owned
60 : * - the handoff isn't set.
61 : */
62 : #define RWSEM_READER_OWNED (1UL << 0)
63 : #define RWSEM_NONSPINNABLE (1UL << 1)
64 : #define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
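/*
 * Worked example (editor's note, not part of the kernel source): after
 * down_write() by task T, the owner word is simply (unsigned long)T;
 * after down_read() by T it is (unsigned long)T | RWSEM_READER_OWNED.
 * Masking with ~RWSEM_OWNER_FLAGS_MASK recovers the task pointer, which
 * is exactly what rwsem_owner() further below does. This relies on
 * task_struct being at least 4-byte aligned, so bits 0-1 are free.
 */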
65 :
66 : #ifdef CONFIG_DEBUG_RWSEMS
67 : # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
68 : if (!debug_locks_silent && \
69 : WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
70 : #c, atomic_long_read(&(sem)->count), \
71 : (unsigned long) sem->magic, \
72 : atomic_long_read(&(sem)->owner), (long)current, \
73 : list_empty(&(sem)->wait_list) ? "" : "not ")) \
74 : debug_locks_off(); \
75 : } while (0)
76 : #else
77 : # define DEBUG_RWSEMS_WARN_ON(c, sem)
78 : #endif
79 :
80 : /*
81 : * On 64-bit architectures, the bit definitions of the count are:
82 : *
83 : * Bit 0 - writer locked bit
84 : * Bit 1 - waiters present bit
85 : * Bit 2 - lock handoff bit
86 : * Bits 3-7 - reserved
87 : * Bits 8-62 - 55-bit reader count
88 : * Bit 63 - read fail bit
89 : *
90 : * On 32-bit architectures, the bit definitions of the count are:
91 : *
92 : * Bit 0 - writer locked bit
93 : * Bit 1 - waiters present bit
94 : * Bit 2 - lock handoff bit
95 : * Bits 3-7 - reserved
96 : * Bits 8-30 - 23-bit reader count
97 : * Bit 31 - read fail bit
98 : *
99 : * It is not likely that the most significant bit (read fail bit) will ever
100 : * be set. This guard bit is still checked anyway in the down_read() fastpath
101 : * just in case we need to use up more of the reader bits for other purposes
102 : * in the future.
103 : *
104 : * atomic_long_fetch_add() is used to obtain reader lock, whereas
105 : * atomic_long_cmpxchg() will be used to obtain writer lock.
106 : *
107 : * There are three places where the lock handoff bit may be set or cleared.
108 : * 1) rwsem_mark_wake() for readers.
109 : * 2) rwsem_try_write_lock() for writers.
110 : * 3) Error path of rwsem_down_write_slowpath().
111 : *
112 : * For all the above cases, wait_lock will be held. A writer must also
113 : * be the first one in the wait_list to be eligible for setting the handoff
114 : * bit. So concurrent setting/clearing of handoff bit is not possible.
115 : */
116 : #define RWSEM_WRITER_LOCKED (1UL << 0)
117 : #define RWSEM_FLAG_WAITERS (1UL << 1)
118 : #define RWSEM_FLAG_HANDOFF (1UL << 2)
119 : #define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1))
120 :
121 : #define RWSEM_READER_SHIFT 8
122 : #define RWSEM_READER_BIAS (1UL << RWSEM_READER_SHIFT)
123 : #define RWSEM_READER_MASK (~(RWSEM_READER_BIAS - 1))
124 : #define RWSEM_WRITER_MASK RWSEM_WRITER_LOCKED
125 : #define RWSEM_LOCK_MASK (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
126 : #define RWSEM_READ_FAILED_MASK (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
127 : RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
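/*
 * Worked example (editor's sketch, not part of the kernel source): with
 * the layout above, a 64-bit count of 0x301 decodes as the writer locked
 * bit plus three reader biases (0x300 >> RWSEM_READER_SHIFT == 3), a
 * transient state while readers back out. The helper below is
 * hypothetical and merely restates the masks defined above.
 */
static inline long sketch_reader_count(long count)
{
	/* Bits 8 and up hold the biased reader count. */
	return (count & RWSEM_READER_MASK) >> RWSEM_READER_SHIFT;
}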
128 :
129 : /*
130 : * All writes to owner are protected by WRITE_ONCE() to make sure that
131 : * store tearing can't happen as optimistic spinners may read and use
132 : * the owner value concurrently without lock. Reads from owner, however,
133 : * may not need READ_ONCE() as long as the pointer value is only used
134 : * for comparison and isn't being dereferenced.
135 : */
136 328859 : static inline void rwsem_set_owner(struct rw_semaphore *sem)
137 : {
138 657679 : atomic_long_set(&sem->owner, (long)current);
139 : }
140 :
141 327639 : static inline void rwsem_clear_owner(struct rw_semaphore *sem)
142 : {
143 655299 : atomic_long_set(&sem->owner, 0);
144 : }
145 :
146 : /*
147 : * Test the flags in the owner field.
148 : */
149 181648 : static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
150 : {
151 0 : return atomic_long_read(&sem->owner) & flags;
152 : }
153 :
154 : /*
155 : * The task_struct pointer of the last owning reader will be left in
156 : * the owner field.
157 : *
158 : * Note that the owner value merely indicates that the task once owned
159 : * the rwsem; it may no longer be the real owner, or one of the real
160 : * owners, by the time that field is examined, so take it with a grain of salt.
161 : *
162 : * The reader non-spinnable bit is preserved.
163 : */
164 181428 : static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
165 : struct task_struct *owner)
166 : {
167 362845 : unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
168 181428 : (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
169 :
170 181417 : atomic_long_set(&sem->owner, val);
171 181427 : }
172 :
173 181388 : static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
174 : {
175 181388 : __rwsem_set_reader_owned(sem, current);
176 : }
177 :
178 : /*
179 : * Return true if the rwsem is owned by a reader.
180 : */
181 181556 : static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
182 : {
183 : #ifdef CONFIG_DEBUG_RWSEMS
184 : /*
185 : * Check the count to see if it is write-locked.
186 : */
187 181556 : long count = atomic_long_read(&sem->count);
188 :
189 181540 : if (count & RWSEM_WRITER_MASK)
190 : return false;
191 : #endif
192 181545 : return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
193 : }
194 :
195 : #ifdef CONFIG_DEBUG_RWSEMS
196 : /*
197 : * With CONFIG_DEBUG_RWSEMS configured, this makes sure that any task
198 : * pointer left in the owner field of a reader-owned rwsem is the
199 : * real owner or one of the real owners. The only exception is when the
200 : * unlock is done by up_read_non_owner().
201 : */
202 181442 : static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
203 : {
204 181442 : unsigned long val = atomic_long_read(&sem->owner);
205 :
206 181431 : while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
207 181348 : if (atomic_long_try_cmpxchg(&sem->owner, &val,
208 181323 : val & RWSEM_OWNER_FLAGS_MASK))
209 181348 : return;
210 : }
211 : }
212 : #else
213 : static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
214 : {
215 : }
216 : #endif
217 :
218 : /*
219 : * Set the RWSEM_NONSPINNABLE bit if the RWSEM_READER_OWNED flag
220 : * remains set. Otherwise, the operation will be aborted.
221 : */
222 32 : static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
223 : {
224 32 : unsigned long owner = atomic_long_read(&sem->owner);
225 :
226 32 : do {
227 32 : if (!(owner & RWSEM_READER_OWNED))
228 : break;
229 32 : if (owner & RWSEM_NONSPINNABLE)
230 : break;
231 64 : } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
232 64 : owner | RWSEM_NONSPINNABLE));
233 32 : }
234 :
235 35892 : static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
236 : {
237 35892 : *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
238 :
239 35894 : if (WARN_ON_ONCE(*cntp < 0))
240 0 : rwsem_set_nonspinnable(sem);
241 :
242 35894 : if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
243 35783 : rwsem_set_reader_owned(sem);
244 35783 : return true;
245 : }
246 :
247 : return false;
248 : }
249 :
250 328804 : static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
251 : {
252 328804 : long tmp = RWSEM_UNLOCKED_VALUE;
253 :
254 657663 : if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
255 328389 : rwsem_set_owner(sem);
256 328350 : return true;
257 : }
258 :
259 : return false;
260 : }
261 :
262 : /*
263 : * Return just the real task structure pointer of the owner
264 : */
265 328796 : static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
266 : {
267 657592 : return (struct task_struct *)
268 657592 : (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
269 : }
270 :
271 : /*
272 : * Return the real task structure pointer of the owner and the embedded
273 : * flags in the owner. pflags must be non-NULL.
274 : */
275 : static inline struct task_struct *
276 225424 : rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
277 : {
278 450848 : unsigned long owner = atomic_long_read(&sem->owner);
279 :
280 225424 : *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
281 225424 : return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
282 : }
283 :
284 : /*
285 : * Guide to the rw_semaphore's count field.
286 : *
287 : * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
288 : * by a writer.
289 : *
290 : * The lock is owned by readers when
291 : * (1) the RWSEM_WRITER_LOCKED isn't set in count,
292 : * (2) some of the reader bits are set in count, and
293 : * (3) the owner field has the RWSEM_READER_OWNED bit set.
294 : *
295 : * Having some reader bits set is not enough to guarantee a reader-owned
296 : * lock, as the readers may be in the process of backing out from the count
297 : * and a writer has just released the lock. So another writer may steal
298 : * the lock immediately after that.
299 : */
300 :
301 : /*
302 : * Initialize an rwsem:
303 : */
304 58145 : void __init_rwsem(struct rw_semaphore *sem, const char *name,
305 : struct lock_class_key *key)
306 : {
307 : #ifdef CONFIG_DEBUG_LOCK_ALLOC
308 : /*
309 : * Make sure we are not reinitializing a held semaphore:
310 : */
311 58145 : debug_check_no_locks_freed((void *)sem, sizeof(*sem));
312 58145 : lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
313 : #endif
314 : #ifdef CONFIG_DEBUG_RWSEMS
315 58145 : sem->magic = sem;
316 : #endif
317 58145 : atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
318 58145 : raw_spin_lock_init(&sem->wait_lock);
319 58146 : INIT_LIST_HEAD(&sem->wait_list);
320 58146 : atomic_long_set(&sem->owner, 0L);
321 : #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
322 58146 : osq_lock_init(&sem->osq);
323 : #endif
324 58146 : }
325 : EXPORT_SYMBOL(__init_rwsem);
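/*
 * Usage sketch (editor's addition): the two standard ways to set up an
 * rwsem before first use. DECLARE_RWSEM() defines a statically
 * initialized semaphore; init_rwsem() wraps __init_rwsem() for run-time
 * initialization of embedded semaphores. 'my_static_sem' and 'my_ctx'
 * are hypothetical names for illustration only.
 */
static DECLARE_RWSEM(my_static_sem);	/* ready at link time */

struct my_ctx {
	struct rw_semaphore sem;
};

static void my_ctx_setup(struct my_ctx *ctx)
{
	init_rwsem(&ctx->sem);		/* run-time initialization */
}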
326 :
327 : enum rwsem_waiter_type {
328 : RWSEM_WAITING_FOR_WRITE,
329 : RWSEM_WAITING_FOR_READ
330 : };
331 :
332 : struct rwsem_waiter {
333 : struct list_head list;
334 : struct task_struct *task;
335 : enum rwsem_waiter_type type;
336 : unsigned long timeout;
337 : };
338 : #define rwsem_first_waiter(sem) \
339 : list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
340 :
341 : enum rwsem_wake_type {
342 : RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
343 : RWSEM_WAKE_READERS, /* Wake readers only */
344 : RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
345 : };
346 :
347 : enum writer_wait_state {
348 : WRITER_NOT_FIRST, /* Writer is not first in wait list */
349 : WRITER_FIRST, /* Writer is first in wait list */
350 : WRITER_HANDOFF /* Writer is first & handoff needed */
351 : };
352 :
353 : /*
354 : * The typical HZ value is either 250 or 1000. So set the minimum waiting
355 : * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
356 : * queue before initiating the handoff protocol.
357 : */
358 : #define RWSEM_WAIT_TIMEOUT DIV_ROUND_UP(HZ, 250)
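/*
 * Worked example (editor's note): DIV_ROUND_UP(HZ, 250) evaluates to
 * DIV_ROUND_UP(1000, 250) == 4 jiffies (4ms) at HZ=1000 and to
 * DIV_ROUND_UP(250, 250) == 1 jiffy (4ms) at HZ=250, matching the
 * "at least 4ms" rule above. At HZ=100 it is 1 jiffy, i.e. 10ms,
 * the "1 jiffy if it is higher than 4ms" case.
 */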
359 :
360 : /*
361 : * Magic number to batch-wakeup waiting readers, even when writers are
362 : * also present in the queue. This both limits the amount of work the
363 : * waking thread must do and also prevents any potential counter overflow,
364 : * however unlikely.
365 : */
366 : #define MAX_READERS_WAKEUP 0x100
367 :
368 : /*
369 : * handle the lock release when processes blocked on it that can now run
370 : * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
371 : * have been set.
372 : * - there must be someone on the queue
373 : * - the wait_lock must be held by the caller
374 : * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
375 : * to actually wakeup the blocked task(s) and drop the reference count,
376 : * preferably when the wait_lock is released
377 : * - woken process blocks are discarded from the list after having task zeroed
378 : * - writers are only marked woken if downgrading is false
379 : */
380 287 : static void rwsem_mark_wake(struct rw_semaphore *sem,
381 : enum rwsem_wake_type wake_type,
382 : struct wake_q_head *wake_q)
383 : {
384 287 : struct rwsem_waiter *waiter, *tmp;
385 287 : long oldcount, woken = 0, adjustment = 0;
386 287 : struct list_head wlist;
387 :
388 574 : lockdep_assert_held(&sem->wait_lock);
389 :
390 : /*
391 : * Take a peek at the queue head waiter such that we can determine
392 : * the wakeup(s) to perform.
393 : */
394 287 : waiter = rwsem_first_waiter(sem);
395 :
396 287 : if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
397 242 : if (wake_type == RWSEM_WAKE_ANY) {
398 : /*
399 : * Mark writer at the front of the queue for wakeup.
400 : * Until the task is actually awoken later by the
401 : * caller, other writers are able to steal it.
402 : * Readers, on the other hand, will block as they
403 : * will notice the queued writer.
404 : */
405 192 : wake_q_add(wake_q, waiter->task);
406 242 : lockevent_inc(rwsem_wake_writer);
407 : }
408 :
409 243 : return;
410 : }
411 :
412 : /*
413 : * No reader wakeup if there are too many of them already.
414 : */
415 45 : if (unlikely(atomic_long_read(&sem->count) < 0))
416 : return;
417 :
418 : /*
419 : * Writers might steal the lock before we grant it to the next reader.
420 : * We prefer to do the first reader grant before counting readers
421 : * so we can bail out early if a writer stole the lock.
422 : */
423 45 : if (wake_type != RWSEM_WAKE_READ_OWNED) {
424 45 : struct task_struct *owner;
425 :
426 45 : adjustment = RWSEM_READER_BIAS;
427 45 : oldcount = atomic_long_fetch_add(adjustment, &sem->count);
428 45 : if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
429 : /*
430 : * When we've been waiting "too" long (for writers
431 : * to give up the lock), request a HANDOFF to
432 : * force the issue.
433 : */
434 1 : if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
435 1 : time_after(jiffies, waiter->timeout)) {
436 1 : adjustment -= RWSEM_FLAG_HANDOFF;
437 1 : lockevent_inc(rwsem_rlock_handoff);
438 : }
439 :
440 1 : atomic_long_add(-adjustment, &sem->count);
441 1 : return;
442 : }
443 : /*
444 : * Set it to reader-owned to give spinners an early
445 : * indication that readers now have the lock.
446 : * The reader nonspinnable bit seen at slowpath entry of
447 : * the reader is copied over.
448 : */
449 44 : owner = waiter->task;
450 44 : __rwsem_set_reader_owned(sem, owner);
451 : }
452 :
453 : /*
454 : * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
455 : * queue. We know that at least one reader will be woken, as we
456 : * accounted for that above. Note we increment the 'active part' of
457 : * the count by the number of readers before waking any processes up.
458 : *
459 : * This is an adaptation of the phase-fair R/W locks where at the
460 : * reader phase (first waiter is a reader), all readers are eligible
461 : * to acquire the lock at the same time irrespective of their order
462 : * in the queue. The writers acquire the lock according to their
463 : * order in the queue.
464 : *
465 : * We have to do wakeup in 2 passes to prevent the possibility that
466 : * the reader count may be decremented before it is incremented. It
467 : * is because the to-be-woken waiter may not have slept yet. So it
468 : * may see waiter->task cleared, finish its critical section and
469 : * do an unlock before the reader count increment.
470 : *
471 : * 1) Collect the read-waiters in a separate list, count them and
472 : * fully increment the reader count in rwsem.
473 : * 2) For each waiter in the new list, clear waiter->task and
474 : * put them into wake_q to be woken up later.
475 : */
476 44 : INIT_LIST_HEAD(&wlist);
477 125 : list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
478 81 : if (waiter->type == RWSEM_WAITING_FOR_WRITE)
479 8 : continue;
480 :
481 73 : woken++;
482 73 : list_move_tail(&waiter->list, &wlist);
483 :
484 : /*
485 : * Limit # of readers that can be woken up per wakeup call.
486 : */
487 73 : if (woken >= MAX_READERS_WAKEUP)
488 : break;
489 : }
490 :
491 44 : adjustment = woken * RWSEM_READER_BIAS - adjustment;
492 44 : lockevent_cond_inc(rwsem_wake_reader, woken);
493 44 : if (list_empty(&sem->wait_list)) {
494 : /* hit end of list above */
495 41 : adjustment -= RWSEM_FLAG_WAITERS;
496 : }
497 :
498 : /*
499 : * When we've woken a reader, we no longer need to force writers
500 : * to give up the lock and we can clear HANDOFF.
501 : */
502 44 : if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
503 1 : adjustment -= RWSEM_FLAG_HANDOFF;
504 :
505 44 : if (adjustment)
506 44 : atomic_long_add(adjustment, &sem->count);
507 :
508 : /* 2nd pass */
509 117 : list_for_each_entry_safe(waiter, tmp, &wlist, list) {
510 73 : struct task_struct *tsk;
511 :
512 73 : tsk = waiter->task;
513 73 : get_task_struct(tsk);
514 :
515 : /*
516 : * Ensure calling get_task_struct() before setting the reader
517 : * waiter to nil such that rwsem_down_read_slowpath() cannot
518 : * race with do_exit() by always holding a reference count
519 : * to the task to wakeup.
520 : */
521 73 : smp_store_release(&waiter->task, NULL);
522 : /*
523 : * Ensure issuing the wakeup (either by us or someone else)
524 : * after setting the reader waiter to nil.
525 : */
526 73 : wake_q_add_safe(wake_q, tsk);
527 : }
528 : }
529 :
530 : /*
531 : * This function must be called with the sem->wait_lock held to prevent
532 : * race conditions between checking the rwsem wait list and setting the
533 : * sem->count accordingly.
534 : *
535 : * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
536 : * bit is set or the lock is acquired with handoff bit cleared.
537 : */
538 170 : static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
539 : enum writer_wait_state wstate)
540 : {
541 170 : long count, new;
542 :
543 340 : lockdep_assert_held(&sem->wait_lock);
544 :
545 170 : count = atomic_long_read(&sem->count);
546 170 : do {
547 170 : bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
548 :
549 170 : if (has_handoff && wstate == WRITER_NOT_FIRST)
550 : return false;
551 :
552 166 : new = count;
553 :
554 166 : if (count & RWSEM_LOCK_MASK) {
555 77 : if (has_handoff || (wstate != WRITER_HANDOFF))
556 : return false;
557 :
558 4 : new |= RWSEM_FLAG_HANDOFF;
559 : } else {
560 89 : new |= RWSEM_WRITER_LOCKED;
561 89 : new &= ~RWSEM_FLAG_HANDOFF;
562 :
563 89 : if (list_is_singular(&sem->wait_list))
564 43 : new &= ~RWSEM_FLAG_WAITERS;
565 : }
566 186 : } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
567 :
568 : /*
569 : * We have either acquired the lock with handoff bit cleared or
570 : * set the handoff bit.
571 : */
572 93 : if (new & RWSEM_FLAG_HANDOFF)
573 : return false;
574 :
575 89 : rwsem_set_owner(sem);
576 89 : return true;
577 : }
578 :
579 : #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
580 : /*
581 : * Try to acquire the write lock before the writer has been put on the wait queue.
582 : */
583 2894 : static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
584 : {
585 2894 : long count = atomic_long_read(&sem->count);
586 :
587 2894 : while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
588 381 : if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
589 381 : count | RWSEM_WRITER_LOCKED)) {
590 381 : rwsem_set_owner(sem);
591 381 : lockevent_inc(rwsem_opt_lock);
592 381 : return true;
593 : }
594 : }
595 : return false;
596 : }
597 :
598 222099 : static inline bool owner_on_cpu(struct task_struct *owner)
599 : {
600 : /*
601 : * Due to the lock holder preemption issue, we skip spinning if the
602 : * task is not on a cpu or its cpu is preempted
603 : */
604 222099 : return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
605 : }
606 :
607 470 : static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
608 : {
609 470 : struct task_struct *owner;
610 470 : unsigned long flags;
611 470 : bool ret = true;
612 :
613 470 : if (need_resched()) {
614 : lockevent_inc(rwsem_opt_fail);
615 : return false;
616 : }
617 :
618 453 : preempt_disable();
619 453 : rcu_read_lock();
620 453 : owner = rwsem_owner_flags(sem, &flags);
621 : /*
622 : * Don't check the read-owner as the entry may be stale.
623 : */
624 453 : if ((flags & RWSEM_NONSPINNABLE) ||
625 313 : (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
626 : ret = false;
627 453 : rcu_read_unlock();
628 453 : preempt_enable();
629 :
630 453 : lockevent_cond_inc(rwsem_opt_fail, !ret);
631 453 : return ret;
632 : }
633 :
634 : /*
635 : * The rwsem_spin_on_owner() function returns the following 4 values
636 : * depending on the lock owner state.
637 : * OWNER_NULL : owner is currently NULL
638 : * OWNER_WRITER: when owner changes and is a writer
639 : * OWNER_READER: when owner changes and the new owner may be a reader.
640 : * OWNER_NONSPINNABLE:
641 : * when optimistic spinning has to stop because either the
642 : * owner stops running, is unknown, or its timeslice has
643 : * been used up.
644 : */
645 : enum owner_state {
646 : OWNER_NULL = 1 << 0,
647 : OWNER_WRITER = 1 << 1,
648 : OWNER_READER = 1 << 2,
649 : OWNER_NONSPINNABLE = 1 << 3,
650 : };
651 : #define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER | OWNER_READER)
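/*
 * Editor's note: the states are one-hot bit flags so that membership in
 * a group costs a single AND, e.g. (OWNER_READER & OWNER_SPINNABLE) is
 * non-zero while (OWNER_NONSPINNABLE & OWNER_SPINNABLE) is zero, which
 * is how the spin loops below decide whether to keep spinning.
 */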
652 :
653 : static inline enum owner_state
654 3168 : rwsem_owner_state(struct task_struct *owner, unsigned long flags)
655 : {
656 3168 : if (flags & RWSEM_NONSPINNABLE)
657 : return OWNER_NONSPINNABLE;
658 :
659 3168 : if (flags & RWSEM_READER_OWNED)
660 : return OWNER_READER;
661 :
662 637 : return owner ? OWNER_WRITER : OWNER_NULL;
663 : }
664 :
665 : static noinline enum owner_state
666 2909 : rwsem_spin_on_owner(struct rw_semaphore *sem)
667 : {
668 2909 : struct task_struct *new, *owner;
669 2909 : unsigned long flags, new_flags;
670 2909 : enum owner_state state;
671 :
672 2909 : owner = rwsem_owner_flags(sem, &flags);
673 2909 : state = rwsem_owner_state(owner, flags);
674 271 : if (state != OWNER_WRITER)
675 2638 : return state;
676 :
677 271 : rcu_read_lock();
678 222062 : for (;;) {
679 : /*
680 : * When a waiting writer sets the handoff flag, it may spin
681 : * on the owner as well. Once that writer acquires the lock,
682 : * we can spin on it. So we don't need to quit even when the
683 : * handoff bit is set.
684 : */
685 222062 : new = rwsem_owner_flags(sem, &new_flags);
686 222062 : if ((new != owner) || (new_flags != flags)) {
687 259 : state = rwsem_owner_state(new, new_flags);
688 : break;
689 : }
690 :
691 : /*
692 : * Ensure we emit the owner->on_cpu dereference _after_
693 : * checking that sem->owner still matches owner. If that fails,
694 : * owner might point to free()d memory; if it still matches,
695 : * the rcu_read_lock() ensures the memory stays valid.
696 : */
697 221803 : barrier();
698 :
699 221803 : if (need_resched() || !owner_on_cpu(owner)) {
700 : state = OWNER_NONSPINNABLE;
701 : break;
702 : }
703 :
704 221791 : cpu_relax();
705 : }
706 271 : rcu_read_unlock();
707 :
708 271 : return state;
709 : }
710 :
711 : /*
712 : * Calculate reader-owned rwsem spinning threshold for writer
713 : *
714 : * The more readers own the rwsem, the longer it will take for them to
715 : * wind down and free the rwsem. So the empirical formula used to
716 : * determine the actual spinning time limit here is:
717 : *
718 : * Spinning threshold = (10 + nr_readers/2)us
719 : *
720 : * The limit is capped to a maximum of 25us (30 readers). This is just
721 : * a heuristic and is subject to change in the future.
722 : */
723 32 : static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
724 : {
725 32 : long count = atomic_long_read(&sem->count);
726 32 : int readers = count >> RWSEM_READER_SHIFT;
727 32 : u64 delta;
728 :
729 32 : if (readers > 30)
730 : readers = 30;
731 32 : delta = (20 + readers) * NSEC_PER_USEC / 2;
732 :
733 32 : return sched_clock() + delta;
734 : }
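/*
 * Worked example (editor's note): for 8 readers,
 * delta = (20 + 8) * NSEC_PER_USEC / 2 = 14us, i.e. the documented
 * (10 + nr_readers/2)us formula. At the 30-reader cap,
 * delta = (20 + 30) * 1000 / 2 = 25000ns = 25us, the stated maximum.
 */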
735 :
736 424 : static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
737 : {
738 424 : bool taken = false;
739 424 : int prev_owner_state = OWNER_NULL;
740 424 : int loop = 0;
741 424 : u64 rspin_threshold = 0;
742 :
743 424 : preempt_disable();
744 :
745 : /* sem->wait_lock should not be held when doing optimistic spinning */
746 424 : if (!osq_lock(&sem->osq))
747 0 : goto done;
748 :
749 : /*
750 : * Optimistically spin on the owner field and attempt to acquire the
751 : * lock whenever the owner changes. Spinning will be stopped when:
752 : * 1) the owning writer isn't running; or
753 : * 2) readers own the lock and spinning time has exceeded limit.
754 : */
755 2905 : for (;;) {
756 2905 : enum owner_state owner_state;
757 :
758 2905 : owner_state = rwsem_spin_on_owner(sem);
759 2905 : if (!(owner_state & OWNER_SPINNABLE))
760 : break;
761 :
762 : /*
763 : * Try to acquire the lock
764 : */
765 2894 : taken = rwsem_try_write_lock_unqueued(sem);
766 :
767 2894 : if (taken)
768 : break;
769 :
770 : /*
771 : * Time-based reader-owned rwsem optimistic spinning
772 : */
773 2513 : if (owner_state == OWNER_READER) {
774 : /*
775 : * Re-initialize rspin_threshold every time
776 : * the owner state changes from non-reader to reader.
777 : * This allows a writer to steal the lock in between
778 : * 2 reader phases and have the threshold reset at
779 : * the beginning of the 2nd reader phase.
780 : */
781 2512 : if (prev_owner_state != OWNER_READER) {
782 32 : if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
783 : break;
784 32 : rspin_threshold = rwsem_rspin_threshold(sem);
785 32 : loop = 0;
786 : }
787 :
788 : /*
789 : * Check time threshold once every 16 iterations to
790 : * avoid calling sched_clock() too frequently so
791 : * as to reduce the average latency between the times
792 : * when the lock becomes free and when the spinner
793 : * is ready to do a trylock.
794 : */
795 2480 : else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
796 32 : rwsem_set_nonspinnable(sem);
797 32 : lockevent_inc(rwsem_opt_nospin);
798 32 : break;
799 : }
800 : }
801 :
802 : /*
803 : * An RT task cannot do optimistic spinning if it cannot
804 : * be sure the lock holder is running or live-lock may
805 : * happen if the current task and the lock holder happen
806 : * to run on the same CPU. However, aborting optimistic
807 : * spinning while a NULL owner is detected may miss some
808 : * opportunity where spinning can continue without causing
809 : * problem.
810 : *
811 : * There are 2 possible cases where an RT task may be able
812 : * to continue spinning.
813 : *
814 : * 1) The lock owner is in the process of releasing the
815 : * lock, sem->owner is cleared but the lock has not
816 : * been released yet.
817 : * 2) The lock was free and owner cleared, but another
818 : * task just comes in and acquires the lock before
819 : * we try to get it. The new owner may be a spinnable
820 : * writer.
821 : *
822 : * To take advantage of the two scenarios listed above, the RT
823 : * task is made to retry one more time to see if it can
824 : * acquire the lock or continue spinning on the new owning
825 : * writer. Of course, if the time lag is long enough or the
826 : * new owner is not a writer or spinnable, the RT task will
827 : * quit spinning.
828 : *
829 : * If the owner is a writer, the need_resched() check is
830 : * done inside rwsem_spin_on_owner(). If the owner is not
831 : * a writer, need_resched() check needs to be done here.
832 : */
833 2481 : if (owner_state != OWNER_WRITER) {
834 2481 : if (need_resched())
835 : break;
836 2481 : if (rt_task(current) &&
837 : (prev_owner_state != OWNER_WRITER))
838 : break;
839 : }
840 2481 : prev_owner_state = owner_state;
841 :
842 : /*
843 : * The cpu_relax() call is a compiler barrier which forces
844 : * everything in this loop to be re-loaded. We don't need
845 : * memory barriers as we'll eventually observe the right
846 : * values at the cost of a few extra spins.
847 : */
848 2481 : cpu_relax();
849 : }
850 424 : osq_unlock(&sem->osq);
851 424 : done:
852 424 : preempt_enable();
853 424 : lockevent_cond_inc(rwsem_opt_fail, !taken);
854 424 : return taken;
855 : }
856 :
857 : /*
858 : * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
859 : * only be called when the reader count reaches 0.
860 : */
861 71 : static inline void clear_nonspinnable(struct rw_semaphore *sem)
862 : {
863 71 : if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
864 32 : atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
865 71 : }
866 :
867 : #else
868 : static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
869 : {
870 : return false;
871 : }
872 :
873 : static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
874 : {
875 : return false;
876 : }
877 :
878 : static inline void clear_nonspinnable(struct rw_semaphore *sem) { }
879 :
880 : static inline int
881 : rwsem_spin_on_owner(struct rw_semaphore *sem)
882 : {
883 : return 0;
884 : }
885 : #define OWNER_NULL 1
886 : #endif
887 :
888 : /*
889 : * Wait for the read lock to be granted
890 : */
891 : static struct rw_semaphore __sched *
892 110 : rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state)
893 : {
894 110 : long adjustment = -RWSEM_READER_BIAS;
895 110 : long rcnt = (count >> RWSEM_READER_SHIFT);
896 110 : struct rwsem_waiter waiter;
897 110 : DEFINE_WAKE_Q(wake_q);
898 110 : bool wake = false;
899 :
900 : /*
901 : * To prevent a constant stream of readers from starving a sleeping
902 : * waiter, don't attempt optimistic lock stealing if the lock is
903 : * currently owned by readers.
904 : */
905 110 : if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
906 36 : (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
907 36 : goto queue;
908 :
909 : /*
910 : * Reader optimistic lock stealing.
911 : */
912 74 : if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
913 37 : rwsem_set_reader_owned(sem);
914 37 : lockevent_inc(rwsem_rlock_steal);
915 :
916 : /*
917 : * Wake up other readers in the wait queue if it is
918 : * the first reader.
919 : */
920 37 : if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
921 37 : raw_spin_lock_irq(&sem->wait_lock);
922 37 : if (!list_empty(&sem->wait_list))
923 37 : rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
924 : &wake_q);
925 37 : raw_spin_unlock_irq(&sem->wait_lock);
926 37 : wake_up_q(&wake_q);
927 : }
928 37 : return sem;
929 : }
930 :
931 37 : queue:
932 73 : waiter.task = current;
933 73 : waiter.type = RWSEM_WAITING_FOR_READ;
934 73 : waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
935 :
936 73 : raw_spin_lock_irq(&sem->wait_lock);
937 73 : if (list_empty(&sem->wait_list)) {
938 : /*
939 : * In case the wait queue is empty and the lock isn't owned
940 : * by a writer or has the handoff bit set, this reader can
941 : * exit the slowpath and return immediately as its
942 : * RWSEM_READER_BIAS has already been set in the count.
943 : */
944 24 : if (!(atomic_long_read(&sem->count) &
945 : (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
946 : /* Provide lock ACQUIRE */
947 0 : smp_acquire__after_ctrl_dep();
948 0 : raw_spin_unlock_irq(&sem->wait_lock);
949 0 : rwsem_set_reader_owned(sem);
950 0 : lockevent_inc(rwsem_rlock_fast);
951 0 : return sem;
952 : }
953 : adjustment += RWSEM_FLAG_WAITERS;
954 : }
955 73 : list_add_tail(&waiter.list, &sem->wait_list);
956 :
957 : /* we're now waiting on the lock, but no longer actively locking */
958 73 : count = atomic_long_add_return(adjustment, &sem->count);
959 :
960 : /*
961 : * If there are no active locks, wake the front queued process(es).
962 : *
963 : * If there are no writers and we are first in the queue,
964 : * wake our own waiter to join the existing active readers!
965 : */
966 73 : if (!(count & RWSEM_LOCK_MASK)) {
967 0 : clear_nonspinnable(sem);
968 0 : wake = true;
969 : }
970 73 : if (wake || (!(count & RWSEM_WRITER_MASK) &&
971 36 : (adjustment & RWSEM_FLAG_WAITERS)))
972 0 : rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
973 :
974 73 : raw_spin_unlock_irq(&sem->wait_lock);
975 73 : wake_up_q(&wake_q);
976 :
977 : /* wait to be given the lock */
978 219 : for (;;) {
979 146 : set_current_state(state);
980 146 : if (!smp_load_acquire(&waiter.task)) {
981 : /* Matches rwsem_mark_wake()'s smp_store_release(). */
982 : break;
983 : }
984 73 : if (signal_pending_state(state, current)) {
985 0 : raw_spin_lock_irq(&sem->wait_lock);
986 0 : if (waiter.task)
987 0 : goto out_nolock;
988 0 : raw_spin_unlock_irq(&sem->wait_lock);
989 : /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
990 0 : break;
991 : }
992 73 : schedule();
993 73 : lockevent_inc(rwsem_sleep_reader);
994 : }
995 :
996 73 : __set_current_state(TASK_RUNNING);
997 73 : lockevent_inc(rwsem_rlock);
998 73 : return sem;
999 :
1000 0 : out_nolock:
1001 0 : list_del(&waiter.list);
1002 0 : if (list_empty(&sem->wait_list)) {
1003 0 : atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
1004 : &sem->count);
1005 : }
1006 0 : raw_spin_unlock_irq(&sem->wait_lock);
1007 0 : __set_current_state(TASK_RUNNING);
1008 0 : lockevent_inc(rwsem_rlock_fail);
1009 0 : return ERR_PTR(-EINTR);
1010 : }
1011 :
1012 : /*
1013 : * Wait until we successfully acquire the write lock
1014 : */
1015 : static struct rw_semaphore *
1016 470 : rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1017 : {
1018 470 : long count;
1019 470 : enum writer_wait_state wstate;
1020 470 : struct rwsem_waiter waiter;
1021 470 : struct rw_semaphore *ret = sem;
1022 470 : DEFINE_WAKE_Q(wake_q);
1023 :
1024 : /* do optimistic spinning and steal lock if possible */
1025 470 : if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
1026 : /* rwsem_optimistic_spin() implies ACQUIRE on success */
1027 : return sem;
1028 : }
1029 :
1030 : /*
1031 : * Optimistic spinning failed, proceed to the slowpath
1032 : * and block until we can acquire the sem.
1033 : */
1034 89 : waiter.task = current;
1035 89 : waiter.type = RWSEM_WAITING_FOR_WRITE;
1036 89 : waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1037 :
1038 89 : raw_spin_lock_irq(&sem->wait_lock);
1039 :
1040 : /* account for this before adding a new element to the list */
1041 89 : wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
1042 :
1043 89 : list_add_tail(&waiter.list, &sem->wait_list);
1044 :
1045 : /* we're now waiting on the lock */
1046 89 : if (wstate == WRITER_NOT_FIRST) {
1047 29 : count = atomic_long_read(&sem->count);
1048 :
1049 : /*
1050 : * If there were already threads queued before us and:
1051 : * 1) there are no active locks, wake the front
1052 : * queued process(es) as the handoff bit might be set.
1053 : * 2) there are no active writers and some readers, the lock
1054 : * must be read owned; so we try to wake any read lock
1055 : * waiters that were queued ahead of us.
1056 : */
1057 29 : if (count & RWSEM_WRITER_MASK)
1058 11 : goto wait;
1059 :
1060 18 : rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
1061 : ? RWSEM_WAKE_READERS
1062 : : RWSEM_WAKE_ANY, &wake_q);
1063 :
1064 18 : if (!wake_q_empty(&wake_q)) {
1065 : /*
1066 : * We want to minimize wait_lock hold time especially
1067 : * when a large number of readers are to be woken up.
1068 : */
1069 5 : raw_spin_unlock_irq(&sem->wait_lock);
1070 5 : wake_up_q(&wake_q);
1071 5 : wake_q_init(&wake_q); /* Used again, reinit */
1072 5 : raw_spin_lock_irq(&sem->wait_lock);
1073 : }
1074 : } else {
1075 60 : atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1076 : }
1077 :
1078 89 : wait:
1079 : /* wait until we successfully acquire the lock */
1080 89 : set_current_state(state);
1081 251 : for (;;) {
1082 170 : if (rwsem_try_write_lock(sem, wstate)) {
1083 : /* rwsem_try_write_lock() implies ACQUIRE on success */
1084 : break;
1085 : }
1086 :
1087 81 : raw_spin_unlock_irq(&sem->wait_lock);
1088 :
1089 : /*
1090 : * After setting the handoff bit and failing to acquire
1091 : * the lock, attempt to spin on owner to accelerate lock
1092 : * transfer. If the previous owner is an on-cpu writer and it
1093 : * has just released the lock, OWNER_NULL will be returned.
1094 : * In this case, we attempt to acquire the lock again
1095 : * without sleeping.
1096 : */
1097 85 : if (wstate == WRITER_HANDOFF &&
1098 4 : rwsem_spin_on_owner(sem) == OWNER_NULL)
1099 1 : goto trylock_again;
1100 :
1101 : /* Block until there are no active lockers. */
1102 82 : for (;;) {
1103 82 : if (signal_pending_state(state, current))
1104 0 : goto out_nolock;
1105 :
1106 82 : schedule();
1107 82 : lockevent_inc(rwsem_sleep_writer);
1108 82 : set_current_state(state);
1109 : /*
1110 : * If HANDOFF bit is set, unconditionally do
1111 : * a trylock.
1112 : */
1113 82 : if (wstate == WRITER_HANDOFF)
1114 : break;
1115 :
1116 79 : if ((wstate == WRITER_NOT_FIRST) &&
1117 24 : (rwsem_first_waiter(sem) == &waiter))
1118 24 : wstate = WRITER_FIRST;
1119 :
1120 79 : count = atomic_long_read(&sem->count);
1121 79 : if (!(count & RWSEM_LOCK_MASK))
1122 : break;
1123 :
1124 : /*
1125 : * The setting of the handoff bit is deferred
1126 : * until rwsem_try_write_lock() is called.
1127 : */
1128 6 : if ((wstate == WRITER_FIRST) && (rt_task(current) ||
1129 6 : time_after(jiffies, waiter.timeout))) {
1130 : wstate = WRITER_HANDOFF;
1131 : lockevent_inc(rwsem_wlock_handoff);
1132 : break;
1133 : }
1134 : }
1135 81 : trylock_again:
1136 81 : raw_spin_lock_irq(&sem->wait_lock);
1137 : }
1138 89 : __set_current_state(TASK_RUNNING);
1139 89 : list_del(&waiter.list);
1140 89 : raw_spin_unlock_irq(&sem->wait_lock);
1141 89 : lockevent_inc(rwsem_wlock);
1142 :
1143 89 : return ret;
1144 :
1145 0 : out_nolock:
1146 0 : __set_current_state(TASK_RUNNING);
1147 0 : raw_spin_lock_irq(&sem->wait_lock);
1148 0 : list_del(&waiter.list);
1149 :
1150 0 : if (unlikely(wstate == WRITER_HANDOFF))
1151 0 : atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
1152 :
1153 0 : if (list_empty(&sem->wait_list))
1154 0 : atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
1155 : else
1156 0 : rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1157 0 : raw_spin_unlock_irq(&sem->wait_lock);
1158 0 : wake_up_q(&wake_q);
1159 0 : lockevent_inc(rwsem_wlock_fail);
1160 :
1161 0 : return ERR_PTR(-EINTR);
1162 : }
1163 :
1164 : /*
1165 : * handle waking up a waiter on the semaphore
1166 : * - up_read/up_write has decremented the active part of count if we come here
1167 : */
1168 232 : static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
1169 : {
1170 232 : unsigned long flags;
1171 232 : DEFINE_WAKE_Q(wake_q);
1172 :
1173 232 : raw_spin_lock_irqsave(&sem->wait_lock, flags);
1174 :
1175 232 : if (!list_empty(&sem->wait_list))
1176 232 : rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1177 :
1178 232 : raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1179 232 : wake_up_q(&wake_q);
1180 :
1181 232 : return sem;
1182 : }
1183 :
1184 : /*
1185 : * downgrade a write lock into a read lock
1186 : * - caller incremented waiting part of count and discovered it still negative
1187 : * - just wake up any readers at the front of the queue
1188 : */
1189 0 : static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1190 : {
1191 0 : unsigned long flags;
1192 0 : DEFINE_WAKE_Q(wake_q);
1193 :
1194 0 : raw_spin_lock_irqsave(&sem->wait_lock, flags);
1195 :
1196 0 : if (!list_empty(&sem->wait_list))
1197 0 : rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1198 :
1199 0 : raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1200 0 : wake_up_q(&wake_q);
1201 :
1202 0 : return sem;
1203 : }
1204 :
1205 : /*
1206 : * lock for reading
1207 : */
1208 35892 : static inline int __down_read_common(struct rw_semaphore *sem, int state)
1209 : {
1210 35892 : long count;
1211 :
1212 35892 : if (!rwsem_read_trylock(sem, &count)) {
1213 110 : if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
1214 : return -EINTR;
1215 110 : DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1216 : }
1217 : return 0;
1218 : }
1219 :
1220 33022 : static inline void __down_read(struct rw_semaphore *sem)
1221 : {
1222 33022 : __down_read_common(sem, TASK_UNINTERRUPTIBLE);
1223 : }
1224 :
1225 0 : static inline int __down_read_interruptible(struct rw_semaphore *sem)
1226 : {
1227 0 : return __down_read_common(sem, TASK_INTERRUPTIBLE);
1228 : }
1229 :
1230 2870 : static inline int __down_read_killable(struct rw_semaphore *sem)
1231 : {
1232 2870 : return __down_read_common(sem, TASK_KILLABLE);
1233 : }
1234 :
1235 144506 : static inline int __down_read_trylock(struct rw_semaphore *sem)
1236 : {
1237 144506 : long tmp;
1238 :
1239 144506 : DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1240 :
1241 : /*
1242 : * Optimize for the case when the rwsem is not locked at all.
1243 : */
1244 144506 : tmp = RWSEM_UNLOCKED_VALUE;
1245 144510 : do {
1246 144521 : if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1247 144510 : tmp + RWSEM_READER_BIAS)) {
1248 144419 : rwsem_set_reader_owned(sem);
1249 144419 : return 1;
1250 : }
1251 102 : } while (!(tmp & RWSEM_READ_FAILED_MASK));
1252 : return 0;
1253 : }
1254 :
1255 : /*
1256 : * lock for writing
1257 : */
1258 328803 : static inline int __down_write_common(struct rw_semaphore *sem, int state)
1259 : {
1260 328803 : if (unlikely(!rwsem_write_trylock(sem))) {
1261 470 : if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
1262 0 : return -EINTR;
1263 : }
1264 :
1265 : return 0;
1266 : }
1267 :
1268 287725 : static inline void __down_write(struct rw_semaphore *sem)
1269 : {
1270 287725 : __down_write_common(sem, TASK_UNINTERRUPTIBLE);
1271 : }
1272 :
1273 41096 : static inline int __down_write_killable(struct rw_semaphore *sem)
1274 : {
1275 41096 : return __down_write_common(sem, TASK_KILLABLE);
1276 : }
1277 :
1278 0 : static inline int __down_write_trylock(struct rw_semaphore *sem)
1279 : {
1280 0 : DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1281 0 : return rwsem_write_trylock(sem);
1282 : }
1283 :
1284 : /*
1285 : * unlock after reading
1286 : */
1287 181426 : static inline void __up_read(struct rw_semaphore *sem)
1288 : {
1289 181426 : long tmp;
1290 :
1291 181426 : DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1292 181426 : DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1293 :
1294 181428 : rwsem_clear_reader_owned(sem);
1295 181456 : tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1296 181461 : DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1297 181461 : if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
1298 : RWSEM_FLAG_WAITERS)) {
1299 71 : clear_nonspinnable(sem);
1300 71 : rwsem_wake(sem, tmp);
1301 : }
1302 181461 : }
1303 :
1304 : /*
1305 : * unlock after writing
1306 : */
1307 327639 : static inline void __up_write(struct rw_semaphore *sem)
1308 : {
1309 327639 : long tmp;
1310 :
1311 327639 : DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1312 : /*
1313 : * sem->owner may differ from current if the ownership is transferred
1314 : * to an anonymous writer by setting the RWSEM_NONSPINNABLE bit.
1315 : */
1316 327639 : DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1317 : !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1318 :
1319 327639 : rwsem_clear_owner(sem);
1320 327660 : tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1321 327725 : if (unlikely(tmp & RWSEM_FLAG_WAITERS))
1322 161 : rwsem_wake(sem, tmp);
1323 327725 : }
1324 :
1325 : /*
1326 : * downgrade write lock to read lock
1327 : */
1328 1149 : static inline void __downgrade_write(struct rw_semaphore *sem)
1329 : {
1330 1149 : long tmp;
1331 :
1332 : /*
1333 : * When downgrading from exclusive to shared ownership,
1334 : * anything inside the write-locked region cannot leak
1335 : * into the read side. In contrast, anything in the
1336 : * read-locked region is ok to be re-ordered into the
1337 : * write side. As such, rely on RELEASE semantics.
1338 : */
1339 1149 : DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1340 1149 : tmp = atomic_long_fetch_add_release(
1341 : -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1342 1149 : rwsem_set_reader_owned(sem);
1343 1149 : if (tmp & RWSEM_FLAG_WAITERS)
1344 0 : rwsem_downgrade_wake(sem);
1345 1149 : }
1346 :
1347 : /*
1348 : * lock for reading
1349 : */
1350 33022 : void __sched down_read(struct rw_semaphore *sem)
1351 : {
1352 33022 : might_sleep();
1353 33023 : rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1354 :
1355 33022 : LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1356 33024 : }
1357 : EXPORT_SYMBOL(down_read);
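/*
 * Usage sketch (editor's addition, hypothetical caller): the canonical
 * read-side pairing. down_read() may sleep, so it must not be used in
 * atomic context; multiple readers may hold the lock concurrently.
 */
static void my_read_side(struct rw_semaphore *sem)
{
	down_read(sem);
	/* ... read-side critical section, shared with other readers ... */
	up_read(sem);
}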
1358 :
1359 0 : int __sched down_read_interruptible(struct rw_semaphore *sem)
1360 : {
1361 0 : might_sleep();
1362 0 : rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1363 :
1364 0 : if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1365 0 : rwsem_release(&sem->dep_map, _RET_IP_);
1366 0 : return -EINTR;
1367 : }
1368 :
1369 : return 0;
1370 : }
1371 : EXPORT_SYMBOL(down_read_interruptible);
1372 :
1373 2870 : int __sched down_read_killable(struct rw_semaphore *sem)
1374 : {
1375 2870 : might_sleep();
1376 2870 : rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1377 :
1378 2870 : if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1379 0 : rwsem_release(&sem->dep_map, _RET_IP_);
1380 0 : return -EINTR;
1381 : }
1382 :
1383 : return 0;
1384 : }
1385 : EXPORT_SYMBOL(down_read_killable);
1386 :
1387 : /*
1388 : * trylock for reading -- returns 1 if successful, 0 if contention
1389 : */
1390 144506 : int down_read_trylock(struct rw_semaphore *sem)
1391 : {
1392 144506 : int ret = __down_read_trylock(sem);
1393 :
1394 144510 : if (ret == 1)
1395 144412 : rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1396 144500 : return ret;
1397 : }
1398 : EXPORT_SYMBOL(down_read_trylock);
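/*
 * Usage sketch (editor's addition, hypothetical caller): per the
 * convention above, 1 means the read lock was taken and 0 means
 * contention, so the result reads naturally in an 'if'.
 */
static bool my_try_read(struct rw_semaphore *sem)
{
	if (!down_read_trylock(sem))
		return false;	/* contended: give up without sleeping */
	/* ... read-side critical section ... */
	up_read(sem);
	return true;
}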
1399 :
1400 : /*
1401 : * lock for writing
1402 : */
1403 279557 : void __sched down_write(struct rw_semaphore *sem)
1404 : {
1405 279557 : might_sleep();
1406 279575 : rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1407 279544 : LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1408 279560 : }
1409 : EXPORT_SYMBOL(down_write);
1410 :
1411 : /*
1412 : * lock for writing
1413 : */
1414 41098 : int __sched down_write_killable(struct rw_semaphore *sem)
1415 : {
1416 41098 : might_sleep();
1417 41098 : rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1418 :
1419 41096 : if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1420 : __down_write_killable)) {
1421 0 : rwsem_release(&sem->dep_map, _RET_IP_);
1422 0 : return -EINTR;
1423 : }
1424 :
1425 : return 0;
1426 : }
1427 : EXPORT_SYMBOL(down_write_killable);
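/*
 * Usage sketch (editor's addition, hypothetical caller):
 * down_write_killable() returns 0 on success and -EINTR if a fatal
 * signal arrived while sleeping, so the error must be propagated.
 */
static int my_locked_update(struct rw_semaphore *sem)
{
	if (down_write_killable(sem))
		return -EINTR;	/* interrupted by a fatal signal */
	/* ... write-side critical section ... */
	up_write(sem);
	return 0;
}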
1428 :
1429 : /*
1430 : * trylock for writing -- returns 1 if successful, 0 if contention
1431 : */
1432 0 : int down_write_trylock(struct rw_semaphore *sem)
1433 : {
1434 0 : int ret = __down_write_trylock(sem);
1435 :
1436 0 : if (ret == 1)
1437 0 : rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1438 :
1439 0 : return ret;
1440 : }
1441 : EXPORT_SYMBOL(down_write_trylock);
1442 :
1443 : /*
1444 : * release a read lock
1445 : */
1446 181444 : void up_read(struct rw_semaphore *sem)
1447 : {
1448 181444 : rwsem_release(&sem->dep_map, _RET_IP_);
1449 181433 : __up_read(sem);
1450 181460 : }
1451 : EXPORT_SYMBOL(up_read);
1452 :
1453 : /*
1454 : * release a write lock
1455 : */
1456 327673 : void up_write(struct rw_semaphore *sem)
1457 : {
1458 327673 : rwsem_release(&sem->dep_map, _RET_IP_);
1459 327641 : __up_write(sem);
1460 327721 : }
1461 : EXPORT_SYMBOL(up_write);
1462 :
1463 : /*
1464 : * downgrade write lock to read lock
1465 : */
1466 1149 : void downgrade_write(struct rw_semaphore *sem)
1467 : {
1468 1149 : lock_downgrade(&sem->dep_map, _RET_IP_);
1469 1149 : __downgrade_write(sem);
1470 1149 : }
1471 : EXPORT_SYMBOL(downgrade_write);
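/*
 * Usage sketch (editor's addition, hypothetical caller): a typical
 * downgrade pattern -- mutate under the write lock, then keep reading
 * with no window in which another writer could slip in.
 */
static void my_publish_then_scan(struct rw_semaphore *sem)
{
	down_write(sem);
	/* ... modify the protected data ... */
	downgrade_write(sem);	/* now held for read; writers still excluded */
	/* ... continue reading the now-published data ... */
	up_read(sem);		/* finish with the read-side unlock */
}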
1472 :
1473 : #ifdef CONFIG_DEBUG_LOCK_ALLOC
1474 :
1475 0 : void down_read_nested(struct rw_semaphore *sem, int subclass)
1476 : {
1477 0 : might_sleep();
1478 0 : rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1479 0 : LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1480 0 : }
1481 : EXPORT_SYMBOL(down_read_nested);
1482 :
1483 0 : int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1484 : {
1485 0 : might_sleep();
1486 0 : rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1487 :
1488 0 : if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1489 0 : rwsem_release(&sem->dep_map, _RET_IP_);
1490 0 : return -EINTR;
1491 : }
1492 :
1493 : return 0;
1494 : }
1495 : EXPORT_SYMBOL(down_read_killable_nested);
1496 :
1497 0 : void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1498 : {
1499 0 : might_sleep();
1500 0 : rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1501 0 : LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1502 0 : }
1503 : EXPORT_SYMBOL(_down_write_nest_lock);
1504 :
1505 0 : void down_read_non_owner(struct rw_semaphore *sem)
1506 : {
1507 0 : might_sleep();
1508 0 : __down_read(sem);
1509 0 : __rwsem_set_reader_owned(sem, NULL);
1510 0 : }
1511 : EXPORT_SYMBOL(down_read_non_owner);
1512 :
1513 8181 : void down_write_nested(struct rw_semaphore *sem, int subclass)
1514 : {
1515 8181 : might_sleep();
1516 8181 : rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1517 8181 : LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1518 8181 : }
1519 : EXPORT_SYMBOL(down_write_nested);
1520 :
1521 0 : int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1522 : {
1523 0 : might_sleep();
1524 0 : rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1525 :
1526 0 : if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1527 : __down_write_killable)) {
1528 0 : rwsem_release(&sem->dep_map, _RET_IP_);
1529 0 : return -EINTR;
1530 : }
1531 :
1532 : return 0;
1533 : }
1534 : EXPORT_SYMBOL(down_write_killable_nested);
1535 :
1536 0 : void up_read_non_owner(struct rw_semaphore *sem)
1537 : {
1538 0 : DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1539 0 : __up_read(sem);
1540 0 : }
1541 : EXPORT_SYMBOL(up_read_non_owner);
1542 :
1543 : #endif