Line data Source code
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */

/*
 * Writer states & reader shift and bias.
 *
 * lock->cnts packs a 9-bit writer state into bits 0-8 (_QW_WMASK) and
 * keeps the reader count in the remaining upper bits, so each reader
 * adds/subtracts _QR_BIAS (1 << 9).
 * NOTE(review): queued_write_unlock() below clears a ->wlocked byte
 * rather than ->cnts; presumably ->wlocked overlays the low byte of
 * ->cnts via a union in asm-generic/qrwlock_types.h -- confirm there.
 */
#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask		   */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)

/*
 * External function declarations (slowpaths, defined out of line).
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 *
 * Optimistically bumps the reader count with acquire ordering; if a
 * writer bit turned out to be set after the increment, the reader
 * count is backed out again and the trylock fails.
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		/* Add the reader bias; acquire orders the critical section. */
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		/* A writer slipped in between the read and the add: undo. */
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}

/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 *
 * Succeeds only when the lock is completely free (no readers, no
 * writer), via a single 0 -> _QW_LOCKED cmpxchg with acquire ordering.
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}

/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	/* Fastpath: bump the reader count; done if no writer is active. */
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	/* Contended: queue up behind the wait_lock in the slowpath. */
	queued_write_lock_slowpath(lock);
}

/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count; release ordering makes
	 * the critical section visible before the count drops.
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 *
 * Clears only the writer byte (see NOTE(review) above on ->wlocked),
 * leaving any waiting-reader/writer state in the rest of ->cnts intact.
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release(&lock->wlocked, 0);
}

/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 *
 * Contention is inferred from the internal wait_lock spinlock that
 * slowpath waiters queue on.
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
	return arch_spin_is_locked(&lock->wait_lock);
}

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_lock(l)		queued_read_lock(l)
#define arch_write_lock(l)		queued_write_lock(l)
#define arch_read_trylock(l)		queued_read_trylock(l)
#define arch_write_trylock(l)		queued_write_trylock(l)
#define arch_read_unlock(l)		queued_read_unlock(l)
#define arch_write_unlock(l)		queued_write_unlock(l)
#define arch_rwlock_is_contended(l)	queued_rwlock_is_contended(l)

#endif /* __ASM_GENERIC_QRWLOCK_H */