// SPDX-License-Identifier: GPL-2.0-only
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
                        const char *name, struct lock_class_key *key)
{
        sem->read_count = alloc_percpu(int);
        if (unlikely(!sem->read_count))
                return -ENOMEM;

        rcu_sync_init(&sem->rss);
        rcuwait_init(&sem->writer);
        init_waitqueue_head(&sem->waiters);
        atomic_set(&sem->block, 0);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
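
/*
 * Callers normally go through the percpu_init_rwsem() wrapper in
 * <linux/percpu-rwsem.h>, which supplies the lockdep name and key.
 * A minimal sketch ("my_sem" is an illustrative name):
 *
 *      static struct percpu_rw_semaphore my_sem;
 *
 *      if (percpu_init_rwsem(&my_sem))
 *              return -ENOMEM;
 */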

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
        /*
         * XXX: temporary kludge. The error path in alloc_super()
         * assumes that percpu_free_rwsem() is safe after kzalloc().
         */
        if (!sem->read_count)
                return;

        rcu_sync_dtor(&sem->rss);
        free_percpu(sem->read_count);
        sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);

static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
        this_cpu_inc(*sem->read_count);

        /*
         * Because preemption is disabled, the decrement happens on the same
         * CPU as the increment, avoiding the
         * increment-on-one-CPU-and-decrement-on-another problem.
         *
         * If the reader misses the writer's assignment of sem->block, then
         * the writer is guaranteed to see the reader's increment.
         *
         * Conversely, any readers that increment their sem->read_count after
         * the writer looks are guaranteed to see the sem->block value, which
         * in turn means that they are guaranteed to immediately decrement
         * their sem->read_count, so that it doesn't matter that the writer
         * missed them.
         */

        smp_mb(); /* A matches D */

        /*
         * If !sem->block the critical section starts here, matched by the
         * release in percpu_up_write().
         */
        if (likely(!atomic_read_acquire(&sem->block)))
                return true;

        this_cpu_dec(*sem->read_count);

        /* Prod writer to re-evaluate readers_active_check() */
        rcuwait_wake_up(&sem->writer);

        return false;
}
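/*
 * Writer-writer exclusion is provided by the xchg() of sem->block alone;
 * the plain atomic_read() in front of it merely avoids dirtying the
 * cacheline with a failing xchg() when the lock is already visibly held.
 */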
static inline bool __percpu_down_write_trylock(struct percpu_rw_semaphore *sem)
{
        if (atomic_read(&sem->block))
                return false;

        return atomic_xchg(&sem->block, 1) == 0;
}

static bool __percpu_rwsem_trylock(struct percpu_rw_semaphore *sem, bool reader)
{
        if (reader) {
                bool ret;

                preempt_disable();
                ret = __percpu_down_read_trylock(sem);
                preempt_enable();

                return ret;
        }
        return __percpu_down_write_trylock(sem);
}

/*
 * The return value of wait_queue_entry::func means:
 *
 *  <0 - error, wakeup is terminated and the error is returned
 *   0 - no wakeup, the next waiter is tried
 *  >0 - woken, if EXCLUSIVE, counted towards @nr_exclusive.
 *
 * We use EXCLUSIVE for both readers and writers to preserve FIFO order,
 * and play games with the return value to allow waking multiple readers.
 *
 * Specifically, we wake readers until we've woken a single writer, or until
 * a trylock fails.
 */
static int percpu_rwsem_wake_function(struct wait_queue_entry *wq_entry,
                                      unsigned int mode, int wake_flags,
                                      void *key)
{
        bool reader = wq_entry->flags & WQ_FLAG_CUSTOM;
        struct percpu_rw_semaphore *sem = key;
        struct task_struct *p;

        /* concurrent against percpu_down_write(), can get stolen */
        if (!__percpu_rwsem_trylock(sem, reader))
                return 1;

        p = get_task_struct(wq_entry->private);
        list_del_init(&wq_entry->entry);
        smp_store_release(&wq_entry->private, NULL);

        wake_up_process(p);
        put_task_struct(p);

        return !reader; /* wake (readers until) 1 writer */
}

static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
{
        DEFINE_WAIT_FUNC(wq_entry, percpu_rwsem_wake_function);
        bool wait;

        spin_lock_irq(&sem->waiters.lock);
        /*
         * Serialize against the wakeup in percpu_up_write(); if we fail
         * the trylock, the wakeup must see us on the list.
         */
        wait = !__percpu_rwsem_trylock(sem, reader);
        if (wait) {
                wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
                __add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
        }
        spin_unlock_irq(&sem->waiters.lock);
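        /*
         * The wake function clears wq_entry.private with smp_store_release()
         * only after it has handed the lock to us; the smp_load_acquire()
         * below pairs with that store, so observing NULL means the lock is
         * already ours.
         */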
        while (wait) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!smp_load_acquire(&wq_entry.private))
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
}
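/*
 * Reader slow path. The fast-path wrappers in <linux/percpu-rwsem.h> call
 * this with preemption disabled, hence the preempt_enable()/preempt_disable()
 * pair around the sleeping wait below.
 */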
bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
        if (__percpu_down_read_trylock(sem))
                return true;

        if (try)
                return false;

        preempt_enable();
        percpu_rwsem_wait(sem, /* .reader = */ true);
        preempt_disable();

        return true;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);

#define per_cpu_sum(var)                                                \
({                                                                      \
        typeof(var) __sum = 0;                                          \
        int cpu;                                                        \
        compiletime_assert_atomic_type(__sum);                          \
        for_each_possible_cpu(cpu)                                      \
                __sum += per_cpu(var, cpu);                             \
        __sum;                                                          \
})
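
/*
 * Note that the per-CPU counters need not be zero individually: a reader
 * that migrates between lock and unlock leaves +1 on one CPU and -1 on
 * another. Only the (modular) sum across all CPUs is meaningful:
 *
 *      CPU0: +1 (percpu_down_read)   CPU1: -1 (percpu_up_read after migrating)
 *      per_cpu_sum(*sem->read_count) == 0  =>  no active readers
 */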

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero. If this sum is zero, then it is stable because any newly arriving
 * reader that increments a given counter will immediately decrement that
 * same counter.
 *
 * Assumes sem->block is set.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
        if (per_cpu_sum(*sem->read_count) != 0)
                return false;

        /*
         * If we observed the decrement, ensure we see the entire critical
         * section.
         */
        smp_mb(); /* C matches B */

        return true;
}
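
/*
 * Barrier pairing overview: the reader-side smp_mb() in
 * __percpu_down_read_trylock() (A) pairs with the full barrier implied by
 * atomic_xchg() in __percpu_down_write_trylock() (D), while the smp_mb() in
 * readers_active_check() (C) pairs with barrier B on the reader-unlock slow
 * path in <linux/percpu-rwsem.h>.
 */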

void percpu_down_write(struct percpu_rw_semaphore *sem)
{
        might_sleep();
        rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

        /* Notify readers to take the slow path. */
        rcu_sync_enter(&sem->rss);

        /*
         * Try to set sem->block; this provides writer-writer exclusion.
         * Having sem->block set makes new readers block.
         */
        if (!__percpu_down_write_trylock(sem))
                percpu_rwsem_wait(sem, /* .reader = */ false);

        /* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */

        /*
         * If the readers don't see our store of sem->block, then we are
         * guaranteed to see their sem->read_count increment, and therefore
         * will wait for them.
         */

        /* Wait for all active readers to complete. */
        rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
        rwsem_release(&sem->dep_map, _RET_IP_);

        /*
         * Signal that the writer is done, but do not re-enable the reader
         * fast path yet.
         *
         * One reason that we cannot immediately re-enable the fast path is
         * that new readers might otherwise fail to see the results of this
         * writer's critical section.
         *
         * Therefore we force them through the slow path, which guarantees an
         * acquire and thereby guarantees the critical section's consistency.
         */
        atomic_set_release(&sem->block, 0);

        /*
         * Prod any pending reader/writer to make progress.
         */
        __wake_up(&sem->waiters, TASK_NORMAL, 1, sem);

        /*
         * Once this completes (at least one RCU-sched grace period hence) the
         * reader fast path will be available again. Safe to use outside the
         * exclusive write lock because it's counting.
         */
        rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
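
/*
 * Typical usage, as a sketch; the reader-side entry points are the inline
 * fast paths in <linux/percpu-rwsem.h>:
 *
 *      percpu_down_read(&sem);         - usually just a per-CPU increment
 *      ... read-side critical section ...
 *      percpu_up_read(&sem);
 *
 *      percpu_down_write(&sem);        - flips readers to the slow path,
 *      ... write-side critical section   then waits for them to drain
 *      percpu_up_write(&sem);
 */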