Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0-only */
2 : /*
3 : * Fast and scalable bitmaps.
4 : *
5 : * Copyright (C) 2016 Facebook
6 : * Copyright (C) 2013-2014 Jens Axboe
7 : */
8 :
9 : #ifndef __LINUX_SCALE_BITMAP_H
10 : #define __LINUX_SCALE_BITMAP_H
11 :
12 : #include <linux/kernel.h>
13 : #include <linux/slab.h>
14 :
15 : struct seq_file;
16 :
17 : /**
18 : * struct sbitmap_word - Word in a &struct sbitmap.
19 : */
20 : struct sbitmap_word {
21 : /**
22 : * @depth: Number of bits being used in @word/@cleared
23 : */
24 : unsigned long depth;
25 :
26 : /**
27 : * @word: word holding free bits
28 : */
29 : unsigned long word ____cacheline_aligned_in_smp;
30 :
31 : /**
32 : * @cleared: word holding cleared bits
33 : */
34 : unsigned long cleared ____cacheline_aligned_in_smp;
35 : } ____cacheline_aligned_in_smp;
36 :
37 : /**
38 : * struct sbitmap - Scalable bitmap.
39 : *
40 : * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
41 : * trades off higher memory usage for better scalability.
42 : */
43 : struct sbitmap {
44 : /**
45 : * @depth: Number of bits used in the whole bitmap.
46 : */
47 : unsigned int depth;
48 :
49 : /**
50 : * @shift: log2(number of bits used per word)
51 : */
52 : unsigned int shift;
53 :
54 : /**
55 : * @map_nr: Number of words (cachelines) being used for the bitmap.
56 : */
57 : unsigned int map_nr;
58 :
59 : /**
60 : * @map: Allocated bitmap.
61 : */
62 : struct sbitmap_word *map;
63 : };
64 :
65 : #define SBQ_WAIT_QUEUES 8
66 : #define SBQ_WAKE_BATCH 8
67 :
68 : /**
69 : * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
70 : */
71 : struct sbq_wait_state {
72 : /**
73 : * @wait_cnt: Number of frees remaining before we wake up.
74 : */
75 : atomic_t wait_cnt;
76 :
77 : /**
78 : * @wait: Wait queue.
79 : */
80 : wait_queue_head_t wait;
81 : } ____cacheline_aligned_in_smp;
82 :
83 : /**
84 : * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
85 : * bits.
86 : *
87 : * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
88 : * avoid contention on the wait queue spinlock. This ensures that we don't hit a
89 : * scalability wall when we run out of free bits and have to start putting tasks
90 : * to sleep.
91 : */
92 : struct sbitmap_queue {
93 : /**
94 : * @sb: Scalable bitmap.
95 : */
96 : struct sbitmap sb;
97 :
98 : /**
99 : * @alloc_hint: Cache of last successfully allocated or freed bit.
100 : *
101 : * This is per-cpu, which allows multiple users to stick to different
102 : * cachelines until the map is exhausted.
103 : */
104 : unsigned int __percpu *alloc_hint;
105 :
106 : /**
107 : * @wake_batch: Number of bits which must be freed before we wake up any
108 : * waiters.
109 : */
110 : unsigned int wake_batch;
111 :
112 : /**
113 : * @wake_index: Next wait queue in @ws to wake up.
114 : */
115 : atomic_t wake_index;
116 :
117 : /**
118 : * @ws: Wait queues.
119 : */
120 : struct sbq_wait_state *ws;
121 :
122 : /**
123 : * @ws_active: Count of currently active @ws waitqueues.
124 : */
125 : atomic_t ws_active;
126 :
127 : /**
128 : * @round_robin: Allocate bits in strict round-robin order.
129 : */
130 : bool round_robin;
131 :
132 : /**
133 : * @min_shallow_depth: The minimum shallow depth which may be passed to
134 : * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
135 : */
136 : unsigned int min_shallow_depth;
137 : };
138 :
139 : /**
140 : * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
141 : * @sb: Bitmap to initialize.
142 : * @depth: Number of bits to allocate.
143 : * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
144 : * given, a good default is chosen.
145 : * @flags: Allocation flags.
146 : * @node: Memory node to allocate on.
147 : *
148 : * Return: Zero on success or negative errno on failure.
149 : */
150 : int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
151 : gfp_t flags, int node);
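
/*
 * Illustrative sketch (not taken from an in-tree user; the depth, flags and
 * node choices are assumptions for the example): initialize a 128-bit map
 * with the default per-word shift, grab one bit, release it, and free the
 * map again.
 *
 *	struct sbitmap sb;
 *	int ret, nr;
 *
 *	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *
 *	nr = sbitmap_get(&sb, 0, false);
 *	if (nr >= 0)
 *		sbitmap_clear_bit(&sb, nr);
 *
 *	sbitmap_free(&sb);
 */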
152 :
153 : /**
154 : * sbitmap_free() - Free memory used by a &struct sbitmap.
155 : * @sb: Bitmap to free.
156 : */
157 0 : static inline void sbitmap_free(struct sbitmap *sb)
158 : {
159 0 : kfree(sb->map);
160 0 : sb->map = NULL;
161 0 : }
162 :
163 : /**
164 : * sbitmap_resize() - Resize a &struct sbitmap.
165 : * @sb: Bitmap to resize.
166 : * @depth: New number of bits to resize to.
167 : *
168 : * Doesn't reallocate anything. It's up to the caller to ensure that the new
169 : * depth doesn't exceed the depth that the sb was initialized with.
170 : */
171 : void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
172 :
173 : /**
174 : * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
175 : * @sb: Bitmap to allocate from.
176 : * @alloc_hint: Hint for where to start searching for a free bit.
177 : * @round_robin: If true, be stricter about allocation order; always allocate
178 : * starting from the last allocated bit. This is less efficient
179 : * than the default behavior (false).
180 : *
181 : * This operation provides acquire barrier semantics if it succeeds.
182 : *
183 : * Return: Non-negative allocated bit number if successful, -1 otherwise.
184 : */
185 : int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
186 :
187 : /**
188 : * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
189 : * limiting the depth used from each word.
190 : * @sb: Bitmap to allocate from.
191 : * @alloc_hint: Hint for where to start searching for a free bit.
192 : * @shallow_depth: The maximum number of bits to allocate from a single word.
193 : *
194 : * This rather specific operation allows for having multiple users with
195 : * different allocation limits. E.g., there can be a high-priority class that
196 : * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
197 : * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
198 : * class can only allocate half of the total bits in the bitmap, preventing it
199 : * from starving out the high-priority class.
200 : *
201 : * Return: Non-negative allocated bit number if successful, -1 otherwise.
202 : */
203 : int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
204 : unsigned long shallow_depth);
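
/*
 * A sketch of the two-class setup described above (the variable names and
 * half-word limit are assumptions for the example): the high-priority path
 * allocates with the full depth, while the low-priority path is capped to
 * half of each word. Either call returns -1 once its respective limit is
 * exhausted.
 *
 *	int hi_nr, lo_nr;
 *
 *	hi_nr = sbitmap_get(sb, hint, false);
 *	lo_nr = sbitmap_get_shallow(sb, hint, 1UL << (sb->shift - 1));
 */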
205 :
206 : /**
207 : * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
208 : * @sb: Bitmap to check.
209 : *
210 : * Return: true if any bit in the bitmap is set, false otherwise.
211 : */
212 : bool sbitmap_any_bit_set(const struct sbitmap *sb);
213 :
214 : #define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
215 : #define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
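
/*
 * Worked example: with sb->shift == 6 (64 bits per word), bit number 200
 * maps to word 200 >> 6 == 3, at bit 200 & 63 == 8 within that word.
 */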
216 :
217 : typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);
218 :
219 : /**
220 : * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
221 : * @start: Where to start the iteration.
222 : * @sb: Bitmap to iterate over.
223 : * @fn: Callback. Should return true to continue or false to break early.
224 : * @data: Pointer to pass to callback.
225 : *
226 : * This is inline even though it's non-trivial so that the function calls to the
227 : * callback will hopefully get optimized away.
228 : */
229 22 : static inline void __sbitmap_for_each_set(struct sbitmap *sb,
230 : unsigned int start,
231 : sb_for_each_fn fn, void *data)
232 : {
233 22 : unsigned int index;
234 22 : unsigned int nr;
235 22 : unsigned int scanned = 0;
236 :
237 22 : if (start >= sb->depth)
238 0 : start = 0;
239 22 : index = SB_NR_TO_INDEX(sb, start);
240 22 : nr = SB_NR_TO_BIT(sb, start);
241 :
242 164 : while (scanned < sb->depth) {
243 142 : unsigned long word;
244 142 : unsigned int depth = min_t(unsigned int,
245 : sb->map[index].depth - nr,
246 : sb->depth - scanned);
247 :
248 142 : scanned += depth;
249 142 : word = sb->map[index].word & ~sb->map[index].cleared;
250 142 : if (!word)
251 127 : goto next;
252 :
253 : /*
254 : * On the first iteration of the outer loop, we need to add the
255 : * bit offset back to the size of the word for find_next_bit().
256 : * On all other iterations, nr is zero, so this is a noop.
257 : */
258 15 : depth += nr;
259 45 : while (1) {
260 30 : nr = find_next_bit(&word, depth, nr);
261 30 : if (nr >= depth)
262 : break;
263 15 : if (!fn(sb, (index << sb->shift) + nr, data))
264 0 : return;
265 :
266 15 : nr++;
267 : }
268 15 : next:
269 142 : nr = 0;
270 142 : if (++index >= sb->map_nr)
271 22 : index = 0;
272 : }
273 : }
274 :
275 : /**
276 : * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
277 : * @sb: Bitmap to iterate over.
278 : * @fn: Callback. Should return true to continue or false to break early.
279 : * @data: Pointer to pass to callback.
280 : */
281 22 : static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
282 : void *data)
283 : {
284 22 : __sbitmap_for_each_set(sb, 0, fn, data);
285 0 : }
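
/*
 * Minimal iteration sketch (the callback and counter are made up for the
 * example): count the set bits by returning true from the callback so the
 * walk never stops early.
 *
 *	static bool count_bit(struct sbitmap *sb, unsigned int bitnr, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_bit, &count);
 */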
286 :
287 42 : static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
288 : unsigned int bitnr)
289 : {
290 42 : return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
291 : }
292 :
293 : /* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */
294 :
295 14 : static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
296 : {
297 14 : set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
298 14 : }
299 :
300 14 : static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
301 : {
302 14 : clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
303 14 : }
304 :
305 : /*
306 : * This one is special: it doesn't actually clear the bit, but rather sets
307 : * the corresponding bit in the ->cleared mask instead. It is paired with
308 : * the caller doing sbitmap_deferred_clear() if a given index is full, which
309 : * clears the previously freed entries in the corresponding ->word.
310 : */
311 3565 : static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
312 : {
313 3565 : unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;
314 :
315 3565 : set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
316 3565 : }
317 :
318 : static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
319 : unsigned int bitnr)
320 : {
321 : clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
322 : }
323 :
324 14 : static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
325 : {
326 14 : return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
327 : }
328 :
329 : /**
330 : * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
331 : * @sb: Bitmap to show.
332 : * @m: struct seq_file to write to.
333 : *
334 : * This is intended for debugging. The format may change at any time.
335 : */
336 : void sbitmap_show(struct sbitmap *sb, struct seq_file *m);
337 :
338 : /**
339 : * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
340 : * seq_file.
341 : * @sb: Bitmap to show.
342 : * @m: struct seq_file to write to.
343 : *
344 : * This is intended for debugging. The output isn't guaranteed to be internally
345 : * consistent.
346 : */
347 : void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);
348 :
349 : /**
350 : * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
351 : * memory node.
352 : * @sbq: Bitmap queue to initialize.
353 : * @depth: See sbitmap_init_node().
354 : * @shift: See sbitmap_init_node().
355 : * @round_robin: See sbitmap_get().
356 : * @flags: Allocation flags.
357 : * @node: Memory node to allocate on.
358 : *
359 : * Return: Zero on success or negative errno on failure.
360 : */
361 : int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
362 : int shift, bool round_robin, gfp_t flags, int node);
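
/*
 * Illustrative sketch (the depth, flags and node choices are assumptions for
 * the example): set up a 256-bit queue with the default shift and
 * round-robin allocation disabled, then tear it down again.
 *
 *	struct sbitmap_queue sbq;
 *	int ret;
 *
 *	ret = sbitmap_queue_init_node(&sbq, 256, -1, false, GFP_KERNEL,
 *				      NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_queue_free(&sbq);
 */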
363 :
364 : /**
365 : * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
366 : *
367 : * @sbq: Bitmap queue to free.
368 : */
369 0 : static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
370 : {
371 0 : kfree(sbq->ws);
372 0 : free_percpu(sbq->alloc_hint);
373 0 : sbitmap_free(&sbq->sb);
374 0 : }
375 :
376 : /**
377 : * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
378 : * @sbq: Bitmap queue to resize.
379 : * @depth: New number of bits to resize to.
380 : *
381 : * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
382 : * some extra work on the &struct sbitmap_queue, so it's not safe to just
383 : * resize the underlying &struct sbitmap.
384 : */
385 : void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
386 :
387 : /**
388 : * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
389 : * sbitmap_queue with preemption already disabled.
390 : * @sbq: Bitmap queue to allocate from.
391 : *
392 : * Return: Non-negative allocated bit number if successful, -1 otherwise.
393 : */
394 : int __sbitmap_queue_get(struct sbitmap_queue *sbq);
395 :
396 : /**
397 : * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
398 : * sbitmap_queue, limiting the depth used from each word, with preemption
399 : * already disabled.
400 : * @sbq: Bitmap queue to allocate from.
401 : * @shallow_depth: The maximum number of bits to allocate from a single word.
402 : * See sbitmap_get_shallow().
403 : *
404 : * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
405 : * initializing @sbq.
406 : *
407 : * Return: Non-negative allocated bit number if successful, -1 otherwise.
408 : */
409 : int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
410 : unsigned int shallow_depth);
411 :
412 : /**
413 : * sbitmap_queue_get() - Try to allocate a free bit from a &struct
414 : * sbitmap_queue.
415 : * @sbq: Bitmap queue to allocate from.
416 : * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
417 : * sbitmap_queue_clear()).
418 : *
419 : * Return: Non-negative allocated bit number if successful, -1 otherwise.
420 : */
421 : static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
422 : unsigned int *cpu)
423 : {
424 : int nr;
425 :
426 : *cpu = get_cpu();
427 : nr = __sbitmap_queue_get(sbq);
428 : put_cpu();
429 : return nr;
430 : }
431 :
432 : /**
433 : * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
434 : * sbitmap_queue, limiting the depth used from each word.
435 : * @sbq: Bitmap queue to allocate from.
436 : * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
437 : * sbitmap_queue_clear()).
438 : * @shallow_depth: The maximum number of bits to allocate from a single word.
439 : * See sbitmap_get_shallow().
440 : *
441 : * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
442 : * initializing @sbq.
443 : *
444 : * Return: Non-negative allocated bit number if successful, -1 otherwise.
445 : */
446 : static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
447 : unsigned int *cpu,
448 : unsigned int shallow_depth)
449 : {
450 : int nr;
451 :
452 : *cpu = get_cpu();
453 : nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
454 : put_cpu();
455 : return nr;
456 : }
457 :
458 : /**
459 : * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
460 : * minimum shallow depth that will be used.
461 : * @sbq: Bitmap queue in question.
462 : * @min_shallow_depth: The minimum shallow depth that will be passed to
463 : * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
464 : *
465 : * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
466 : * depends on the depth of the bitmap. Since the shallow allocation functions
467 : * effectively operate with a different depth, the shallow depth must be taken
468 : * into account when calculating the batch size. This function must be called
469 : * with the minimum shallow depth that will be used. Failure to do so can result
470 : * in missed wakeups.
471 : */
472 : void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
473 : unsigned int min_shallow_depth);
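
/*
 * Sketch of the required pairing (the depth of 16 is an assumption for the
 * example): declare the smallest shallow depth up front, before the first
 * shallow allocation is attempted.
 *
 *	sbitmap_queue_min_shallow_depth(sbq, 16);
 *	...
 *	nr = sbitmap_queue_get_shallow(sbq, &cpu, 16);
 */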
474 :
475 : /**
476 : * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
477 : * &struct sbitmap_queue.
478 : * @sbq: Bitmap to free from.
479 : * @nr: Bit number to free.
480 : * @cpu: CPU the bit was allocated on.
481 : */
482 : void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
483 : unsigned int cpu);
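
/*
 * Typical allocate/free pairing, sketched with assumed variable names and an
 * assumed -EBUSY fallback: the CPU reported by sbitmap_queue_get() is handed
 * back to sbitmap_queue_clear() so the freed bit updates the right per-cpu
 * hint.
 *
 *	unsigned int cpu;
 *	int nr;
 *
 *	nr = sbitmap_queue_get(sbq, &cpu);
 *	if (nr < 0)
 *		return -EBUSY;
 *	...
 *	sbitmap_queue_clear(sbq, nr, cpu);
 */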
484 :
485 0 : static inline int sbq_index_inc(int index)
486 : {
487 0 : return (index + 1) & (SBQ_WAIT_QUEUES - 1);
488 : }
489 :
490 0 : static inline void sbq_index_atomic_inc(atomic_t *index)
491 : {
492 0 : int old = atomic_read(index);
493 0 : int new = sbq_index_inc(old);
494 0 : atomic_cmpxchg(index, old, new);
495 0 : }
496 :
497 : /**
498 : * sbq_wait_ptr() - Get the next wait queue to use for a &struct
499 : * sbitmap_queue.
500 : * @sbq: Bitmap queue to wait on.
501 : * @wait_index: A counter per "user" of @sbq.
502 : */
503 0 : static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
504 : atomic_t *wait_index)
505 : {
506 0 : struct sbq_wait_state *ws;
507 :
508 0 : ws = &sbq->ws[atomic_read(wait_index)];
509 0 : sbq_index_atomic_inc(wait_index);
510 0 : return ws;
511 : }
512 :
513 : /**
514 : * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
515 : * sbitmap_queue.
516 : * @sbq: Bitmap queue to wake up.
517 : */
518 : void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
519 :
520 : /**
521 : * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
522 : * on a &struct sbitmap_queue.
523 : * @sbq: Bitmap queue to wake up.
524 : */
525 : void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
526 :
527 : /**
528 : * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
529 : * seq_file.
530 : * @sbq: Bitmap queue to show.
531 : * @m: struct seq_file to write to.
532 : *
533 : * This is intended for debugging. The format may change at any time.
534 : */
535 : void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
536 :
537 : struct sbq_wait {
538 : struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */
539 : struct wait_queue_entry wait;
540 : };
541 :
542 : #define DEFINE_SBQ_WAIT(name) \
543 : struct sbq_wait name = { \
544 : .sbq = NULL, \
545 : .wait = { \
546 : .private = current, \
547 : .func = autoremove_wake_function, \
548 : .entry = LIST_HEAD_INIT((name).wait.entry), \
549 : } \
550 : }
551 :
552 : /*
553 : * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
554 : * internal state.
555 : */
556 : void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
557 : struct sbq_wait_state *ws,
558 : struct sbq_wait *sbq_wait, int state);
559 :
560 : /*
561 : * Must be paired with sbitmap_prepare_to_wait().
562 : */
563 : void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
564 : struct sbq_wait *sbq_wait);
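
/*
 * Rough sketch of a sleeping allocator built from these helpers (the
 * per-user wait_index counter, TASK_UNINTERRUPTIBLE and io_schedule() are
 * assumptions for the example, not a prescribed pattern):
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws;
 *	unsigned int cpu;
 *	int nr;
 *
 *	ws = sbq_wait_ptr(sbq, &wait_index);
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = sbitmap_queue_get(sbq, &cpu);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */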
565 :
566 : /*
567 : * Wrapper around add_wait_queue(), which maintains some extra internal state
568 : */
569 : void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
570 : struct sbq_wait_state *ws,
571 : struct sbq_wait *sbq_wait);
572 :
573 : /*
574 : * Must be paired with sbitmap_add_wait_queue()
575 : */
576 : void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);
577 :
578 : #endif /* __LINUX_SCALE_BITMAP_H */