Line  Hits  Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * Copyright (C) 2016 Facebook
4 : * Copyright (C) 2013-2014 Jens Axboe
5 : */
6 :
7 : #include <linux/sched.h>
8 : #include <linux/random.h>
9 : #include <linux/sbitmap.h>
10 : #include <linux/seq_file.h>
11 :
12 : /*
13 : * See if we have deferred clears that we can batch move
14 : */
15 66 : static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
16 : {
17 66 : unsigned long mask;
18 :
19 66 : if (!READ_ONCE(map->cleared))
20 : return false;
21 :
22 : /*
23 : * First get a stable cleared mask, setting the old mask to 0.
24 : */
25 57 : mask = xchg(&map->cleared, 0);
26 :
27 : /*
28 : * Now clear the masked bits in our free word
29 : */
30 57 : atomic_long_andnot(mask, (atomic_long_t *)&map->word);
31 57 : BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
32 57 : return true;
33 : }
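The cheap free path never touches ->word: sbitmap_deferred_clear_bit() (a header helper, used later by sbitmap_queue_clear()) only sets the bit in ->cleared, and the batch move above folds those bits back into ->word when an allocation in this word fails or the map is resized. Below is a minimal userspace model of that word/cleared pairing, written with C11 atomics purely for illustration; the model_* names are made up here, and the kernel uses its own atomic helpers instead.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long word;      /* allocated bits */
static _Atomic unsigned long cleared;   /* lazily freed bits */

static void model_deferred_clear_bit(unsigned int nr)
{
        atomic_fetch_or(&cleared, 1UL << nr);   /* cheap free: 'word' is untouched */
}

static void model_deferred_clear(void)
{
        /* stable snapshot of the cleared mask, resetting it to 0 */
        unsigned long mask = atomic_exchange(&cleared, 0);

        atomic_fetch_and(&word, ~mask);         /* batch-clear in the free word */
}

int main(void)
{
        atomic_store(&word, 0x7UL);             /* bits 0-2 allocated */
        model_deferred_clear_bit(1);            /* free bit 1 lazily */
        model_deferred_clear();                 /* fold it back: word becomes 0x5 */
        printf("word = 0x%lx\n", atomic_load(&word));
        return 0;
}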
34 :
35 27 : int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
36 : gfp_t flags, int node)
37 : {
38 27 : unsigned int bits_per_word;
39 27 : unsigned int i;
40 :
41 27 : if (shift < 0) {
42 18 : shift = ilog2(BITS_PER_LONG);
43 : /*
44 : * If the bitmap is small, shrink the number of bits per word so
45 : * we spread over at least a few cachelines. If there are fewer
46 : * than 4 bits, just forget about it; it's not going to work
47 : * optimally anyway.
48 : */
49 18 : if (depth >= 4) {
50 17 : while ((4U << shift) > depth)
51 8 : shift--;
52 : }
53 : }
54 27 : bits_per_word = 1U << shift;
55 27 : if (bits_per_word > BITS_PER_LONG)
56 : return -EINVAL;
57 :
58 27 : sb->shift = shift;
59 27 : sb->depth = depth;
60 27 : sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
61 :
62 27 : if (depth == 0) {
63 9 : sb->map = NULL;
64 9 : return 0;
65 : }
66 :
67 18 : sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
68 18 : if (!sb->map)
69 : return -ENOMEM;
70 :
71 75 : for (i = 0; i < sb->map_nr; i++) {
72 57 : sb->map[i].depth = min(depth, bits_per_word);
73 57 : depth -= sb->map[i].depth;
74 : }
75 : return 0;
76 : }
77 : EXPORT_SYMBOL_GPL(sbitmap_init_node);
78 :
79 9 : void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
80 : {
81 9 : unsigned int bits_per_word = 1U << sb->shift;
82 9 : unsigned int i;
83 :
84 18 : for (i = 0; i < sb->map_nr; i++)
85 9 : sbitmap_deferred_clear(&sb->map[i]);
86 :
87 9 : sb->depth = depth;
88 9 : sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
89 :
90 18 : for (i = 0; i < sb->map_nr; i++) {
91 9 : sb->map[i].depth = min(depth, bits_per_word);
92 9 : depth -= sb->map[i].depth;
93 : }
94 9 : }
95 : EXPORT_SYMBOL_GPL(sbitmap_resize);
96 :
97 3626 : static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
98 : unsigned int hint, bool wrap)
99 : {
100 3626 : int nr;
101 :
102 : /* don't wrap if starting from 0 */
103 3626 : wrap = wrap && hint;
104 :
105 3626 : while (1) {
106 3626 : nr = find_next_zero_bit(word, depth, hint);
107 3624 : if (unlikely(nr >= depth)) {
108 : /*
109 : * We started with an offset, and we didn't reset the
110 : * offset to 0 in a failure case, so start from 0 to
111 : * exhaust the map.
112 : */
113 57 : if (hint && wrap) {
114 0 : hint = 0;
115 0 : continue;
116 : }
117 : return -1;
118 : }
119 :
120 3567 : if (!test_and_set_bit_lock(nr, word))
121 : break;
122 :
123 0 : hint = nr + 1;
124 0 : if (hint >= depth - 1)
125 0 : hint = 0;
126 : }
127 :
128 : return nr;
129 : }
130 :
131 3568 : static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
132 : unsigned int alloc_hint, bool round_robin)
133 : {
134 3568 : struct sbitmap_word *map = &sb->map[index];
135 3625 : int nr;
136 :
137 3625 : do {
138 7251 : nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
139 3625 : !round_robin);
140 3626 : if (nr != -1)
141 : break;
142 57 : if (!sbitmap_deferred_clear(map))
143 : break;
144 : } while (1);
145 :
146 3569 : return nr;
147 : }
148 :
149 3569 : int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
150 : {
151 3569 : unsigned int i, index;
152 3569 : int nr = -1;
153 :
154 3569 : index = SB_NR_TO_INDEX(sb, alloc_hint);
155 :
156 : /*
157 : * Unless we're doing round robin tag allocation, just use the
158 : * alloc_hint to find the right word index. No point in looping
159 : * twice in find_next_zero_bit() for that case.
160 : */
161 3569 : if (round_robin)
162 0 : alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
163 : else
164 : alloc_hint = 0;
165 :
166 3569 : for (i = 0; i < sb->map_nr; i++) {
167 3569 : nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
168 : round_robin);
169 3569 : if (nr != -1) {
170 3569 : nr += index << sb->shift;
171 3569 : break;
172 : }
173 :
174 : /* Jump to next index. */
175 0 : alloc_hint = 0;
176 0 : if (++index >= sb->map_nr)
177 0 : index = 0;
178 : }
179 :
180 3569 : return nr;
181 : }
182 : EXPORT_SYMBOL_GPL(sbitmap_get);
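A minimal sketch of driving the plain sbitmap allocation path above from other kernel code; the function name, depth, hint, and round_robin values are illustrative, and error handling is trimmed. sbitmap_clear_bit() and sbitmap_free() are header helpers that free a bit immediately (without the deferred-clear path) and release the map.

#include <linux/sbitmap.h>

static int sbitmap_example(void)
{
        struct sbitmap sb;
        int nr, ret;

        /* 128 bits; shift < 0 lets sbitmap_init_node() pick bits_per_word */
        ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE);
        if (ret)
                return ret;

        /* grab a free bit; -1 means every word is (transiently) full */
        nr = sbitmap_get(&sb, 0, false);
        if (nr >= 0)
                sbitmap_clear_bit(&sb, nr);     /* immediate, non-deferred free */

        sbitmap_free(&sb);
        return 0;
}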
183 :
184 0 : int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
185 : unsigned long shallow_depth)
186 : {
187 0 : unsigned int i, index;
188 0 : int nr = -1;
189 :
190 0 : index = SB_NR_TO_INDEX(sb, alloc_hint);
191 :
192 0 : for (i = 0; i < sb->map_nr; i++) {
193 0 : again:
194 0 : nr = __sbitmap_get_word(&sb->map[index].word,
195 0 : min(sb->map[index].depth, shallow_depth),
196 0 : SB_NR_TO_BIT(sb, alloc_hint), true);
197 0 : if (nr != -1) {
198 0 : nr += index << sb->shift;
199 0 : break;
200 : }
201 :
202 0 : if (sbitmap_deferred_clear(&sb->map[index]))
203 0 : goto again;
204 :
205 : /* Jump to next index. */
206 0 : index++;
207 0 : alloc_hint = index << sb->shift;
208 :
209 0 : if (index >= sb->map_nr) {
210 0 : index = 0;
211 0 : alloc_hint = 0;
212 : }
213 : }
214 :
215 0 : return nr;
216 : }
217 : EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
218 :
219 623 : bool sbitmap_any_bit_set(const struct sbitmap *sb)
220 : {
221 623 : unsigned int i;
222 :
223 1232 : for (i = 0; i < sb->map_nr; i++) {
224 623 : if (sb->map[i].word & ~sb->map[i].cleared)
225 : return true;
226 : }
227 : return false;
228 : }
229 : EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
230 :
231 0 : static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
232 : {
233 0 : unsigned int i, weight = 0;
234 :
235 0 : for (i = 0; i < sb->map_nr; i++) {
236 0 : const struct sbitmap_word *word = &sb->map[i];
237 :
238 0 : if (set)
239 0 : weight += bitmap_weight(&word->word, word->depth);
240 : else
241 0 : weight += bitmap_weight(&word->cleared, word->depth);
242 : }
243 0 : return weight;
244 : }
245 :
246 0 : static unsigned int sbitmap_weight(const struct sbitmap *sb)
247 : {
248 0 : return __sbitmap_weight(sb, true);
249 : }
250 :
251 0 : static unsigned int sbitmap_cleared(const struct sbitmap *sb)
252 : {
253 0 : return __sbitmap_weight(sb, false);
254 : }
255 :
256 0 : void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
257 : {
258 0 : seq_printf(m, "depth=%u\n", sb->depth);
259 0 : seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
260 0 : seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
261 0 : seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
262 0 : seq_printf(m, "map_nr=%u\n", sb->map_nr);
263 0 : }
264 : EXPORT_SYMBOL_GPL(sbitmap_show);
265 :
266 0 : static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
267 : {
268 0 : if ((offset & 0xf) == 0) {
269 0 : if (offset != 0)
270 0 : seq_putc(m, '\n');
271 0 : seq_printf(m, "%08x:", offset);
272 : }
273 0 : if ((offset & 0x1) == 0)
274 0 : seq_putc(m, ' ');
275 0 : seq_printf(m, "%02x", byte);
276 0 : }
277 :
278 0 : void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
279 : {
280 0 : u8 byte = 0;
281 0 : unsigned int byte_bits = 0;
282 0 : unsigned int offset = 0;
283 0 : int i;
284 :
285 0 : for (i = 0; i < sb->map_nr; i++) {
286 0 : unsigned long word = READ_ONCE(sb->map[i].word);
287 0 : unsigned long cleared = READ_ONCE(sb->map[i].cleared);
288 0 : unsigned int word_bits = READ_ONCE(sb->map[i].depth);
289 :
290 0 : word &= ~cleared;
291 :
292 0 : while (word_bits > 0) {
293 0 : unsigned int bits = min(8 - byte_bits, word_bits);
294 :
295 0 : byte |= (word & (BIT(bits) - 1)) << byte_bits;
296 0 : byte_bits += bits;
297 0 : if (byte_bits == 8) {
298 0 : emit_byte(m, offset, byte);
299 0 : byte = 0;
300 0 : byte_bits = 0;
301 0 : offset++;
302 : }
303 0 : word >>= bits;
304 0 : word_bits -= bits;
305 : }
306 : }
307 0 : if (byte_bits) {
308 0 : emit_byte(m, offset, byte);
309 0 : offset++;
310 : }
311 0 : if (offset)
312 0 : seq_putc(m, '\n');
313 0 : }
314 : EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
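For reference, the dump produced above is a little-endian hexdump of the in-use bits (word & ~cleared), sixteen bytes per "%08x:"-prefixed line, grouped in byte pairs. For example, a map of depth 16 with only bits 0-2 set prints the single line:

00000000: 0700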
315 :
316 18 : static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
317 : unsigned int depth)
318 : {
319 18 : unsigned int wake_batch;
320 18 : unsigned int shallow_depth;
321 :
322 : /*
323 : * For each batch, we wake up one queue. We need to make sure that our
324 : * batch size is small enough that the full depth of the bitmap,
325 : * potentially limited by a shallow depth, is enough to wake up all of
326 : * the queues.
327 : *
328 : * Each full word of the bitmap has bits_per_word bits, and there might
329 : * be a partial word. There are depth / bits_per_word full words and
330 : * depth % bits_per_word bits left over. In bitwise arithmetic:
331 : *
332 : * bits_per_word = 1 << shift
333 : * depth / bits_per_word = depth >> shift
334 : * depth % bits_per_word = depth & ((1 << shift) - 1)
335 : *
336 : * Each word can be limited to sbq->min_shallow_depth bits.
337 : */
338 18 : shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
339 18 : depth = ((depth >> sbq->sb.shift) * shallow_depth +
340 18 : min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
341 18 : wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
342 : SBQ_WAKE_BATCH);
343 :
344 18 : return wake_batch;
345 : }
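To make the arithmetic above concrete, here is a small userspace model of the same computation; SBQ_WAIT_QUEUES and SBQ_WAKE_BATCH are assumed to both be 8, matching include/linux/sbitmap.h.

#include <stdio.h>

#define SBQ_WAIT_QUEUES 8       /* assumed to match include/linux/sbitmap.h */
#define SBQ_WAKE_BATCH  8

static unsigned int model_wake_batch(unsigned int depth, unsigned int shift,
                                     unsigned int min_shallow_depth)
{
        unsigned int per_word = 1U << shift;
        unsigned int shallow = min_shallow_depth < per_word ?
                               min_shallow_depth : per_word;
        unsigned int rem = depth & (per_word - 1);
        unsigned int usable = (depth >> shift) * shallow +
                              (rem < shallow ? rem : shallow);
        unsigned int batch = usable / SBQ_WAIT_QUEUES;

        if (batch < 1)
                batch = 1;
        if (batch > SBQ_WAKE_BATCH)
                batch = SBQ_WAKE_BATCH;
        return batch;
}

int main(void)
{
        /* depth 128, 64 bits per word, no shallow limit: 128/8 = 16, clamped to 8 */
        printf("%u\n", model_wake_batch(128, 6, ~0U));
        /* same depth but min_shallow_depth = 4: only 2*4 = 8 usable bits, batch 1 */
        printf("%u\n", model_wake_batch(128, 6, 4));
        return 0;
}

With the default min_shallow_depth of UINT_MAX the batch simply grows with depth up to SBQ_WAKE_BATCH; a small minimum shallow depth shrinks the usable depth and therefore the batch, so waiters are woken more eagerly.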
346 :
347 18 : int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
348 : int shift, bool round_robin, gfp_t flags, int node)
349 : {
350 18 : int ret;
351 18 : int i;
352 :
353 18 : ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
354 18 : if (ret)
355 : return ret;
356 :
357 18 : sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
358 18 : if (!sbq->alloc_hint) {
359 0 : sbitmap_free(&sbq->sb);
360 0 : return -ENOMEM;
361 : }
362 :
363 18 : if (depth && !round_robin) {
364 45 : for_each_possible_cpu(i)
365 36 : *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
366 : }
367 :
368 18 : sbq->min_shallow_depth = UINT_MAX;
369 18 : sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
370 18 : atomic_set(&sbq->wake_index, 0);
371 18 : atomic_set(&sbq->ws_active, 0);
372 :
373 18 : sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
374 18 : if (!sbq->ws) {
375 0 : free_percpu(sbq->alloc_hint);
376 0 : sbitmap_free(&sbq->sb);
377 0 : return -ENOMEM;
378 : }
379 :
380 162 : for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
381 144 : init_waitqueue_head(&sbq->ws[i].wait);
382 144 : atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
383 : }
384 :
385 18 : sbq->round_robin = round_robin;
386 18 : return 0;
387 : }
388 : EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
389 :
390 0 : static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
391 : unsigned int depth)
392 : {
393 0 : unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
394 0 : int i;
395 :
396 0 : if (sbq->wake_batch != wake_batch) {
397 0 : WRITE_ONCE(sbq->wake_batch, wake_batch);
398 : /*
399 : * Pairs with the memory barrier in sbitmap_queue_wake_up()
400 : * to ensure that the batch size is updated before the wait
401 : * counts.
402 : */
403 0 : smp_mb();
404 0 : for (i = 0; i < SBQ_WAIT_QUEUES; i++)
405 0 : atomic_set(&sbq->ws[i].wait_cnt, 1);
406 : }
407 0 : }
408 :
409 0 : void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
410 : {
411 0 : sbitmap_queue_update_wake_batch(sbq, depth);
412 0 : sbitmap_resize(&sbq->sb, depth);
413 0 : }
414 : EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
415 :
416 3567 : int __sbitmap_queue_get(struct sbitmap_queue *sbq)
417 : {
418 3567 : unsigned int hint, depth;
419 3567 : int nr;
420 :
421 3567 : hint = this_cpu_read(*sbq->alloc_hint);
422 3568 : depth = READ_ONCE(sbq->sb.depth);
423 3568 : if (unlikely(hint >= depth)) {
424 0 : hint = depth ? prandom_u32() % depth : 0;
425 3568 : this_cpu_write(*sbq->alloc_hint, hint);
426 : }
427 3568 : nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
428 :
429 3568 : if (nr == -1) {
430 : /* If the map is full, a hint won't do us much good. */
431 0 : this_cpu_write(*sbq->alloc_hint, 0);
432 3568 : } else if (nr == hint || unlikely(sbq->round_robin)) {
433 : /* Only update the hint if we used it. */
434 1 : hint = nr + 1;
435 1 : if (hint >= depth - 1)
436 0 : hint = 0;
437 3568 : this_cpu_write(*sbq->alloc_hint, hint);
438 : }
439 :
440 3568 : return nr;
441 : }
442 : EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
443 :
444 0 : int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
445 : unsigned int shallow_depth)
446 : {
447 0 : unsigned int hint, depth;
448 0 : int nr;
449 :
450 0 : WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
451 :
452 0 : hint = this_cpu_read(*sbq->alloc_hint);
453 0 : depth = READ_ONCE(sbq->sb.depth);
454 0 : if (unlikely(hint >= depth)) {
455 0 : hint = depth ? prandom_u32() % depth : 0;
456 0 : this_cpu_write(*sbq->alloc_hint, hint);
457 : }
458 0 : nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
459 :
460 0 : if (nr == -1) {
461 : /* If the map is full, a hint won't do us much good. */
462 0 : this_cpu_write(*sbq->alloc_hint, 0);
463 0 : } else if (nr == hint || unlikely(sbq->round_robin)) {
464 : /* Only update the hint if we used it. */
465 0 : hint = nr + 1;
466 0 : if (hint >= depth - 1)
467 0 : hint = 0;
468 0 : this_cpu_write(*sbq->alloc_hint, hint);
469 : }
470 :
471 0 : return nr;
472 : }
473 : EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
474 :
475 0 : void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
476 : unsigned int min_shallow_depth)
477 : {
478 0 : sbq->min_shallow_depth = min_shallow_depth;
479 0 : sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
480 0 : }
481 : EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
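The two shallow-depth entry points are meant to be paired: a user first declares the smallest per-word limit it will ever request, so sbq_calc_wake_batch() can keep the wake batch safe, and then passes a limit no smaller than that on each allocation (the WARN_ON_ONCE() above fires otherwise). A sketch, with the value 16 purely illustrative:

        /* once, at setup time */
        sbitmap_queue_min_shallow_depth(&sbq, 16);

        /* per allocation; must not be smaller than the declared minimum */
        tag = __sbitmap_queue_get_shallow(&sbq, 16);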
482 :
483 3565 : static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
484 : {
485 3565 : int i, wake_index;
486 :
487 3565 : if (!atomic_read(&sbq->ws_active))
488 : return NULL;
489 :
490 0 : wake_index = atomic_read(&sbq->wake_index);
491 0 : for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
492 0 : struct sbq_wait_state *ws = &sbq->ws[wake_index];
493 :
494 0 : if (waitqueue_active(&ws->wait)) {
495 0 : if (wake_index != atomic_read(&sbq->wake_index))
496 0 : atomic_set(&sbq->wake_index, wake_index);
497 0 : return ws;
498 : }
499 :
500 0 : wake_index = sbq_index_inc(wake_index);
501 : }
502 :
503 : return NULL;
504 : }
505 :
506 3565 : static bool __sbq_wake_up(struct sbitmap_queue *sbq)
507 : {
508 3565 : struct sbq_wait_state *ws;
509 3565 : unsigned int wake_batch;
510 3565 : int wait_cnt;
511 :
512 3565 : ws = sbq_wake_ptr(sbq);
513 3565 : if (!ws)
514 : return false;
515 :
516 0 : wait_cnt = atomic_dec_return(&ws->wait_cnt);
517 0 : if (wait_cnt <= 0) {
518 0 : int ret;
519 :
520 0 : wake_batch = READ_ONCE(sbq->wake_batch);
521 :
522 : /*
523 : * Pairs with the memory barrier in sbitmap_queue_resize() to
524 : * ensure that we see the batch size update before the wait
525 : * count is reset.
526 : */
527 0 : smp_mb__before_atomic();
528 :
529 : /*
530 : * For concurrent callers of this, the one that failed the
531 : * atomic_cmpxchg() race should call this function again
532 : * to wake up a new batch on a different 'ws'.
533 : */
534 0 : ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
535 0 : if (ret == wait_cnt) {
536 0 : sbq_index_atomic_inc(&sbq->wake_index);
537 0 : wake_up_nr(&ws->wait, wake_batch);
538 0 : return false;
539 : }
540 :
541 : return true;
542 : }
543 :
544 : return false;
545 : }
546 :
547 3565 : void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
548 : {
549 7130 : while (__sbq_wake_up(sbq))
550 3565 : ;
551 0 : }
552 : EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
553 :
554 3565 : void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
555 : unsigned int cpu)
556 : {
557 : /*
558 : * Once the cleared bit is set, the bit may be allocated again.
559 : *
560 : * Orders READs/WRITEs on the associated instance (such as a blk_mq
561 : * request) against this bit to avoid a race with re-allocation;
562 : * its pair is the memory barrier implied in __sbitmap_get_word().
563 : *
564 : * One invariant is that the cleared bit has to be zero while the
565 : * bit is in use.
566 : */
567 3565 : smp_mb__before_atomic();
568 3565 : sbitmap_deferred_clear_bit(&sbq->sb, nr);
569 :
570 : /*
571 : * Pairs with the memory barrier in set_current_state() to ensure the
572 : * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
573 : * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
574 : * waiter. See the comment on waitqueue_active().
575 : */
576 3565 : smp_mb__after_atomic();
577 3565 : sbitmap_queue_wake_up(sbq);
578 :
579 3565 : if (likely(!sbq->round_robin && nr < sbq->sb.depth))
580 3565 : *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
581 3565 : }
582 : EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
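A sketch of the non-blocking sbitmap_queue round trip; the per-CPU hint, the deferred clear, and the wake-up above all happen behind these calls. The function name, depth, and flags are illustrative and error handling is trimmed; sbitmap_queue_free() is the header helper that releases the percpu hints, wait queues, and map.

#include <linux/sbitmap.h>
#include <linux/smp.h>

static int sbq_example(void)
{
        struct sbitmap_queue sbq;
        int tag, ret;

        ret = sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL,
                                      NUMA_NO_NODE);
        if (ret)
                return ret;

        tag = __sbitmap_queue_get(&sbq);        /* -1 if no bit is currently free */
        if (tag >= 0)
                sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());

        sbitmap_queue_free(&sbq);
        return 0;
}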
583 :
584 0 : void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
585 : {
586 0 : int i, wake_index;
587 :
588 : /*
589 : * Pairs with the memory barrier in set_current_state() like in
590 : * sbitmap_queue_wake_up().
591 : */
592 0 : smp_mb();
593 0 : wake_index = atomic_read(&sbq->wake_index);
594 0 : for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
595 0 : struct sbq_wait_state *ws = &sbq->ws[wake_index];
596 :
597 0 : if (waitqueue_active(&ws->wait))
598 0 : wake_up(&ws->wait);
599 :
600 0 : wake_index = sbq_index_inc(wake_index);
601 : }
602 0 : }
603 : EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
604 :
605 0 : void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
606 : {
607 0 : bool first;
608 0 : int i;
609 :
610 0 : sbitmap_show(&sbq->sb, m);
611 :
612 0 : seq_puts(m, "alloc_hint={");
613 0 : first = true;
614 0 : for_each_possible_cpu(i) {
615 0 : if (!first)
616 0 : seq_puts(m, ", ");
617 0 : first = false;
618 0 : seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
619 : }
620 0 : seq_puts(m, "}\n");
621 :
622 0 : seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
623 0 : seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
624 0 : seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
625 :
626 0 : seq_puts(m, "ws={\n");
627 0 : for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
628 0 : struct sbq_wait_state *ws = &sbq->ws[i];
629 :
630 0 : seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
631 0 : atomic_read(&ws->wait_cnt),
632 0 : waitqueue_active(&ws->wait) ? "active" : "inactive");
633 : }
634 0 : seq_puts(m, "}\n");
635 :
636 0 : seq_printf(m, "round_robin=%d\n", sbq->round_robin);
637 0 : seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
638 0 : }
639 : EXPORT_SYMBOL_GPL(sbitmap_queue_show);
640 :
641 0 : void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
642 : struct sbq_wait_state *ws,
643 : struct sbq_wait *sbq_wait)
644 : {
645 0 : if (!sbq_wait->sbq) {
646 0 : sbq_wait->sbq = sbq;
647 0 : atomic_inc(&sbq->ws_active);
648 0 : add_wait_queue(&ws->wait, &sbq_wait->wait);
649 : }
650 0 : }
651 : EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
652 :
653 0 : void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
654 : {
655 0 : list_del_init(&sbq_wait->wait.entry);
656 0 : if (sbq_wait->sbq) {
657 0 : atomic_dec(&sbq_wait->sbq->ws_active);
658 0 : sbq_wait->sbq = NULL;
659 : }
660 0 : }
661 : EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);
662 :
663 0 : void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
664 : struct sbq_wait_state *ws,
665 : struct sbq_wait *sbq_wait, int state)
666 : {
667 0 : if (!sbq_wait->sbq) {
668 0 : atomic_inc(&sbq->ws_active);
669 0 : sbq_wait->sbq = sbq;
670 : }
671 0 : prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
672 0 : }
673 : EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
674 :
675 0 : void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
676 : struct sbq_wait *sbq_wait)
677 : {
678 0 : finish_wait(&ws->wait, &sbq_wait->wait);
679 0 : if (sbq_wait->sbq) {
680 0 : atomic_dec(&sbq->ws_active);
681 0 : sbq_wait->sbq = NULL;
682 : }
683 0 : }
684 : EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
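When __sbitmap_queue_get() fails, callers use the wait helpers above in a prepare/retry/schedule/finish loop. The sketch below is modeled loosely on how blk-mq waits for a tag; DEFINE_SBQ_WAIT() and sbq_wait_ptr() come from the sbitmap header, while the function itself and its wait_index parameter are illustrative.

#include <linux/sbitmap.h>
#include <linux/sched.h>

static int sbq_get_blocking(struct sbitmap_queue *sbq, atomic_t *wait_index)
{
        DEFINE_SBQ_WAIT(wait);
        struct sbq_wait_state *ws;
        int tag;

        tag = __sbitmap_queue_get(sbq);
        if (tag >= 0)
                return tag;

        ws = sbq_wait_ptr(sbq, wait_index);
        do {
                sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
                tag = __sbitmap_queue_get(sbq); /* recheck after arming the wait */
                if (tag >= 0)
                        break;
                io_schedule();                  /* woken via sbitmap_queue_clear() */
                tag = __sbitmap_queue_get(sbq);
        } while (tag < 0);
        sbitmap_finish_wait(sbq, ws, &wait);

        return tag;
}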
|