// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&sb->map[index].swap_lock, flags);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	do {
		mask = sb->map[index].cleared;
	} while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
	return ret;
}
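
/*
 * The free path (sbitmap_deferred_clear_bit()) only marks bits in ->cleared;
 * the function above folds them back into ->word in one batch, when an
 * allocation attempt in this word comes up empty or the map is resized.
 *
 * For example, with ->word = 0b0111 and ->cleared = 0b0101 (bits 0 and 2
 * were freed), the batch move leaves ->cleared = 0 and ->word = 0b0010, so
 * the two freed bits become allocatable again.
 */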

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
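
/*
 * Minimal setup sketch (illustrative only, not taken from a particular
 * caller): a 256-bit map with no NUMA preference, letting the word size be
 * picked automatically via shift == -1.
 *
 *	struct sbitmap sb;
 *	int ret;
 *
 *	ret = sbitmap_init_node(&sb, 256, -1, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_free(&sb);
 */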

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
					       round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
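
/*
 * The bit number returned by sbitmap_get() (and by sbitmap_get_shallow()
 * below) encodes both halves of the position:
 * nr = (index << sb->shift) + bit_in_word. SB_NR_TO_INDEX() and
 * SB_NR_TO_BIT() recover them, which is why an alloc_hint taken from a
 * previously returned nr lands back in the same word.
 */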

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long mask = word->word & ~word->cleared;
		unsigned long ret;

		ret = find_first_zero_bit(&mask, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
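
/*
 * The dump produced above reads like a classic hex dump: a new line starts
 * every 16 bytes with a "%08x:" byte offset, and bytes are grouped in pairs,
 * e.g. (made-up contents) "00000000: ff03 0000". Each word is emitted
 * least-significant bits first, packed into bytes.
 */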

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
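
/*
 * Worked example with made-up numbers: depth = 128, shift = 6 (64 bits per
 * word) and min_shallow_depth = 16 give shallow_depth = 16, so the usable
 * depth is (128 >> 6) * 16 + min(128 & 63, 16) = 32. Assuming
 * SBQ_WAIT_QUEUES == 8, wake_batch = clamp(32 / 8, 1, SBQ_WAKE_BATCH) = 4,
 * i.e. one wait queue is woken up after roughly every four completions.
 */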

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
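
/*
 * Typical lifetime of a queue-based tag map (an illustrative sketch, not
 * lifted from an in-tree user):
 *
 *	struct sbitmap_queue sbq;
 *	int ret, tag;
 *
 *	ret = sbitmap_queue_init_node(&sbq, 256, -1, false, GFP_KERNEL,
 *				      NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *
 *	tag = __sbitmap_queue_get(&sbq);
 *	if (tag >= 0) {
 *		... use the tag ...
 *		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());
 *	}
 *
 *	sbitmap_queue_free(&sbq);
 */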

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out again.
	 *
	 * Orders READ/WRITE on the associated instance (such as a blk_mq
	 * request) by this bit, to avoid racing with re-allocation. Its
	 * pair is the memory barrier implied in __sbitmap_get_word().
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
	}
	add_wait_queue(&ws->wait, &sbq_wait->wait);
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
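
/*
 * Illustrative waiter loop built on the helpers above (a sketch, not lifted
 * from an in-tree caller; a real user would pick the wait queue with its own
 * policy rather than always using ws[0], and DEFINE_SBQ_WAIT() is assumed to
 * come from the sbitmap header):
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = &sbq->ws[0];
 *	int nr;
 *
 *	for (;;) {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	}
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */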