// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2019-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/xarray.h>
#include <linux/module.h>

static unsigned int tests_run;
static unsigned int tests_passed;

static const unsigned int order_limit =
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;

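/*
 * When the kernel's XA_DEBUG checks are not built in, fall back to a local
 * XA_BUG_ON() that records the failure, dumps the array and a stack trace,
 * and lets the rest of the suite keep running.
 */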
#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
#define XA_BUG_ON(xa, x) do { \
	tests_run++; \
	if (x) { \
		printk("BUG at %s:%d\n", __func__, __LINE__); \
		xa_dump(xa); \
		dump_stack(); \
	} else { \
		tests_passed++; \
	} \
} while (0)
#endif

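/*
 * Small helpers used throughout the suite: they convert an index into a
 * value entry and store/insert/allocate/erase that entry at @index,
 * asserting on the return values.
 */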
static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

static void xa_insert_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
				GFP_KERNEL) != 0);
}

static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

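/* xa_err() should translate store/erase return values into 0 or an errno. */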
static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

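/*
 * Erasing an entry while an iterator holds the RCU read lock leaves a retry
 * entry behind; check that xas_retry()/xas_reset() handle it and that
 * iteration can step over explicitly stored retry entries.
 */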
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}

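/* Store indices 0-1023 one at a time, checking loads before and after each. */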
static noinline void check_xa_load(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

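/*
 * Mark handling: marks only stick to present entries, storing NULL clears
 * them, and a multi-index entry takes on the union of the marks it covers.
 */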
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

static noinline void check_xa_mark(struct xarray *xa)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(xa, index);

	check_xa_mark_2(xa);
}

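/*
 * Erasing the only entry above index 0 should shrink the tree and leave a
 * retry entry in the abandoned node; the loop below also checks that
 * erasing a freshly stored ULONG_MAX entry restores the previous head node.
 */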
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}

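/* xa_insert() should fill an empty slot without disturbing its neighbours. */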
static noinline void check_insert(struct xarray *xa)
{
	unsigned long i;

	for (i = 0; i < 1024; i++) {
		xa_insert_index(xa, i);
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
		xa_erase_index(xa, i);
	}

	for (i = 10; i < BITS_PER_LONG; i++) {
		xa_insert_index(xa, 1UL << i);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
		xa_erase_index(xa, 1UL << i);

		xa_insert_index(xa, (1UL << i) - 1);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
		xa_erase_index(xa, (1UL << i) - 1);
	}

	xa_insert_index(xa, ~0UL);
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
	xa_erase_index(xa, ~0UL);

	XA_BUG_ON(xa, !xa_empty(xa));
}

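/* xa_cmpxchg() only replaces the entry when the old value matches. */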
static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

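/*
 * Reserved entries hold their slot (the array is not empty and xa_insert()
 * sees -EBUSY) but load as NULL and are skipped by xa_for_each().
 */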
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}

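/* Fill a range through the advanced API, then erase entries while iterating. */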
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

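/*
 * Multi-index stores: one entry covering 2^order slots should be visible at
 * every index it spans and replaceable with a single store.
 */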
#ifdef CONFIG_XARRAY_MULTI
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}

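/*
 * Allocating XArrays: IDs are handed out starting at @base (0 or 1), freed
 * IDs are reused, caller-supplied limits are honoured, and a full range
 * reports -EBUSY.
 */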
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(void)
{
	check_xa_alloc_1(&xa0, 0);
	check_xa_alloc_1(&xa1, 1);
	check_xa_alloc_2(&xa0, 0);
	check_xa_alloc_2(&xa1, 1);
	check_xa_alloc_3(&xa0, 0);
	check_xa_alloc_3(&xa1, 1);
}

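/*
 * xas_for_each_conflict() should visit exactly the entries that overlap a
 * proposed multi-index store; @present is the number we expect to see.
 */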
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

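/*
 * Finding multi-index entries: every index covered by the entry should find
 * it, and xa_find_after() must not return the same entry twice.
 */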
static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(xa, multi);
	xa_erase_index(xa, next);
	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

static noinline void check_multi_find_3(struct xarray *xa)
{
	unsigned int order;

	for (order = 5; order < order_limit; order++) {
		unsigned long index = 1UL << (order - 5);

		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
		xa_erase_index(xa, 0);
	}
}

static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
						XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
						XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

static noinline void check_find_4(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);

	xa_erase_index(xa, ULONG_MAX);
}

static noinline void check_find(struct xarray *xa)
{
	unsigned i;

	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_find_4(xa);

	for (i = 2; i < 10; i++)
		check_multi_find_1(xa, i);
	check_multi_find_2(xa);
	check_multi_find_3(xa);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_pause(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
	unsigned long index = 1;
	unsigned int count = 0;

	for (order = 0; order < order_limit; order++) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);
}

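/*
 * xas_next()/xas_prev() step through the array one index at a time; check
 * them on an empty array, a single entry, around ULONG_MAX and over large
 * runs of entries.
 */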
static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_max(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(xa);
	check_move_max(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}

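/*
 * xas_create_range() creates all the slots for a 2^order range up front;
 * the helpers below then store into each slot and verify the results for
 * both aligned and misaligned starting indices.
 */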
static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);
	}

	check_create_range_3();
}

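/* xa_store_range() stores one entry covering the whole [first, last] span. */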
static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(xa, i, j);
			__check_store_range(xa, 128 + i, 128 + j);
			__check_store_range(xa, 4095 + i, 4095 + j);
			__check_store_range(xa, 4096 + i, 4096 + j);
			__check_store_range(xa, 123456 + i, 123456 + j);
			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
		}
	}
}

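/*
 * "name" need not be word-aligned, so name + i gives pointers with
 * assorted low bits set.  Allocate eight of them, check the IDs come back
 * as 0..7, and make sure iteration never hands back anything that looks
 * like an error entry.
 */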
static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}

/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
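/*
 * A minimal sketch of the pattern being relied on (hypothetical caller,
 * not part of this test):
 *
 *	err = xa_reserve(xa, index, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	...
 *	xa_store(xa, index, entry, 0);
 *
 * The zero gfp mask on the later store is fine because xa_reserve()
 * already allocated any memory the slot needs.
 */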
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct xarray *xa)
{
	char name[] = "Motorola 68000";

	check_align_1(xa, name);
	check_align_1(xa, name + 1);
	check_align_1(xa, name + 2);
	check_align_1(xa, name + 3);
	check_align_2(xa, name);
}

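/*
 * The workingset checks below are loosely modelled on how the page cache
 * tracks shadow entries: the node-update callback puts a node on the
 * shadow_nodes list once it is non-empty and holds nothing but value
 * entries, and takes it back off as soon as that stops being true.
 */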
static LIST_HEAD(shadow_nodes);

static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_STATE(xas, node->array, 0);
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xas.xa_node = xa_parent_locked(node->array, node);
		xas.xa_offset = node->offset;
		xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
		xas_set_update(&xas, test_update_node);
		xas_store(&xas, NULL);
	}
	xa_unlock(xa);
}

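/*
 * Walk one node through the interesting states: two value entries put it
 * on shadow_nodes, storing a plain pointer at a third index takes it off
 * again (the node no longer holds only values), turning that pointer back
 * into a value re-adds it, and shadow_remove() then empties both the list
 * and the array.
 */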
static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}

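/*
 * xa_destroy() is meant to remove every entry and leave the array empty
 * and reusable; check that for an already-empty array, for single entries
 * (including one at ULONG_MAX) and for a multi-index entry.
 */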
static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);

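/*
 * Module entry point: run each check in turn (xa0 is defined earlier in
 * this file) and refuse to load if any assertion tripped.
 */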
static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_pause(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}

static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");