/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation.
 * So at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
43
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -070044struct debug_bucket {
45 struct hlist_head list;
Thomas Gleixneraef9cb02009-11-17 18:11:28 +010046 raw_spinlock_t lock;
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -070047};
48
/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free does not
 * include the objects sitting in those pools (it under-counts them) and
 * obj_pool_used still counts them as used (it over-counts them).
 * debug_stats_show() adjusts for this. Both obj_pool_min_free and
 * obj_pool_max_used can therefore be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
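
/*
 * Illustrative note (added for clarity): object debugging can be toggled
 * at boot, overriding CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT, by appending
 * one of the parameters registered above to the kernel command line:
 *
 *	debug_objects		force-enable the tracker
 *	no_debug_objects	force-disable the tracker
 */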

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
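
/*
 * State machine summary (added for clarity, derived from the functions
 * below): debug_object_init() moves a tracked object to INIT,
 * debug_object_activate() to ACTIVE, debug_object_deactivate() to
 * INACTIVE, debug_object_destroy() to DESTROYED, and debug_object_free()
 * drops the tracking object entirely. Transitions that are invalid for
 * the current state are reported via debug_print_object() and, where a
 * fixup callback is provided in the descriptor, repaired.
 */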

static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full or not; if not, fill
	 * the pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
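
/*
 * Example (added for clarity): with ODEBUG_CHUNK_SHIFT == PAGE_SHIFT, all
 * objects located in the same page hash to the same bucket. When that page
 * is freed, __debug_check_no_obj_freed() only needs to scan the buckets of
 * the chunks covering [address, address + size) instead of the whole hash
 * table.
 */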

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);
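
/*
 * Illustrative usage sketch (added for clarity; "my_timer" and
 * "my_timer_debug_descr" are made-up names, not part of this file).
 * A subsystem that wants its objects tracked supplies a descriptor and
 * brackets the object's lifetime with the calls above:
 *
 *	static const struct debug_obj_descr my_timer_debug_descr = {
 *		.name = "my_timer",
 *	};
 *
 *	debug_object_init(timer, &my_timer_debug_descr);
 *	debug_object_activate(timer, &my_timer_debug_descr);
 *	...
 *	debug_object_deactivate(timer, &my_timer_debug_descr);
 *	debug_object_free(timer, &my_timer_debug_descr);
 */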

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
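
/*
 * The statistics are exposed through debugfs (typically mounted at
 * /sys/kernel/debug), e.g.:
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain     :...
 *	pool_free     :...
 *
 * Illustrative; the full field list matches debug_stats_show() above.
 */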

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU is
	 * up and interrupts are disabled, so it is safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, to avoid recursive calls.
 */
1348void __init debug_objects_mem_init(void)
1349{
Waiman Long634d61f2019-05-20 10:14:47 -04001350 int cpu, extras;
Waiman Longd86998b2019-05-20 10:14:46 -04001351
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07001352 if (!debug_objects_enabled)
1353 return;
1354
Waiman Longd86998b2019-05-20 10:14:46 -04001355 /*
1356 * Initialize the percpu object pools
1357 *
1358 * Initialization is not strictly necessary, but was done for
1359 * completeness.
1360 */
1361 for_each_possible_cpu(cpu)
1362 INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
1363
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07001364 obj_cache = kmem_cache_create("debug_objects_cache",
1365 sizeof (struct debug_obj), 0,
Qian Cai8de456c2018-11-30 14:09:48 -08001366 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1367 NULL);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07001368
Thomas Gleixner1be1cb72009-03-16 18:53:18 +01001369 if (!obj_cache || debug_objects_replace_static_objects()) {
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07001370 debug_objects_enabled = 0;
Zhong Jiang3ff4f802018-08-01 00:24:58 +08001371 kmem_cache_destroy(obj_cache);
Fabian Frederick719e4842014-06-04 16:06:04 -07001372 pr_warn("out of memory.\n");
wuchi4b469322022-06-11 21:06:34 +08001373 return;
Thomas Gleixner1be1cb72009-03-16 18:53:18 +01001374 } else
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07001375 debug_objects_selftest();
Waiman Long634d61f2019-05-20 10:14:47 -04001376
Zqiang88451f22020-09-08 14:27:09 +08001377#ifdef CONFIG_HOTPLUG_CPU
1378 cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1379 object_cpu_offline);
1380#endif
1381
Waiman Long634d61f2019-05-20 10:14:47 -04001382 /*
1383 * Increase the thresholds for allocating and freeing objects
1384 * according to the number of possible CPUs available in the system.
1385 */
1386 extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1387 debug_objects_pool_size += extras;
1388 debug_objects_pool_min_level += extras;
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07001389}
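
/*
 * Worked example (added for clarity): with ODEBUG_BATCH_SIZE == 16, a
 * system with 4 possible CPUs gets extras = 64, raising the dynamic pool
 * target from 1024 to 1088 objects and the refill threshold from 256 to
 * 320.
 */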