// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * later be presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them contiguously one
 * after another in a contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
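
/*
 * A minimal usage sketch (illustrative only; the helpers record_alloc_stack()
 * and report_object() are hypothetical, not part of this file): a tool
 * captures a trace with stack_trace_save(), deduplicates it through
 * stack_depot_save(), stores only the 32-bit handle, and later expands the
 * handle again with stack_depot_fetch():
 *
 *	static depot_stack_handle_t record_alloc_stack(gfp_t flags)
 *	{
 *		unsigned long entries[64];
 *		unsigned int nr_entries;
 *
 *		nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *		return stack_depot_save(entries, nr_entries, flags);
 *	}
 *
 *	static void report_object(depot_stack_handle_t handle)
 *	{
 *		unsigned long *entries;
 *		unsigned int nr_entries;
 *
 *		nr_entries = stack_depot_fetch(handle, &entries);
 *		stack_trace_print(entries, nr_entries, 0);
 *	}
 */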

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
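
/*
 * Worked example of the handle layout (a sketch assuming 4K pages, i.e.
 * PAGE_SHIFT == 12, and a 32-bit depot_stack_handle_t): each slab is
 * 1 << (PAGE_SHIFT + STACK_ALLOC_ORDER) = 16KB, records are aligned to
 * 1 << STACK_ALLOC_ALIGN = 16 bytes, so an in-slab offset needs
 * 12 + 2 - 4 = 10 bits. One bit is reserved so that a valid handle is never
 * zero, leaving 32 - 10 - 1 = 21 bits for the slab index, which
 * STACK_ALLOC_SLABS_CAP then limits to 8192 slabs, i.e. at most
 * 8192 * 16KB = 128MB of stack storage.
 */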

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release() pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

static struct stack_record *stack_table[STACK_HASH_SIZE] = {
	[0 ... STACK_HASH_SIZE - 1] = NULL
};

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/*
 * Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					unsigned long *entries, int size,
					u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @entries:	Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in depot
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0))
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory now - we won't be able to
	 * do that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

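/*
 * Illustrative use of filter_irq_stacks() (a sketch, not part of this file):
 * callers typically trim a captured trace at the first IRQ entry frame before
 * depositing it, so that the frames of the interrupted task do not make
 * otherwise-identical traces unique:
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
 *	nr_entries = filter_irq_stacks(entries, nr_entries);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */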
unsigned int filter_irq_stacks(unsigned long *entries,
			       unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);