/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
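/*
 * Illustrative sketch only (not a contract): the non-SLOB kmalloc() fast
 * path is expected to resolve small sizes through this table, roughly as
 * below; sizes above KMALLOC_MAX_CACHE_SIZE fall back to the page
 * allocator instead.
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (!ZERO_OR_NULL_PTR(s))
 *		object = kmem_cache_alloc(s, flags);
 */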
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

#ifdef CONFIG_SLUB
/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Call stack addresses */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };
#endif

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

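/*
 * Sketch of how the two masks above are meant to be used at cache creation
 * (an assumption about the common creation path, not a guarantee): flags
 * outside SLAB_FLAGS_PERMITTED cause the create to fail with -EINVAL, and
 * the remaining flags are narrowed to what this configuration supports via
 *
 *	flags &= CACHE_CREATE_MASK;
 */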
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case the objects listed may be
 * allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

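/*
 * Minimal sketch of what the generic bulk fallback declared above amounts
 * to when the allocator has no dedicated fast path (illustrative only):
 * objects are simply allocated or freed one at a time.
 *
 *	for (i = 0; i < nr; i++) {
 *		void *x = kmem_cache_alloc(s, flags);
 *
 *		if (!x) {
 *			__kmem_cache_free_bulk(s, i, p);
 *			return 0;
 *		}
 *		p[i] = x;
 *	}
 *	return i;
 */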
static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
extern unsigned long get_each_object_track(struct kmem_cache *s,
		struct page *page, enum track_item alloc,
		int (*fn)(const struct kmem_cache *, const void *,
		const struct track *, void *), void *private);
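/*
 * Illustrative use of get_each_object_track() (the callback below is
 * hypothetical): walk the tracking records of every object on a slab page
 * and hand each one to a caller-supplied function.
 *
 *	static int dump_track(const struct kmem_cache *s, const void *object,
 *			      const struct track *t, void *private)
 *	{
 *		pr_info("%s: %p allocated from %pS by pid %d\n",
 *			s->name, object, (void *)t->addr, t->pid);
 *		return 0;
 *	}
 *
 *	handled = get_each_object_track(s, page, TRACK_ALLOC, dump_track, NULL);
 */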
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#ifdef CONFIG_SLUB
static inline unsigned long get_each_object_track(struct kmem_cache *s,
		struct page *page, enum track_item alloc,
		int (*fn)(const struct kmem_cache *, const void *,
		const struct track *, void *), void *private)
{
	return 0;
}
#endif
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}

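/*
 * Typical call pattern for kmem_cache_debug_flags() above (illustrative):
 * gate a debug-only slow path on a per-cache debug flag, e.g.
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		set_track(s, object, TRACK_ALLOC, addr);
 */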
#ifdef CONFIG_MEMCG_KMEM
static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
	/*
	 * page->mem_cgroup and page->obj_cgroups are sharing the same
	 * space. To distinguish between them in case we don't know for sure
	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
	 * always set the lowest bit of obj_cgroups.
	 */
	return (struct obj_cgroup **)
		((unsigned long)page->obj_cgroups & ~0x1UL);
}

static inline bool page_has_obj_cgroups(struct page *page)
{
	return ((unsigned long)page->obj_cgroups & 0x1UL);
}

int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_obj_cgroups(page));
	page->obj_cgroups = NULL;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is extra space used to store
	 * its obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

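/*
 * Worked example for obj_full_size() above, assuming a 64-bit kernel
 * (8-byte pointers): a cache with s->size == 192 charges each accounted
 * object 192 + 8 = 200 bytes, covering the object itself plus its slot in
 * the page's obj_cgroups vector.
 */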
/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
		return false;
	}

	*objcgp = objcg;
	return true;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
				   struct pglist_data *pgdat,
				   int idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_enabled() || !objcg)
		return;

	flags &= ~__GFP_ACCOUNT;
	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);

			if (!page_has_obj_cgroups(page) &&
			    memcg_alloc_page_obj_cgroups(page, s, flags)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_obj_cgroups(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup *objcg;
	struct page *page;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		page = virt_to_head_page(p[i]);
		if (!page_has_obj_cgroups(page))
			continue;

		if (!s_orig)
			s = page->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, page, p[i]);
		objcg = page_obj_cgroups(page)[off];
		if (!objcg)
			continue;

		page_obj_cgroups(page)[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline bool page_has_obj_cgroups(struct page *page)
{
	return false;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline void account_slab_page(struct page *page, int order,
					      struct kmem_cache *s)
{
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
						struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_page_obj_cgroups(page);

	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there or track
	 * user information then we can only use the space before that
	 * information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc. for the allocation.
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg,
					gfp_t flags, size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

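/*
 * Example use of for_each_kmem_cache_node() (illustrative, SLUB fields):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */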
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

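/*
 * Note on the two helpers above: with the init_on_alloc / init_on_free
 * static keys enabled, objects are zeroed on allocation or free unless the
 * cache has a constructor or uses SLAB_TYPESAFE_BY_RCU / SLAB_POISON, where
 * unconditional zeroing would clobber state those caches rely on.
 */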
#endif /* MM_SLAB_H */