/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in its definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard), we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structures of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#else /* !CONFIG_SLOB */

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

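/*
 * Illustrative sketch (not part of this header): most code outside the
 * bootstrap only needs to know whether the slab allocator is usable at all,
 * which is what slab_is_available() in slab_common.c boils down to:
 *
 *	bool slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 */
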
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

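/*
 * Illustrative sketch (not part of this header): kmem_cache_create() in
 * slab_common.c applies the two masks above roughly as follows -- refuse
 * requests carrying flags no configuration may pass, then silently drop
 * whatever this particular configuration cannot honour:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;
 *	flags &= CACHE_CREATE_MASK;
 */
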
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

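/*
 * Illustrative sketch (not part of this header): a caller-side view of the
 * public bulk API that these generic implementations back:
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		... use the objects ...
 *		kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 *	}
 */
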
static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}

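/*
 * Illustrative sketch (not part of this header): SLUB's "any debugging
 * enabled for this cache?" test in slub.c is built on the helper above:
 *
 *	static inline bool kmem_cache_debug(struct kmem_cache *s)
 *	{
 *		return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
 *	}
 */
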
#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

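/*
 * Illustrative sketch (not part of this header): a typical traversal of a
 * root cache's children, in the spirit of kmem_cache_shrink_all() in
 * slab_common.c:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, s)
 *		__kmem_cache_shrink(c);
 *	mutex_unlock(&slab_mutex);
 */
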
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
	/*
	 * page->mem_cgroup and page->obj_cgroups are sharing the same
	 * space. To distinguish between them in case we don't know for sure
	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
	 * always set the lowest bit of obj_cgroups.
	 */
	return (struct obj_cgroup **)
		((unsigned long)page->obj_cgroups & ~0x1UL);
}

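/*
 * Illustrative sketch (not part of this header): the counterpart test that
 * the tag bit makes possible, as used on the memcontrol side
 * (page_has_obj_cgroups() in memcontrol.c):
 *
 *	static inline bool page_has_obj_cgroups(struct page *page)
 *	{
 *		return (unsigned long)page->obj_cgroups & 0x1UL;
 *	}
 */
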
/*
 * Expects a pointer to a slab page. Please note that the PageSlab() check
 * isn't sufficient on its own, as it also returns true for tail pages of
 * compound slab pages, which do not have the slab_cache pointer set.
 * So this function assumes that the page can pass the
 * PageSlab() && !PageTail() check.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	unsigned int objects = objs_per_slab_page(s, page);
	void *vec;

	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
			   page_to_nid(page));
	if (!vec)
		return -ENOMEM;

	kmemleak_not_leak(vec);
	page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_obj_cgroups(page));
	page->obj_cgroups = NULL;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

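/*
 * For example (illustrative): with s->size == 64 on a 64-bit kernel, each
 * accounted object is charged 64 + 8 = 72 bytes.
 */
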
static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
						struct obj_cgroup **objcgp,
						size_t objects, gfp_t flags)
{
	struct kmem_cache *cachep;

	cachep = memcg_kmem_get_cache(s, objcgp);
	if (is_root_cache(cachep))
		return s;

	if (obj_cgroup_charge(*objcgp, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(*objcgp);
		memcg_kmem_put_cache(cachep);
		cachep = NULL;
	}

	return cachep;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
				   struct pglist_data *pgdat,
				   int idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      size_t size, void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);
			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_obj_cgroups(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
	memcg_kmem_put_cache(s);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
					void *p)
{
	struct obj_cgroup *objcg;
	unsigned int off;

	if (!memcg_kmem_enabled() || is_root_cache(s))
		return;

	off = obj_to_index(s, page, p);
	objcg = page_obj_cgroups(page)[off];
	page_obj_cgroups(page)[off] = NULL;

	obj_cgroup_uncharge(objcg, obj_full_size(s));
	mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
			-obj_full_size(s));

	obj_cgroup_put(objcg);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
							    struct obj_cgroup **objcgp,
							    size_t objects, gfp_t flags)
{
	return NULL;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      size_t size, void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
					void *p)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG_KMEM
	if (memcg_kmem_enabled() && !is_root_cache(s)) {
		int ret;

		ret = memcg_alloc_page_obj_cgroups(page, s, gfp);
		if (ret)
			return ret;

		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
	}
#endif
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
	return 0;
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG_KMEM
	if (memcg_kmem_enabled() && !is_root_cache(s)) {
		memcg_free_page_obj_cgroups(page);
		percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
	}
#endif
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !memcg_kmem_enabled() &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && !slab_equal_or_root(cachep, s),
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_slab_pre_alloc_hook(s, objcgp, size, flags);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg,
					gfp_t flags, size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled() && !is_root_cache(s))
		memcg_slab_post_alloc_hook(s, objcg, size, p);
}

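/*
 * Illustrative sketch (not part of this header): the allocators bracket
 * their allocation fast path with the two hooks above, roughly:
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 *	if (!s)
 *		return NULL;
 *	... obtain "object" from a per-cpu freelist or a new slab ...
 *	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
 */
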
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

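/*
 * Illustrative sketch (not part of this header): summing per-node state with
 * the iterator above, in the style of SLUB's statistics code:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */
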
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

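/*
 * Illustrative sketch (not part of this header): how an allocator acts on
 * the two predicates above in its alloc and free paths:
 *
 *	if (slab_want_init_on_alloc(gfpflags, s))
 *		memset(object, 0, s->object_size);
 *	...
 *	if (slab_want_init_on_free(s))
 *		memset(object, 0, s->object_size);
 */
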
#endif /* MM_SLAB_H */