/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#else /* !CONFIG_SLOB */

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
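
/*
 * Illustrative sketch only (not used anywhere in this file): how the fields
 * above are typically navigated. The helpers referenced (memcg_root_cache(),
 * for_each_memcg_cache()) are defined later in this header for the
 * CONFIG_MEMCG_KMEM case, and for_each_memcg_cache() requires slab_mutex.
 *
 *	struct kmem_cache *root = memcg_root_cache(s);
 *	struct kmem_cache *child;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(child, root)
 *		pr_debug("child cache %s\n", child->name);
 *	mutex_unlock(&slab_mutex);
 */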
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

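/*
 * Illustrative only: code that must not allocate from the slab before it is
 * usable typically gates on this state; slab_is_available() in
 * mm/slab_common.c is essentially a "slab_state >= UP" check.
 *
 *	if (slab_state >= UP)
 *		cache = kmem_cache_create(...);
 */
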
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

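/*
 * Rough sketch of how these masks are typically consumed (the authoritative
 * logic lives in the cache creation path in mm/slab_common.c): caller flags
 * are validated against SLAB_FLAGS_PERMITTED and then narrowed to
 * CACHE_CREATE_MASK, so only flags meaningful for the compiled-in allocator
 * survive.
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;		// reject unknown/unsupported flags
 *	flags &= CACHE_CREATE_MASK;	// keep what this config understands
 */
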
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

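/*
 * Illustrative only: the generic fallback in mm/slab_common.c is, in essence,
 * a loop over kmem_cache_alloc()/kmem_cache_free(), unwinding on failure.
 *
 *	for (i = 0; i < nr; i++) {
 *		p[i] = kmem_cache_alloc(s, flags);
 *		if (!p[i]) {
 *			__kmem_cache_free_bulk(s, i, p);
 *			return 0;
 *		}
 *	}
 *	return i;
 */
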
static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}

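/*
 * Illustrative only: debug-only paths in the allocator can use this helper to
 * stay cheap when slub_debug is off, e.g.
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		record_free_track(s, object);	// hypothetical debug hook
 */
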
#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

/*
 * Expects a pointer to a slab page. Please note that a PageSlab() check
 * isn't sufficient, as it also returns true for tail pages of compound
 * slab pages, which do not have the slab_cache pointer set.
 * So this function assumes that the page can pass the
 * PageSlab() && !PageTail() check.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}

/*
 * Charge the slab page belonging to the non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int nr_pages = 1 << order;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	int ret;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	while (memcg && !css_tryget_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	rcu_read_unlock();

	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    nr_pages);
		percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
		return 0;
	}

	ret = memcg_kmem_charge(memcg, gfp, nr_pages);
	if (ret)
		goto out;

	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages);

	/* transfer try_charge() page references to kmem_cache */
	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
	css_put_many(&memcg->css, nr_pages);
out:
	css_put(&memcg->css);
	return ret;
}

/*
 * Uncharge a slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	int nr_pages = 1 << order;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	if (likely(!mem_cgroup_is_root(memcg))) {
		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
		memcg_kmem_uncharge(memcg, nr_pages);
	} else {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -nr_pages);
	}
	rcu_read_unlock();

	percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    1 << order);
		return 0;
	}

	return memcg_charge_slab(page, gfp, order, s);
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
		return;
	}

	memcg_uncharge_slab(page, order, s);
}

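/*
 * Illustrative sketch only (the real callers are the allocators' slab page
 * allocation and freeing paths): a freshly allocated slab page is accounted
 * with charge_slab_page() and unaccounted with uncharge_slab_page() when it
 * goes back to the page allocator.
 *
 *	page = alloc_pages_node(node, flags, order);
 *	if (page && charge_slab_page(page, flags, order, s)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	uncharge_slab_page(page, order, s);
 *	__free_pages(page, order);
 */
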
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !memcg_kmem_enabled() &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && !slab_equal_or_root(cachep, s),
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

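/*
 * Illustrative only: the free paths use this to catch objects freed to the
 * wrong cache (and, with memcg, to resolve the actual per-memcg child cache
 * an object came from). A caller looks roughly like:
 *
 *	s = cache_from_obj(s, object);
 *	if (!s)
 *		return;
 *	// ... proceed to free 'object' into cache 's' ...
 */
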
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

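/*
 * Illustrative sketch only (modelled on the allocators' fast paths, which are
 * the real callers): the two hooks bracket every allocation, and the
 * 'size'/'p' arguments let the post hook cover bulk allocations as well.
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *	object = ...;				// allocator-specific fast/slow path
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 */
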
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif

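/*
 * Illustrative only: a typical walk over the per-node structures, e.g. to sum
 * a per-node counter across all nodes (SLUB's statistics paths do this):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *	unsigned long nr_partial = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */
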
void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

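/*
 * Illustrative only: the allocators consult these helpers to decide whether
 * an object must be zeroed on allocation or wiped on free, e.g.
 *
 *	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
 *		memset(object, 0, s->object_size);
 *
 *	if (slab_want_init_on_free(s))
 *		memset(object, 0, s->object_size);	// before it goes back on the freelist
 */
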
#endif /* MM_SLAB_H */