/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/android_vendor.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
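
/*
 * Illustrative use (hedged): early callers test slab_state to decide
 * whether the allocator is ready. For instance, slab_is_available() in
 * mm/slab_common.c boils down to:
 *
 *	return slab_state >= UP;
 */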

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
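
/*
 * Usage sketch (hedged; simplified from the __kmalloc() paths): map a
 * request size to its kmalloc cache before allocating from it.
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	...allocate from s...
 */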

gfp_t kmalloc_fix_flags(gfp_t flags);

#ifdef CONFIG_SLUB
/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
#ifdef CONFIG_STACKTRACE
	ANDROID_OEM_DATA(1);
#endif
};

enum track_item { TRACK_ALLOC, TRACK_FREE };
#endif

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
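
/*
 * Enforcement sketch (hedged; this is roughly the check done in
 * kmem_cache_create_usercopy()): cache creation rejects any flag outside
 * the permitted mask.
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED) {
 *		err = -EINVAL;
 *		goto out_unlock;
 *	}
 */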

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
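
/*
 * Usage sketch for the bulk API (hedged; mirrors the public
 * kmem_cache_alloc_bulk()/kmem_cache_free_bulk() wrappers): allocate and
 * release a batch of objects with one call each.
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		...use the objects...
 *		kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 *	}
 */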

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
extern unsigned long get_each_object_track(struct kmem_cache *s,
		struct page *page, enum track_item alloc,
		int (*fn)(const struct kmem_cache *, const void *,
		const struct track *, void *), void *private);
extern slab_flags_t slub_debug;
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#ifdef CONFIG_SLUB
static inline unsigned long get_each_object_track(struct kmem_cache *s,
		struct page *page, enum track_item alloc,
		int (*fn)(const struct kmem_cache *, const void *,
		const struct track *, void *), void *private)
{
	return 0;
}
#endif
#endif
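
/*
 * Usage sketch for get_each_object_track() (illustrative; the callback
 * name and body are hypothetical). The helper walks every object on
 * @page and invokes @fn with that object's alloc or free track.
 *
 *	static int show_alloc_site(const struct kmem_cache *s,
 *				   const void *object,
 *				   const struct track *t, void *private)
 *	{
 *		pr_info("%s: obj %p allocated from %pS by pid %d\n",
 *			s->name, object, (void *)t->addr, t->pid);
 *		return 0;
 *	}
 *
 *	get_each_object_track(s, page, TRACK_ALLOC, show_alloc_site, NULL);
 */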

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}
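
/*
 * Typical use (sketch): gate relatively expensive debug-only work behind
 * the static key, e.g. on the SLUB free path:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *		...run the free-path sanity checks...
 */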

#ifdef CONFIG_MEMCG_KMEM
static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
	/*
	 * page->mem_cgroup and page->obj_cgroups are sharing the same
	 * space. To distinguish between them in case we don't know for sure
	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
	 * always set the lowest bit of obj_cgroups.
	 */
	return (struct obj_cgroup **)
		((unsigned long)page->obj_cgroups & ~0x1UL);
}

static inline bool page_has_obj_cgroups(struct page *page)
{
	return ((unsigned long)page->obj_cgroups & 0x1UL);
}

int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_obj_cgroups(page));
	page->obj_cgroups = NULL;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
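
/*
 * Worked example (numbers are illustrative): for a cache with s->size of
 * 128 bytes on a 64-bit kernel, each accounted object is charged
 * 128 + sizeof(struct obj_cgroup *) = 136 bytes, so a bulk charge of
 * 16 objects accounts 16 * 136 = 2176 bytes to the objcg.
 */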

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
		return false;
	}

	*objcgp = objcg;
	return true;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
				   struct pglist_data *pgdat,
				   int idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_enabled() || !objcg)
		return;

	flags &= ~__GFP_ACCOUNT;
	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);

			if (!page_has_obj_cgroups(page) &&
			    memcg_alloc_page_obj_cgroups(page, s, flags)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_obj_cgroups(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup *objcg;
	struct page *page;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		page = virt_to_head_page(p[i]);
		if (!page_has_obj_cgroups(page))
			continue;

		if (!s_orig)
			s = page->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, page, p[i]);
		objcg = page_obj_cgroups(page)[off];
		if (!objcg)
			continue;

		page_obj_cgroups(page)[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}
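
/*
 * Accounting lifecycle, summarized (hedged reading of the hooks above):
 * the pre-alloc hook charges objects * obj_full_size(s) up front; the
 * post-alloc hook records the objcg per object, uncharging any slot it
 * could not record; the free hook clears the per-object pointer and
 * reverses both the charge and the vmstat delta.
 */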

#else /* CONFIG_MEMCG_KMEM */
static inline bool page_has_obj_cgroups(struct page *page)
{
	return false;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}
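
/*
 * Usage sketch (hedged; simplified from the kfree()/kmem_cache_free()
 * paths): recover the owning cache from a bare object pointer.
 *
 *	struct kmem_cache *s = virt_to_cache(obj);
 *
 *	if (s)
 *		...free obj back into s...
 */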

static __always_inline void account_slab_page(struct page *page, int order,
					      struct kmem_cache *s)
{
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
						struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_page_obj_cgroups(page);

	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
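
/*
 * Intended call site, sketched (hedged): kmem_cache_free() validates the
 * caller-supplied cache against the one the object really belongs to.
 *
 *	s = cache_from_obj(s, x);
 *	if (!s)
 *		return;
 *	...free x into s...
 */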

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there or track
	 * user information, then we can only use the space before that
	 * information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation.
	 */
	return s->size;
#endif
}
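
/*
 * Example (illustrative): slab_ksize() is what lets ksize() report the
 * usable size rather than the requested one. A kmalloc(13, GFP_KERNEL)
 * object comes from the kmalloc-16 cache, so ksize() on it returns 16
 * when no debugging metadata restricts the usable area.
 */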

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init)
{
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, s->object_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
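
/*
 * Hook pairing sketch (hedged; modeled on the allocator fast paths):
 * every allocation brackets its work with the two hooks above.
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 *	if (!s)
 *		return NULL;
 *	...grab an object from the freelist or the slow path...
 *	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
 */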

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
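
/*
 * Usage sketch (illustrative; assumes the SLUB-only nr_partial field):
 * sum the partial slabs of a cache across all NUMA nodes.
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += n->nr_partial;
 */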

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
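
/*
 * Semantics sketch (hedged): with the init_on_alloc=1 boot parameter,
 * slab_want_init_on_alloc() makes plain kmalloc() behave like kzalloc()
 * for caches without constructors; slab_want_init_on_free() is checked on
 * the free path, roughly:
 *
 *	if (slab_want_init_on_free(s))
 *		memset(object, 0, s->object_size);
 */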

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#endif /* MM_SLAB_H */