/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added on size */
	unsigned int align;		/* Alignment as calculated */
	slab_flags_t flags;		/* Active flags on the slab */
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};

#else /* !CONFIG_SLOB */

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches. NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches. This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches. While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];
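
/*
 * Illustrative contents only (the table itself is defined in
 * mm/slab_common.c): an entry pairs a size such as 64 with the per-type
 * cache names, e.g. "kmalloc-64", "kmalloc-rcl-64" and "dma-kmalloc-64".
 */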

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}
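
/*
 * Illustrative use (hypothetical caller; do_expensive_checks() is made up
 * for this example and is not part of the kernel):
 *
 *	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *		do_expensive_checks(s, object);
 *
 * The static key keeps the common !slub_debug case down to a single
 * patched-out branch.
 */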

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
	/*
	 * page->mem_cgroup and page->obj_cgroups share the same
	 * space. To distinguish between them in case we don't know for sure
	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
	 * always set the lowest bit of obj_cgroups.
	 */
	return (struct obj_cgroup **)
		((unsigned long)page->obj_cgroups & ~0x1UL);
}

/*
 * Expects a pointer to a slab page. Note that the PageSlab() check
 * isn't sufficient, as it returns true also for tail compound slab pages,
 * which do not have the slab_cache pointer set.
 * So this function assumes that the page can pass the
 * PageSlab() && !PageTail() check.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	unsigned int objects = objs_per_slab_page(s, page);
	void *vec;

	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
			   page_to_nid(page));
	if (!vec)
		return -ENOMEM;

	kmemleak_not_leak(vec);
	page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_obj_cgroups(page));
	page->obj_cgroups = NULL;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
							   struct obj_cgroup **objcgp,
							   size_t objects, gfp_t flags)
{
	struct kmem_cache *cachep;

	cachep = memcg_kmem_get_cache(s, objcgp);
	if (is_root_cache(cachep))
		return s;

	if (obj_cgroup_charge(*objcgp, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(*objcgp);
		memcg_kmem_put_cache(cachep);
		cachep = NULL;
	}

	return cachep;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
				   struct pglist_data *pgdat,
				   int idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      size_t size, void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);
			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_obj_cgroups(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
	memcg_kmem_put_cache(s);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
					void *p)
{
	struct obj_cgroup *objcg;
	unsigned int off;

	if (!memcg_kmem_enabled() || is_root_cache(s))
		return;

	off = obj_to_index(s, page, p);
	objcg = page_obj_cgroups(page)[off];
	page_obj_cgroups(page)[off] = NULL;

	obj_cgroup_uncharge(objcg, obj_full_size(s));
	mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
			-obj_full_size(s));

	obj_cgroup_put(objcg);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
							    struct obj_cgroup **objcgp,
							    size_t objects, gfp_t flags)
{
	return NULL;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      size_t size, void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
					void *p)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG_KMEM
	if (memcg_kmem_enabled() && !is_root_cache(s)) {
		int ret;

		ret = memcg_alloc_page_obj_cgroups(page, s, gfp);
		if (ret)
			return ret;

		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
	}
#endif
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
	return 0;
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG_KMEM
	if (memcg_kmem_enabled() && !is_root_cache(s)) {
		memcg_free_page_obj_cgroups(page);
		percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
	}
#endif
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !memcg_kmem_enabled() &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && !slab_equal_or_root(cachep, s),
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there or
	 * track user information, then we can only use the space
	 * before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation.
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_slab_pre_alloc_hook(s, objcgp, size, flags);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg,
					gfp_t flags, size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled() && !is_root_cache(s))
		memcg_slab_post_alloc_hook(s, objcg, size, p);
}
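
/*
 * Sketch of how an allocator's allocation path is expected to pair these
 * hooks (illustrative only; the real callers live in mm/slab.c and
 * mm/slub.c):
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 *	if (!s)
 *		return NULL;
 *	... obtain the object from the cache ...
 *	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
 */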

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
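
/*
 * Illustrative use (hypothetical helper, not part of this header): summing
 * the per-node partial-slab counts of a SLUB cache.
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */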

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
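
/*
 * Sketch of how the allocators are expected to consume these helpers
 * (illustrative only, assuming an object freshly taken from, or about to be
 * returned to, a cache):
 *
 *	if (slab_want_init_on_alloc(gfpflags, s))
 *		memset(object, 0, s->object_size);
 *
 *	if (slab_want_init_on_free(s))
 *		memset(object, 0, s->object_size);
 */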

#endif /* MM_SLAB_H */