#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

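/*
 * Illustrative only (not declared here): early-boot callers are expected to
 * check slab_state before touching the allocator, along the lines of a
 * slab_is_available() helper returning "slab_state >= UP".
 */
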
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

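/*
 * Work out the effective alignment of a cache from its creation flags, the
 * caller-requested alignment and the object size (e.g. honouring
 * SLAB_HWCACHE_ALIGN). The exact policy lives with the implementation in
 * the common slab code.
 */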
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

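/*
 * Helpers for setting up caches during early boot, before the full
 * kmem_cache_create() machinery is usable: create_boot_cache() initialises
 * a statically allocated struct kmem_cache, and create_kmalloc_cache() is
 * expected to allocate and register one of the boot-time kmalloc caches.
 */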
extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

struct mem_cgroup;
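/*
 * SLUB may satisfy a cache creation request by aliasing (merging with) an
 * existing compatible cache. The other allocators never merge, so their
 * stub simply reports "no alias found" by returning NULL.
 */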
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

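/*
 * The combined mask below groups every flag that is valid at cache creation
 * time. Sketch of the expected use in the common creation path (not part of
 * this header):
 *
 *	flags &= CACHE_CREATE_MASK;
 */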
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

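/*
 * Tear down the per-allocator state of a cache for kmem_cache_destroy().
 * By convention a non-zero return is expected to mean the cache still has
 * live objects and cannot be released.
 */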
int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

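/*
 * Snapshot of a cache's statistics, filled in by the active allocator and
 * consumed by the common /proc/slabinfo code.
 */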
struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

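/*
 * Hooks used by the common /proc/slabinfo implementation: get_slabinfo()
 * fills in the snapshot above, slabinfo_show_stats() appends any
 * allocator-specific statistics, and slabinfo_write() handles tuning writes
 * to /proc/slabinfo.
 */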
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

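/*
 * Helpers for kmemcg-aware allocation. With CONFIG_MEMCG_KMEM, a root cache
 * can have per-memcg child caches hanging off its memcg_params; the helpers
 * below tell root and child caches apart, account the pages a child cache
 * owns, and check that an object really belongs to the cache (or its root)
 * it is being freed to. Without kmemcg they collapse to trivial stubs.
 */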
#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return (is_root_cache(cachep) && !memcg) ||
				(cachep->memcg_params->memcg == memcg);
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
	if (is_root_cache(s))
		return;

	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
		mem_cgroup_destroy_cache(s);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}
#endif

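/*
 * Map a free request back onto the cache that actually owns the object.
 * With kmemcg (or SLAB_DEBUG_FREE) the cache recorded in the object's page
 * is trusted over the one passed by the caller, which may be the root cache
 * while the object lives in a per-memcg child cache.
 */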
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
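/*
 * Sketch of the intended use on the free path (illustrative, not defined
 * here):
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		... free x into s ...
 *	}
 */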
#endif