// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}
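/*
 * Deferred vfree: callers that cannot sleep (e.g. interrupt context)
 * queue the allocation on a per-cpu llist and this work function
 * frees it later; the llist_node is stored at the start of the
 * to-be-freed allocation itself, so no extra memory is required.
 */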

/*** Page table manipulation functions ***/

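/*
 * The vunmap_*_range() helpers below walk the kernel page tables
 * top-down (pgd -> p4d -> pud -> pmd -> pte), clearing every entry
 * in [addr, end). Huge mappings are torn down at the p4d/pud/pmd
 * level; TLB flushing is left to the callers.
 */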
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_clear_huge(p4d))
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}
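/*
 * Illustrative sketch (hypothetical caller, not kernel code): mapping
 * two pages at a kva range would look roughly like
 *
 *	struct page *pages[2] = { page0, page1 };
 *	int nr = vmap_page_range(addr, addr + 2 * PAGE_SIZE,
 *				 PAGE_KERNEL, pages);
 *
 * where page0/page1 and addr are placeholders; on success nr is the
 * number of ptes set up (2 here), otherwise a negative errno.
 */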

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0

#define VM_LAZY_FREE	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from the slab directly we reuse an object from this
 * cache to make things faster, especially in the "no edge" split
 * of a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used together with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and for merging when a vmap
 * object is released.
 *
 * Each vmap_area node stores the maximum available free block
 * size of its sub-tree, left or right. This makes it possible to
 * find the lowest-address free area of a given size.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * allowing more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
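/*
 * The preloaded object is consumed via __this_cpu_xchg() in
 * adjust_va_to_fit_type() and is refilled on the allocation path
 * in alloc_vmap_area() (see the "retry:" label there).
 */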

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when a node is removed from the tree or rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size,
	compute_subtree_max_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the address of the parent node and of its
 * left or right link for further processing, i.e. the link where
 * the new va->rb_node will be attached.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction
	 * to descend; call it "link", as that is where the new
	 * va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger the BUG() if the new area partially or fully
		 * overlaps an existing one.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else
			BUG();
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. We just perform a simple
		 * insertion into the tree. We do not set
		 * va->subtree_max_size to its current size before
		 * calling rb_insert_augmented(), because we populate
		 * the tree from the bottom up to the parent levels
		 * once the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after
		 * insertion, to let __augment_tree_propagate_from()
		 * put everything into the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(struct rb_node *n)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long size;
	bool found = false;

	if (n == NULL)
		return;

	va = rb_entry(n, struct vmap_area, rb_node);
	size = va->subtree_max_size;
	node = n;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) == size) {
			node = node->rb_left;
		} else {
			if (va_size(va) == size) {
				found = true;
				break;
			}

			node = node->rb_right;
		}
	}

	if (!found) {
		va = rb_entry(n, struct vmap_area, rb_node);
		pr_emerg("tree is corrupted: %lu, %lu\n",
			va_size(va), va->subtree_max_size);
	}

	augment_tree_propagate_check(n->rb_left);
	augment_tree_propagate_check(n->rb_right);
}
#endif

/*
 * This function populates subtree_max_size from the bottom up to
 * the upper levels, starting from the VA point. The propagation
 * must be done when the VA size is modified by changing its
 * va_start/va_end, or when a new VA is inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree (free path);
 * - After VA has been shrunk (allocation path);
 * - After VA has been increased (merging path).
 *
 * Please note that it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the way up
 * to the root node.
 *
 *      4--8
 *       /\
 *      /  \
 *     /    \
 *   2--2  8--8
 *
 * For example, if we modify the node 4, shrinking it to 2, then
 * no modification is required at all. If we shrink the node 2 to 1,
 * only its subtree_max_size is updated, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	struct rb_node *node = &va->rb_node;
	unsigned long new_va_sub_max_size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);
		new_va_sub_max_size = compute_subtree_max_size(va);

		/*
		 * If the newly calculated maximum available size of the
		 * subtree is equal to the current one, then it means that
		 * the tree is propagated correctly. So we have to stop at
		 * this point to save cycles.
		 */
		if (va->subtree_max_size == new_va_sub_max_size)
			break;

		va->subtree_max_size = new_va_sub_max_size;
		node = rb_parent(&va->rb_node);
	}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check(free_vmap_area_root.rb_node);
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	link_va(va, root, parent, link, head);
	augment_tree_propagate_from(va);
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If no coalescing is possible, a new
 * free area is inserted. If the VA has been merged, it is
 * freed.
 */
static __always_inline void
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			sibling->va_end = va->va_end;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			if (merged)
				unlink_va(va, root);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);
			return;
		}
	}

insert:
	if (!merged) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}
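/*
 * Worked example: freeing [0x3000, 0x4000) between the free blocks
 * [0x1000, 0x3000) and [0x4000, 0x6000) first extends the next block
 * down to [0x3000, 0x6000), then merges it into the previous one,
 * leaving a single free block [0x1000, 0x6000) in the tree and list.
 */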

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that satisfies the request described by the passed parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * It does not make sense to go deeper into the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * only once due to the "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}
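/*
 * Complexity note: the search above is O(log n); the cached
 * subtree_max_size values let it skip any subtree that cannot
 * contain a free block of the requested length.
 */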

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
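/*
 * Worked example: for a free VA spanning [0x1000, 0x5000), a request
 * with nva_start_addr == 0x2000 and size == 0x1000 touches neither
 * edge, so it is classified as NE_FIT_TYPE and the VA must be split
 * in two by adjust_va_to_fit_type() below.
 */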

static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For the percpu allocator we do not do any
			 * pre-allocation and leave it as it is. The reason
			 * is that it most likely never ends up with
			 * NE_FIT_TYPE splitting: for percpu allocations,
			 * offsets and sizes are aligned to a fixed align
			 * request, i.e. RE_FIT_TYPE and FL_FIT_TYPE are its
			 * main fitting cases.
			 *
			 * There are a few exceptions though; as an example,
			 * the first allocation (early boot up), when we have
			 * "one" big free space that has to be split.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise, "vend" is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	struct vmap_area *va;
	enum fit_type type;
	int ret;

	va = find_vmap_lowest_match(size, align, vstart);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Classify what we have found. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
	if (ret)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(size);
#endif

	return nva_start_addr;
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va, *pva;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();

	va = kmem_cache_alloc_node(vmap_area_cachep,
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	/*
	 * Preload this CPU with one extra vmap_area object to ensure
	 * that we have it available when the fit type of the free area
	 * is NE_FIT_TYPE.
	 *
	 * The preload is done in a non-atomic context, thus it allows us
	 * to use more permissive allocation masks and to be more stable
	 * under low memory conditions and high memory pressure.
	 *
	 * Even if it fails we do not really care about that. We just
	 * proceed as is. The "overflow" path will refill the cache we
	 * allocate from.
	 */
	preempt_disable();
	if (!__this_cpu_read(ne_fit_preload_node)) {
		preempt_enable();
		pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);
		preempt_disable();

		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) {
			if (pva)
				kmem_cache_free(vmap_area_cachep, pva);
		}
	}

	spin_lock(&vmap_area_lock);
	preempt_enable();

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	addr = __alloc_vmap_area(size, align, vstart, vend);
	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);

	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);

static void __free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	unlink_va(va, &vmap_area_root);

	/*
	 * Merge VA with its neighbors, otherwise just add it.
	 */
	merge_or_add_vmap_area(va,
		&free_vmap_area_root, &free_vmap_area_list);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
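/*
 * Worked example (4K pages assumed): with 8 online CPUs,
 * fls(8) == 4, so lazy_max_pages() == 4 * 8192 pages, i.e. up to
 * 128MB worth of lazily freed areas may accumulate before a purge
 * with a global TLB flush is triggered.
 */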

static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct llist_node *valist;
	struct vmap_area *va;
	struct vmap_area *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	valist = llist_del_all(&vmap_purge_list);
	if (unlikely(valist == NULL))
		return false;

	/*
	 * TODO: to calculate a flush range without looping.
	 * The list can be up to lazy_max_pages() elements.
	 */
	llist_for_each_entry(va, valist, purge_list) {
		if (va->va_start < start)
			start = va->va_start;
		if (va->va_end > end)
			end = va->va_end;
	}

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&vmap_area_lock);
	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;

		__free_vmap_area(va);
		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&vmap_area_lock);
	}
	spin_unlock(&vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy;

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/* After this point, we may free va at any time */
	llist_add(&va->purge_list, &vmap_purge_list);

	if (unlikely(nr_lazy > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	unmap_vmap_area(va);
	if (debug_pagealloc_enabled())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
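/*
 * Worked example (illustrative): on a 64-bit kernel with 4K pages and
 * NR_CPUS == 16, VMALLOC_PAGES is 32M pages, so VMALLOC_PAGES / 16 / 16
 * == 131072; that is clamped by VMAP_BBMAP_BITS_MAX, giving
 * VMAP_BBMAP_BITS == 1024 and thus a VMAP_BLOCK_SIZE of 4MB.
 */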

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
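/*
 * I.e. the index is the distance of the address from the (block
 * aligned) base of the vmalloc area, in units of VMAP_BLOCK_SIZE.
 */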

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocate a new vmap_block and occupy 2^order pages
 * in this block. The number of occupied pages cannot exceed VMAP_BBMAP_BITS.
 * @order: how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}
1492
Nick Piggindb64fe02008-10-18 20:27:03 -07001493static void free_vmap_block(struct vmap_block *vb)
1494{
1495 struct vmap_block *tmp;
1496 unsigned long vb_idx;
1497
Nick Piggindb64fe02008-10-18 20:27:03 -07001498 vb_idx = addr_to_vb_idx(vb->va->va_start);
1499 spin_lock(&vmap_block_tree_lock);
1500 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
1501 spin_unlock(&vmap_block_tree_lock);
1502 BUG_ON(tmp != vb);
1503
Jeremy Fitzhardinge64141da2010-12-02 14:31:18 -08001504 free_vmap_area_noflush(vb->va);
Lai Jiangshan22a3c7d2011-03-18 12:13:08 +08001505 kfree_rcu(vb, rcu_head);
Nick Piggindb64fe02008-10-18 20:27:03 -07001506}
1507
Nick Piggin02b709d2010-02-01 22:25:57 +11001508static void purge_fragmented_blocks(int cpu)
1509{
1510 LIST_HEAD(purge);
1511 struct vmap_block *vb;
1512 struct vmap_block *n_vb;
1513 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1514
1515 rcu_read_lock();
1516 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1517
1518 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1519 continue;
1520
1521 spin_lock(&vb->lock);
1522 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1523 vb->free = 0; /* prevent further allocs after releasing lock */
1524 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
Roman Pen7d61bfe2015-04-15 16:13:55 -07001525 vb->dirty_min = 0;
1526 vb->dirty_max = VMAP_BBMAP_BITS;
Nick Piggin02b709d2010-02-01 22:25:57 +11001527 spin_lock(&vbq->lock);
1528 list_del_rcu(&vb->free_list);
1529 spin_unlock(&vbq->lock);
1530 spin_unlock(&vb->lock);
1531 list_add_tail(&vb->purge, &purge);
1532 } else
1533 spin_unlock(&vb->lock);
1534 }
1535 rcu_read_unlock();
1536
1537 list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1538 list_del(&vb->purge);
1539 free_vmap_block(vb);
1540 }
1541}
1542
Nick Piggin02b709d2010-02-01 22:25:57 +11001543static void purge_fragmented_blocks_allcpus(void)
1544{
1545 int cpu;
1546
1547 for_each_possible_cpu(cpu)
1548 purge_fragmented_blocks(cpu);
1549}
1550
Nick Piggindb64fe02008-10-18 20:27:03 -07001551static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1552{
1553 struct vmap_block_queue *vbq;
1554 struct vmap_block *vb;
Roman Pencf725ce2015-04-15 16:13:52 -07001555 void *vaddr = NULL;
Nick Piggindb64fe02008-10-18 20:27:03 -07001556 unsigned int order;
1557
Alexander Kuleshov891c49a2015-11-05 18:46:51 -08001558 BUG_ON(offset_in_page(size));
Nick Piggindb64fe02008-10-18 20:27:03 -07001559 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
Jan Karaaa91c4d2012-07-31 16:41:37 -07001560 if (WARN_ON(size == 0)) {
1561 /*
1562	 * Allocating 0 bytes isn't what the caller wants since
1563	 * get_order(0) returns a funny result. Just warn and terminate
1564 * early.
1565 */
1566 return NULL;
1567 }
Nick Piggindb64fe02008-10-18 20:27:03 -07001568 order = get_order(size);
1569
Nick Piggindb64fe02008-10-18 20:27:03 -07001570 rcu_read_lock();
1571 vbq = &get_cpu_var(vmap_block_queue);
1572 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
Roman Pencf725ce2015-04-15 16:13:52 -07001573 unsigned long pages_off;
Nick Piggindb64fe02008-10-18 20:27:03 -07001574
1575 spin_lock(&vb->lock);
Roman Pencf725ce2015-04-15 16:13:52 -07001576 if (vb->free < (1UL << order)) {
1577 spin_unlock(&vb->lock);
1578 continue;
1579 }
Nick Piggin02b709d2010-02-01 22:25:57 +11001580
Roman Pencf725ce2015-04-15 16:13:52 -07001581 pages_off = VMAP_BBMAP_BITS - vb->free;
1582 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
Nick Piggin02b709d2010-02-01 22:25:57 +11001583 vb->free -= 1UL << order;
1584 if (vb->free == 0) {
1585 spin_lock(&vbq->lock);
1586 list_del_rcu(&vb->free_list);
1587 spin_unlock(&vbq->lock);
Nick Piggindb64fe02008-10-18 20:27:03 -07001588 }
Roman Pencf725ce2015-04-15 16:13:52 -07001589
Nick Piggindb64fe02008-10-18 20:27:03 -07001590 spin_unlock(&vb->lock);
Nick Piggin02b709d2010-02-01 22:25:57 +11001591 break;
Nick Piggindb64fe02008-10-18 20:27:03 -07001592 }
Nick Piggin02b709d2010-02-01 22:25:57 +11001593
Tejun Heo3f04ba82009-10-29 22:34:12 +09001594 put_cpu_var(vmap_block_queue);
Nick Piggindb64fe02008-10-18 20:27:03 -07001595 rcu_read_unlock();
1596
Roman Pencf725ce2015-04-15 16:13:52 -07001597 /* Allocate new block if nothing was found */
1598 if (!vaddr)
1599 vaddr = new_vmap_block(order, gfp_mask);
Nick Piggindb64fe02008-10-18 20:27:03 -07001600
Roman Pencf725ce2015-04-15 16:13:52 -07001601 return vaddr;
Nick Piggindb64fe02008-10-18 20:27:03 -07001602}
1603
1604static void vb_free(const void *addr, unsigned long size)
1605{
1606 unsigned long offset;
1607 unsigned long vb_idx;
1608 unsigned int order;
1609 struct vmap_block *vb;
1610
Alexander Kuleshov891c49a2015-11-05 18:46:51 -08001611 BUG_ON(offset_in_page(size));
Nick Piggindb64fe02008-10-18 20:27:03 -07001612 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
Nick Pigginb29acbd2008-12-01 13:13:47 -08001613
1614 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
1615
Nick Piggindb64fe02008-10-18 20:27:03 -07001616 order = get_order(size);
1617
1618 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
Roman Pen7d61bfe2015-04-15 16:13:55 -07001619 offset >>= PAGE_SHIFT;
Nick Piggindb64fe02008-10-18 20:27:03 -07001620
1621 vb_idx = addr_to_vb_idx((unsigned long)addr);
1622 rcu_read_lock();
1623 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
1624 rcu_read_unlock();
1625 BUG_ON(!vb);
1626
Jeremy Fitzhardinge64141da2010-12-02 14:31:18 -08001627 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
1628
Chintan Pandya82a2e922018-06-07 17:06:46 -07001629 if (debug_pagealloc_enabled())
1630 flush_tlb_kernel_range((unsigned long)addr,
1631 (unsigned long)addr + size);
1632
Nick Piggindb64fe02008-10-18 20:27:03 -07001633 spin_lock(&vb->lock);
Roman Pen7d61bfe2015-04-15 16:13:55 -07001634
1635 /* Expand dirty range */
1636 vb->dirty_min = min(vb->dirty_min, offset);
1637 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
MinChan Kimd0868172009-03-31 15:19:26 -07001638
Nick Piggindb64fe02008-10-18 20:27:03 -07001639 vb->dirty += 1UL << order;
1640 if (vb->dirty == VMAP_BBMAP_BITS) {
Nick Pigginde560422010-02-01 22:24:18 +11001641 BUG_ON(vb->free);
Nick Piggindb64fe02008-10-18 20:27:03 -07001642 spin_unlock(&vb->lock);
1643 free_vmap_block(vb);
1644 } else
1645 spin_unlock(&vb->lock);
1646}
1647
Rick Edgecombe868b1042019-04-25 17:11:36 -07001648static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
Nick Piggindb64fe02008-10-18 20:27:03 -07001649{
Nick Piggindb64fe02008-10-18 20:27:03 -07001650 int cpu;
Nick Piggindb64fe02008-10-18 20:27:03 -07001651
Jeremy Fitzhardinge9b463332008-10-28 19:22:34 +11001652 if (unlikely(!vmap_initialized))
1653 return;
1654
Christoph Hellwig5803ed22016-12-12 16:44:20 -08001655 might_sleep();
1656
Nick Piggindb64fe02008-10-18 20:27:03 -07001657 for_each_possible_cpu(cpu) {
1658 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1659 struct vmap_block *vb;
1660
1661 rcu_read_lock();
1662 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
Nick Piggindb64fe02008-10-18 20:27:03 -07001663 spin_lock(&vb->lock);
Roman Pen7d61bfe2015-04-15 16:13:55 -07001664 if (vb->dirty) {
1665 unsigned long va_start = vb->va->va_start;
Nick Piggindb64fe02008-10-18 20:27:03 -07001666 unsigned long s, e;
Joonsoo Kimb136be5e2013-09-11 14:21:40 -07001667
Roman Pen7d61bfe2015-04-15 16:13:55 -07001668 s = va_start + (vb->dirty_min << PAGE_SHIFT);
1669 e = va_start + (vb->dirty_max << PAGE_SHIFT);
Nick Piggindb64fe02008-10-18 20:27:03 -07001670
Roman Pen7d61bfe2015-04-15 16:13:55 -07001671 start = min(s, start);
1672 end = max(e, end);
1673
Nick Piggindb64fe02008-10-18 20:27:03 -07001674 flush = 1;
Nick Piggindb64fe02008-10-18 20:27:03 -07001675 }
1676 spin_unlock(&vb->lock);
1677 }
1678 rcu_read_unlock();
1679 }
1680
Christoph Hellwigf9e09972016-12-12 16:44:23 -08001681 mutex_lock(&vmap_purge_lock);
Christoph Hellwig0574ecd2016-12-12 16:44:07 -08001682 purge_fragmented_blocks_allcpus();
1683 if (!__purge_vmap_area_lazy(start, end) && flush)
1684 flush_tlb_kernel_range(start, end);
Christoph Hellwigf9e09972016-12-12 16:44:23 -08001685 mutex_unlock(&vmap_purge_lock);
Nick Piggindb64fe02008-10-18 20:27:03 -07001686}
Rick Edgecombe868b1042019-04-25 17:11:36 -07001687
1688/**
1689 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1690 *
1691 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1692 * to amortize TLB flushing overheads. This means that any page you have
1693 * now may, in a former life, have been mapped into a kernel virtual address
1694 * by the vmap layer, so there might be some CPUs with TLB entries still
1695 * referencing that page (in addition to the regular 1:1 kernel mapping).
1696 *
1697 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1698 * be sure that none of the pages we have control over will have any aliases
1699 * from the vmap layer.
1700 */
1701void vm_unmap_aliases(void)
1702{
1703 unsigned long start = ULONG_MAX, end = 0;
1704 int flush = 0;
1705
1706 _vm_unmap_aliases(start, end, flush);
1707}
Nick Piggindb64fe02008-10-18 20:27:03 -07001708EXPORT_SYMBOL_GPL(vm_unmap_aliases);
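
/*
 * Editorial sketch, not part of vmalloc.c: a typical caller flushes lazy
 * vmap aliases before changing page permissions so that no stale TLB
 * entry can still reach the old mapping. set_memory_ro() merely stands
 * in for any attribute change; the helper name is hypothetical.
 */
static int example_make_pages_ro(unsigned long addr, int numpages)
{
	vm_unmap_aliases();	/* drop lazy aliases, flushing TLBs if needed */
	return set_memory_ro(addr, numpages);
}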
1709
1710/**
1711 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1712 * @mem: the pointer returned by vm_map_ram
1713 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1714 */
1715void vm_unmap_ram(const void *mem, unsigned int count)
1716{
Guillermo Julián Moreno65ee03c2016-06-03 14:55:33 -07001717 unsigned long size = (unsigned long)count << PAGE_SHIFT;
Nick Piggindb64fe02008-10-18 20:27:03 -07001718 unsigned long addr = (unsigned long)mem;
Christoph Hellwig9c3acf62016-12-12 16:44:04 -08001719 struct vmap_area *va;
Nick Piggindb64fe02008-10-18 20:27:03 -07001720
Christoph Hellwig5803ed22016-12-12 16:44:20 -08001721 might_sleep();
Nick Piggindb64fe02008-10-18 20:27:03 -07001722 BUG_ON(!addr);
1723 BUG_ON(addr < VMALLOC_START);
1724 BUG_ON(addr > VMALLOC_END);
Shawn Lina1c0b1a2016-03-17 14:20:37 -07001725 BUG_ON(!PAGE_ALIGNED(addr));
Nick Piggindb64fe02008-10-18 20:27:03 -07001726
Christoph Hellwig9c3acf62016-12-12 16:44:04 -08001727 if (likely(count <= VMAP_MAX_ALLOC)) {
Chintan Pandya05e3ff92018-06-07 17:06:53 -07001728 debug_check_no_locks_freed(mem, size);
Nick Piggindb64fe02008-10-18 20:27:03 -07001729 vb_free(mem, size);
Christoph Hellwig9c3acf62016-12-12 16:44:04 -08001730 return;
1731 }
1732
1733 va = find_vmap_area(addr);
1734 BUG_ON(!va);
Chintan Pandya05e3ff92018-06-07 17:06:53 -07001735 debug_check_no_locks_freed((void *)va->va_start,
1736 (va->va_end - va->va_start));
Christoph Hellwig9c3acf62016-12-12 16:44:04 -08001737 free_unmap_vmap_area(va);
Nick Piggindb64fe02008-10-18 20:27:03 -07001738}
1739EXPORT_SYMBOL(vm_unmap_ram);
1740
1741/**
1742 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1743 * @pages: an array of pointers to the pages to be mapped
1744 * @count: number of pages
1745 * @node: prefer to allocate data structures on this node
1746 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
Randy Dunlape99c97a2008-10-29 14:01:09 -07001747 *
Gioh Kim36437632014-04-07 15:37:37 -07001748 * If you use this function for less than VMAP_MAX_ALLOC pages, it can be
1749 * faster than vmap(). But if you mix long-lived and short-lived objects
1750 * with vm_map_ram(), it can consume lots of address space through
1751 * fragmentation (especially on a 32bit machine), and you may eventually
1752 * see allocation failures. Please use this function for short-lived objects.
1753 *
Randy Dunlape99c97a2008-10-29 14:01:09 -07001754 * Returns: a pointer to the address that has been mapped, or %NULL on failure
Nick Piggindb64fe02008-10-18 20:27:03 -07001755 */
1756void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1757{
Guillermo Julián Moreno65ee03c2016-06-03 14:55:33 -07001758 unsigned long size = (unsigned long)count << PAGE_SHIFT;
Nick Piggindb64fe02008-10-18 20:27:03 -07001759 unsigned long addr;
1760 void *mem;
1761
1762 if (likely(count <= VMAP_MAX_ALLOC)) {
1763 mem = vb_alloc(size, GFP_KERNEL);
1764 if (IS_ERR(mem))
1765 return NULL;
1766 addr = (unsigned long)mem;
1767 } else {
1768 struct vmap_area *va;
1769 va = alloc_vmap_area(size, PAGE_SIZE,
1770 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1771 if (IS_ERR(va))
1772 return NULL;
1773
1774 addr = va->va_start;
1775 mem = (void *)addr;
1776 }
1777 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1778 vm_unmap_ram(mem, count);
1779 return NULL;
1780 }
1781 return mem;
1782}
1783EXPORT_SYMBOL(vm_map_ram);
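
/*
 * Editorial sketch, not part of vmalloc.c: the short-lived usage pattern
 * recommended above. Pages come from the page allocator, are mapped
 * briefly, and both the mapping and the pages are released promptly.
 * The function name is hypothetical.
 */
static int example_map_briefly(void)
{
	struct page *pages[4];
	void *va = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pages); i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out;
	}

	va = vm_map_ram(pages, ARRAY_SIZE(pages), NUMA_NO_NODE, PAGE_KERNEL);
	if (va) {
		memset(va, 0, ARRAY_SIZE(pages) * PAGE_SIZE);
		vm_unmap_ram(va, ARRAY_SIZE(pages));	/* same count as above */
	}
out:
	while (i--)
		__free_page(pages[i]);
	return va ? 0 : -ENOMEM;
}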
1784
Joonsoo Kim4341fa42013-04-29 15:07:39 -07001785static struct vm_struct *vmlist __initdata;
Mike Rapoport92eac162019-03-05 15:48:36 -08001786
Tejun Heof0aa6612009-02-20 16:29:08 +09001787/**
Nicolas Pitrebe9b7332011-08-25 00:24:21 -04001788 * vm_area_add_early - add vmap area early during boot
1789 * @vm: vm_struct to add
1790 *
1791 * This function is used to add a fixed kernel vm area to vmlist before
1792 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
1793 * should contain proper values and the other fields should be zero.
1794 *
1795 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1796 */
1797void __init vm_area_add_early(struct vm_struct *vm)
1798{
1799 struct vm_struct *tmp, **p;
1800
1801 BUG_ON(vmap_initialized);
1802 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1803 if (tmp->addr >= vm->addr) {
1804 BUG_ON(tmp->addr < vm->addr + vm->size);
1805 break;
1806 } else
1807 BUG_ON(tmp->addr + tmp->size > vm->addr);
1808 }
1809 vm->next = *p;
1810 *p = vm;
1811}
1812
1813/**
Tejun Heof0aa6612009-02-20 16:29:08 +09001814 * vm_area_register_early - register vmap area early during boot
1815 * @vm: vm_struct to register
Tejun Heoc0c0a292009-02-24 11:57:21 +09001816 * @align: requested alignment
Tejun Heof0aa6612009-02-20 16:29:08 +09001817 *
1818 * This function is used to register kernel vm area before
1819 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1820 * proper values on entry and other fields should be zero. On return,
1821 * vm->addr contains the allocated address.
1822 *
1823 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1824 */
Tejun Heoc0c0a292009-02-24 11:57:21 +09001825void __init vm_area_register_early(struct vm_struct *vm, size_t align)
Tejun Heof0aa6612009-02-20 16:29:08 +09001826{
1827 static size_t vm_init_off __initdata;
Tejun Heoc0c0a292009-02-24 11:57:21 +09001828 unsigned long addr;
Tejun Heof0aa6612009-02-20 16:29:08 +09001829
Tejun Heoc0c0a292009-02-24 11:57:21 +09001830 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1831 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1832
1833 vm->addr = (void *)addr;
Tejun Heof0aa6612009-02-20 16:29:08 +09001834
Nicolas Pitrebe9b7332011-08-25 00:24:21 -04001835 vm_area_add_early(vm);
Tejun Heof0aa6612009-02-20 16:29:08 +09001836}
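
/*
 * Editorial sketch, not part of vmalloc.c: the early-boot pattern
 * described above, similar to how the percpu allocator reserves its
 * first chunk. The vm_struct needs static storage since vmalloc_init()
 * walks vmlist later; names and sizes here are hypothetical.
 */
static struct vm_struct example_early_vm;

static void __init example_reserve_early(void)
{
	example_early_vm.flags = VM_ALLOC;
	example_early_vm.size = 4 * PAGE_SIZE;
	vm_area_register_early(&example_early_vm, PAGE_SIZE);
	/* example_early_vm.addr now holds the reserved address */
}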
1837
Uladzislau Rezki (Sony)68ad4a32019-05-17 14:31:31 -07001838static void vmap_init_free_space(void)
1839{
1840 unsigned long vmap_start = 1;
1841 const unsigned long vmap_end = ULONG_MAX;
1842 struct vmap_area *busy, *free;
1843
1844 /*
1845 * B F B B B F
1846 * -|-----|.....|-----|-----|-----|.....|-
1847 * | The KVA space |
1848 * |<--------------------------------->|
1849 */
1850 list_for_each_entry(busy, &vmap_area_list, list) {
1851 if (busy->va_start - vmap_start > 0) {
1852 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1853 if (!WARN_ON_ONCE(!free)) {
1854 free->va_start = vmap_start;
1855 free->va_end = busy->va_start;
1856
1857 insert_vmap_area_augment(free, NULL,
1858 &free_vmap_area_root,
1859 &free_vmap_area_list);
1860 }
1861 }
1862
1863 vmap_start = busy->va_end;
1864 }
1865
1866 if (vmap_end - vmap_start > 0) {
1867 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1868 if (!WARN_ON_ONCE(!free)) {
1869 free->va_start = vmap_start;
1870 free->va_end = vmap_end;
1871
1872 insert_vmap_area_augment(free, NULL,
1873 &free_vmap_area_root,
1874 &free_vmap_area_list);
1875 }
1876 }
1877}
1878
Nick Piggindb64fe02008-10-18 20:27:03 -07001879void __init vmalloc_init(void)
1880{
Ivan Kokshaysky822c18f2009-01-15 13:50:48 -08001881 struct vmap_area *va;
1882 struct vm_struct *tmp;
Nick Piggindb64fe02008-10-18 20:27:03 -07001883 int i;
1884
Uladzislau Rezki (Sony)68ad4a32019-05-17 14:31:31 -07001885 /*
1886 * Create the cache for vmap_area objects.
1887 */
1888 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
1889
Nick Piggindb64fe02008-10-18 20:27:03 -07001890 for_each_possible_cpu(i) {
1891 struct vmap_block_queue *vbq;
Al Viro32fcfd42013-03-10 20:14:08 -04001892 struct vfree_deferred *p;
Nick Piggindb64fe02008-10-18 20:27:03 -07001893
1894 vbq = &per_cpu(vmap_block_queue, i);
1895 spin_lock_init(&vbq->lock);
1896 INIT_LIST_HEAD(&vbq->free);
Al Viro32fcfd42013-03-10 20:14:08 -04001897 p = &per_cpu(vfree_deferred, i);
1898 init_llist_head(&p->list);
1899 INIT_WORK(&p->wq, free_work);
Nick Piggindb64fe02008-10-18 20:27:03 -07001900 }
Jeremy Fitzhardinge9b463332008-10-28 19:22:34 +11001901
Ivan Kokshaysky822c18f2009-01-15 13:50:48 -08001902 /* Import existing vmlist entries. */
1903 for (tmp = vmlist; tmp; tmp = tmp->next) {
Uladzislau Rezki (Sony)68ad4a32019-05-17 14:31:31 -07001904 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1905 if (WARN_ON_ONCE(!va))
1906 continue;
1907
KyongHodbda5912012-05-29 15:06:49 -07001908 va->flags = VM_VM_AREA;
Ivan Kokshaysky822c18f2009-01-15 13:50:48 -08001909 va->va_start = (unsigned long)tmp->addr;
1910 va->va_end = va->va_start + tmp->size;
KyongHodbda5912012-05-29 15:06:49 -07001911 va->vm = tmp;
Uladzislau Rezki (Sony)68ad4a32019-05-17 14:31:31 -07001912 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
Ivan Kokshaysky822c18f2009-01-15 13:50:48 -08001913 }
Tejun Heoca23e402009-08-14 15:00:52 +09001914
Uladzislau Rezki (Sony)68ad4a32019-05-17 14:31:31 -07001915 /*
1916	 * Now we can initialize the free vmap space.
1917 */
1918 vmap_init_free_space();
Jeremy Fitzhardinge9b463332008-10-28 19:22:34 +11001919 vmap_initialized = true;
Nick Piggindb64fe02008-10-18 20:27:03 -07001920}
1921
Tejun Heo8fc48982009-02-20 16:29:08 +09001922/**
1923 * map_kernel_range_noflush - map kernel VM area with the specified pages
1924 * @addr: start of the VM area to map
1925 * @size: size of the VM area to map
1926 * @prot: page protection flags to use
1927 * @pages: pages to map
1928 *
1929 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
1930 * specify should have been allocated using get_vm_area() and its
1931 * friends.
1932 *
1933 * NOTE:
1934 * This function does NOT do any cache flushing. The caller is
1935 * responsible for calling flush_cache_vmap() on to-be-mapped areas
1936 * before calling this function.
1937 *
1938 * RETURNS:
1939 * The number of pages mapped on success, -errno on failure.
1940 */
1941int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1942 pgprot_t prot, struct page **pages)
1943{
1944 return vmap_page_range_noflush(addr, addr + size, prot, pages);
1945}
1946
1947/**
1948 * unmap_kernel_range_noflush - unmap kernel VM area
1949 * @addr: start of the VM area to unmap
1950 * @size: size of the VM area to unmap
1951 *
1952 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
1953 * specify should have been allocated using get_vm_area() and its
1954 * friends.
1955 *
1956 * NOTE:
1957 * This function does NOT do any cache flushing. The caller is
1958 * responsible for calling flush_cache_vunmap() on to-be-mapped areas
1959 * before calling this function and flush_tlb_kernel_range() after.
1960 */
1961void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1962{
1963 vunmap_page_range(addr, addr + size);
1964}
Huang Ying81e88fd2011-01-12 14:44:55 +08001965EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
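
/*
 * Editorial sketch of the flushing contract in the NOTEs above: a caller
 * of unmap_kernel_range_noflush() owns both flushes itself. This is
 * exactly the sequence unmap_kernel_range() below packages up.
 */
static void example_unmap_with_flushes(unsigned long addr, unsigned long size)
{
	flush_cache_vunmap(addr, addr + size);
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);
}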
Tejun Heo8fc48982009-02-20 16:29:08 +09001966
1967/**
1968 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1969 * @addr: start of the VM area to unmap
1970 * @size: size of the VM area to unmap
1971 *
1972 * Similar to unmap_kernel_range_noflush() but flushes the vcache before
1973 * the unmapping and the TLB after.
1974 */
Nick Piggindb64fe02008-10-18 20:27:03 -07001975void unmap_kernel_range(unsigned long addr, unsigned long size)
1976{
1977 unsigned long end = addr + size;
Tejun Heof6fcba72009-02-20 15:38:48 -08001978
1979 flush_cache_vunmap(addr, end);
Nick Piggindb64fe02008-10-18 20:27:03 -07001980 vunmap_page_range(addr, end);
1981 flush_tlb_kernel_range(addr, end);
1982}
Minchan Kim93ef6d6c2014-06-04 16:11:09 -07001983EXPORT_SYMBOL_GPL(unmap_kernel_range);
Nick Piggindb64fe02008-10-18 20:27:03 -07001984
WANG Chaof6f8ed42014-08-06 16:06:58 -07001985int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
Nick Piggindb64fe02008-10-18 20:27:03 -07001986{
1987 unsigned long addr = (unsigned long)area->addr;
Wanpeng Li762216a2013-09-11 14:22:42 -07001988 unsigned long end = addr + get_vm_area_size(area);
Nick Piggindb64fe02008-10-18 20:27:03 -07001989 int err;
1990
WANG Chaof6f8ed42014-08-06 16:06:58 -07001991 err = vmap_page_range(addr, end, prot, pages);
Nick Piggindb64fe02008-10-18 20:27:03 -07001992
WANG Chaof6f8ed42014-08-06 16:06:58 -07001993 return err > 0 ? 0 : err;
Nick Piggindb64fe02008-10-18 20:27:03 -07001994}
1995EXPORT_SYMBOL_GPL(map_vm_area);
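
/*
 * Editorial sketch, not part of vmalloc.c: pairing get_vm_area() with
 * map_vm_area(), roughly what vmap() below does for its caller. @pages
 * must supply at least @count pages; the function name is hypothetical.
 */
static void *example_map_pages(struct page **pages, unsigned int count)
{
	struct vm_struct *area;

	area = get_vm_area((unsigned long)count << PAGE_SHIFT, VM_MAP);
	if (!area)
		return NULL;

	if (map_vm_area(area, PAGE_KERNEL, pages)) {
		free_vm_area(area);	/* removes the area and kfrees it */
		return NULL;
	}
	return area->addr;
}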
1996
Mitsuo Hayasakaf5252e02011-10-31 17:08:13 -07001997static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
Marek Szyprowski5e6cafc2012-04-13 12:32:09 +02001998 unsigned long flags, const void *caller)
Tejun Heocf88c792009-08-14 15:00:52 +09001999{
Joonsoo Kimc69480a2013-04-29 15:07:30 -07002000 spin_lock(&vmap_area_lock);
Tejun Heocf88c792009-08-14 15:00:52 +09002001 vm->flags = flags;
2002 vm->addr = (void *)va->va_start;
2003 vm->size = va->va_end - va->va_start;
2004 vm->caller = caller;
Minchan Kimdb1aeca2012-01-10 15:08:39 -08002005 va->vm = vm;
Tejun Heocf88c792009-08-14 15:00:52 +09002006 va->flags |= VM_VM_AREA;
Joonsoo Kimc69480a2013-04-29 15:07:30 -07002007 spin_unlock(&vmap_area_lock);
Mitsuo Hayasakaf5252e02011-10-31 17:08:13 -07002008}
Tejun Heocf88c792009-08-14 15:00:52 +09002009
Zhang Yanfei20fc02b2013-07-08 15:59:58 -07002010static void clear_vm_uninitialized_flag(struct vm_struct *vm)
Mitsuo Hayasakaf5252e02011-10-31 17:08:13 -07002011{
Joonsoo Kimd4033af2013-04-29 15:07:35 -07002012 /*
Zhang Yanfei20fc02b2013-07-08 15:59:58 -07002013 * Before removing VM_UNINITIALIZED,
Joonsoo Kimd4033af2013-04-29 15:07:35 -07002014 * we should make sure that vm has proper values.
2015 * Pair with smp_rmb() in show_numa_info().
2016 */
2017 smp_wmb();
Zhang Yanfei20fc02b2013-07-08 15:59:58 -07002018 vm->flags &= ~VM_UNINITIALIZED;
Tejun Heocf88c792009-08-14 15:00:52 +09002019}
2020
Nick Piggindb64fe02008-10-18 20:27:03 -07002021static struct vm_struct *__get_vm_area_node(unsigned long size,
David Miller2dca6992009-09-21 12:22:34 -07002022 unsigned long align, unsigned long flags, unsigned long start,
Marek Szyprowski5e6cafc2012-04-13 12:32:09 +02002023 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
Nick Piggindb64fe02008-10-18 20:27:03 -07002024{
Kautuk Consul00065262011-12-19 17:12:04 -08002025 struct vmap_area *va;
Nick Piggindb64fe02008-10-18 20:27:03 -07002026 struct vm_struct *area;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
Giridhar Pemmasani52fd24c2006-10-28 10:38:34 -07002028 BUG_ON(in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 size = PAGE_ALIGN(size);
OGAWA Hirofumi31be8302006-11-16 01:19:29 -08002030 if (unlikely(!size))
2031 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032
zijun_hu252e5c62016-10-07 16:57:26 -07002033 if (flags & VM_IOREMAP)
2034 align = 1ul << clamp_t(int, get_count_order_long(size),
2035 PAGE_SHIFT, IOREMAP_MAX_ORDER);
2036
Tejun Heocf88c792009-08-14 15:00:52 +09002037 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 if (unlikely(!area))
2039 return NULL;
2040
Andrey Ryabinin71394fe2015-02-13 14:40:03 -08002041 if (!(flags & VM_NO_GUARD))
2042 size += PAGE_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043
Nick Piggindb64fe02008-10-18 20:27:03 -07002044 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2045 if (IS_ERR(va)) {
2046 kfree(area);
2047 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049
Zhang Yanfeid82b1d82013-07-03 15:04:47 -07002050 setup_vmalloc_vm(area, va, flags, caller);
Mitsuo Hayasakaf5252e02011-10-31 17:08:13 -07002051
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 return area;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053}
2054
Christoph Lameter930fc452005-10-29 18:15:41 -07002055struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
2056 unsigned long start, unsigned long end)
2057{
David Rientjes00ef2d22013-02-22 16:35:36 -08002058 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2059 GFP_KERNEL, __builtin_return_address(0));
Christoph Lameter930fc452005-10-29 18:15:41 -07002060}
Rusty Russell5992b6d2007-07-19 01:49:21 -07002061EXPORT_SYMBOL_GPL(__get_vm_area);
Christoph Lameter930fc452005-10-29 18:15:41 -07002062
Benjamin Herrenschmidtc2968612009-02-18 14:48:12 -08002063struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2064 unsigned long start, unsigned long end,
Marek Szyprowski5e6cafc2012-04-13 12:32:09 +02002065 const void *caller)
Benjamin Herrenschmidtc2968612009-02-18 14:48:12 -08002066{
David Rientjes00ef2d22013-02-22 16:35:36 -08002067 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2068 GFP_KERNEL, caller);
Benjamin Herrenschmidtc2968612009-02-18 14:48:12 -08002069}
2070
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002072 * get_vm_area - reserve a contiguous kernel virtual area
2073 * @size: size of the area
2074 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002076 * Search an area of @size in the kernel virtual mapping area,
2077 * and reserve it for our purposes. Returns the area descriptor
2078 * on success or %NULL on failure.
Mike Rapoporta862f682019-03-05 15:48:42 -08002079 *
2080 * Return: the area descriptor on success or %NULL on failure.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 */
2082struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2083{
David Miller2dca6992009-09-21 12:22:34 -07002084 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
David Rientjes00ef2d22013-02-22 16:35:36 -08002085 NUMA_NO_NODE, GFP_KERNEL,
2086 __builtin_return_address(0));
Christoph Lameter23016962008-04-28 02:12:42 -07002087}
2088
2089struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
Marek Szyprowski5e6cafc2012-04-13 12:32:09 +02002090 const void *caller)
Christoph Lameter23016962008-04-28 02:12:42 -07002091{
David Miller2dca6992009-09-21 12:22:34 -07002092 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
David Rientjes00ef2d22013-02-22 16:35:36 -08002093 NUMA_NO_NODE, GFP_KERNEL, caller);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094}
2095
Marek Szyprowskie9da6e92012-07-30 09:11:33 +02002096/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002097 * find_vm_area - find a contiguous kernel virtual area
2098 * @addr: base address
Marek Szyprowskie9da6e92012-07-30 09:11:33 +02002099 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002100 * Search for the kernel VM area starting at @addr, and return it.
2101 * It is up to the caller to do all required locking to keep the returned
2102 * pointer valid.
Mike Rapoporta862f682019-03-05 15:48:42 -08002103 *
2104 * Return: pointer to the found area or %NULL on failure
Marek Szyprowskie9da6e92012-07-30 09:11:33 +02002105 */
2106struct vm_struct *find_vm_area(const void *addr)
Nick Piggin83342312006-06-23 02:03:20 -07002107{
Nick Piggindb64fe02008-10-18 20:27:03 -07002108 struct vmap_area *va;
Nick Piggin83342312006-06-23 02:03:20 -07002109
Nick Piggindb64fe02008-10-18 20:27:03 -07002110 va = find_vmap_area((unsigned long)addr);
2111 if (va && va->flags & VM_VM_AREA)
Minchan Kimdb1aeca2012-01-10 15:08:39 -08002112 return va->vm;
Nick Piggin83342312006-06-23 02:03:20 -07002113
Andi Kleen7856dfe2005-05-20 14:27:57 -07002114 return NULL;
Andi Kleen7856dfe2005-05-20 14:27:57 -07002115}
2116
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002118 * remove_vm_area - find and remove a contiguous kernel virtual area
2119 * @addr: base address
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002121 * Search for the kernel VM area starting at @addr, and remove it.
2122 * This function returns the found VM area, but using it is NOT safe
2123 * on SMP machines, except for its size or flags.
Mike Rapoporta862f682019-03-05 15:48:42 -08002124 *
2125 * Return: pointer to the found area or %NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 */
Christoph Lameterb3bdda02008-02-04 22:28:32 -08002127struct vm_struct *remove_vm_area(const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128{
Nick Piggindb64fe02008-10-18 20:27:03 -07002129 struct vmap_area *va;
2130
Christoph Hellwig5803ed22016-12-12 16:44:20 -08002131 might_sleep();
2132
Nick Piggindb64fe02008-10-18 20:27:03 -07002133 va = find_vmap_area((unsigned long)addr);
2134 if (va && va->flags & VM_VM_AREA) {
Minchan Kimdb1aeca2012-01-10 15:08:39 -08002135 struct vm_struct *vm = va->vm;
Mitsuo Hayasakaf5252e02011-10-31 17:08:13 -07002136
Joonsoo Kimc69480a2013-04-29 15:07:30 -07002137 spin_lock(&vmap_area_lock);
2138 va->vm = NULL;
2139 va->flags &= ~VM_VM_AREA;
Yisheng Xie78c72742017-07-10 15:48:09 -07002140 va->flags |= VM_LAZY_FREE;
Joonsoo Kimc69480a2013-04-29 15:07:30 -07002141 spin_unlock(&vmap_area_lock);
2142
Andrey Ryabinina5af5aa2015-03-12 16:26:11 -07002143 kasan_free_shadow(vm);
KAMEZAWA Hiroyukidd32c272009-09-21 17:02:32 -07002144 free_unmap_vmap_area(va);
KAMEZAWA Hiroyukidd32c272009-09-21 17:02:32 -07002145
Nick Piggindb64fe02008-10-18 20:27:03 -07002146 return vm;
2147 }
2148 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149}
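
/*
 * Editorial sketch, not part of vmalloc.c: a caller that set up a
 * mapping by hand tears it down with remove_vm_area() and then frees
 * the descriptor itself, which is essentially what free_vm_area() does.
 */
static void example_teardown(const void *addr)
{
	struct vm_struct *vm = remove_vm_area(addr);

	if (!WARN_ON(!vm))
		kfree(vm);
}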
2150
Rick Edgecombe868b1042019-04-25 17:11:36 -07002151static inline void set_area_direct_map(const struct vm_struct *area,
2152 int (*set_direct_map)(struct page *page))
2153{
2154 int i;
2155
2156 for (i = 0; i < area->nr_pages; i++)
2157 if (page_address(area->pages[i]))
2158 set_direct_map(area->pages[i]);
2159}
2160
2161/* Handle removing and resetting vm mappings related to the vm_struct. */
2162static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2163{
Rick Edgecombe868b1042019-04-25 17:11:36 -07002164 unsigned long start = ULONG_MAX, end = 0;
2165 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
Rick Edgecombe31e67342019-05-27 14:10:58 -07002166 int flush_dmap = 0;
Rick Edgecombe868b1042019-04-25 17:11:36 -07002167 int i;
2168
Rick Edgecombe868b1042019-04-25 17:11:36 -07002169 remove_vm_area(area->addr);
2170
2171 /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2172 if (!flush_reset)
2173 return;
2174
2175 /*
2176 * If not deallocating pages, just do the flush of the VM area and
2177 * return.
2178 */
2179 if (!deallocate_pages) {
2180 vm_unmap_aliases();
2181 return;
2182 }
2183
2184 /*
2185 * If execution gets here, flush the vm mapping and reset the direct
2186 * map. Find the start and end range of the direct mappings to make sure
2187 * the vm_unmap_aliases() flush includes the direct map.
2188 */
2189 for (i = 0; i < area->nr_pages; i++) {
Rick Edgecombe8e41f872019-05-27 14:10:57 -07002190 unsigned long addr = (unsigned long)page_address(area->pages[i]);
2191 if (addr) {
Rick Edgecombe868b1042019-04-25 17:11:36 -07002192 start = min(addr, start);
Rick Edgecombe8e41f872019-05-27 14:10:57 -07002193 end = max(addr + PAGE_SIZE, end);
Rick Edgecombe31e67342019-05-27 14:10:58 -07002194 flush_dmap = 1;
Rick Edgecombe868b1042019-04-25 17:11:36 -07002195 }
2196 }
2197
2198 /*
2199 * Set direct map to something invalid so that it won't be cached if
2200 * there are any accesses after the TLB flush, then flush the TLB and
2201 * reset the direct map permissions to the default.
2202 */
2203 set_area_direct_map(area, set_direct_map_invalid_noflush);
Rick Edgecombe31e67342019-05-27 14:10:58 -07002204 _vm_unmap_aliases(start, end, flush_dmap);
Rick Edgecombe868b1042019-04-25 17:11:36 -07002205 set_area_direct_map(area, set_direct_map_default_noflush);
2206}
2207
Christoph Lameterb3bdda02008-02-04 22:28:32 -08002208static void __vunmap(const void *addr, int deallocate_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209{
2210 struct vm_struct *area;
2211
2212 if (!addr)
2213 return;
2214
HATAYAMA Daisukee69e9d4a2013-07-03 15:02:18 -07002215 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
Dan Carpenterab15d9b2013-07-08 15:59:53 -07002216 addr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218
Liviu Dudau6ade2032019-03-05 15:42:54 -08002219 area = find_vm_area(addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 if (unlikely(!area)) {
Arjan van de Ven4c8573e2008-07-25 19:45:37 -07002221 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 return;
2224 }
2225
Chintan Pandya05e3ff92018-06-07 17:06:53 -07002226 debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2227 debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07002228
Rick Edgecombe868b1042019-04-25 17:11:36 -07002229 vm_remove_mappings(area, deallocate_pages);
2230
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 if (deallocate_pages) {
2232 int i;
2233
2234 for (i = 0; i < area->nr_pages; i++) {
Christoph Lameterbf53d6f2008-02-04 22:28:34 -08002235 struct page *page = area->pages[i];
2236
2237 BUG_ON(!page);
Vladimir Davydov49491482016-07-26 15:24:24 -07002238 __free_pages(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 }
2240
David Rientjes244d63e2016-01-14 15:19:35 -08002241 kvfree(area->pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 }
2243
2244 kfree(area);
2245 return;
2246}
Andrey Ryabininbf22e372016-12-12 16:44:10 -08002247
2248static inline void __vfree_deferred(const void *addr)
2249{
2250 /*
2251 * Use raw_cpu_ptr() because this can be called from preemptible
2252 * context. Preemption is absolutely fine here, because the llist_add()
2253 * implementation is lockless, so it works even if we are adding to
2254 * another cpu's list. schedule_work() should be fine with this too.
2255 */
2256 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2257
2258 if (llist_add((struct llist_node *)addr, &p->list))
2259 schedule_work(&p->wq);
2260}
2261
2262/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002263 * vfree_atomic - release memory allocated by vmalloc()
2264 * @addr: memory base address
Andrey Ryabininbf22e372016-12-12 16:44:10 -08002265 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002266 * This one is just like vfree() but can be called in any atomic context
2267 * except NMIs.
Andrey Ryabininbf22e372016-12-12 16:44:10 -08002268 */
2269void vfree_atomic(const void *addr)
2270{
2271 BUG_ON(in_nmi());
2272
2273 kmemleak_free(addr);
2274
2275 if (!addr)
2276 return;
2277 __vfree_deferred(addr);
2278}
2279
Roman Penyaevc67dc622019-03-05 15:43:24 -08002280static void __vfree(const void *addr)
2281{
2282 if (unlikely(in_interrupt()))
2283 __vfree_deferred(addr);
2284 else
2285 __vunmap(addr, 1);
2286}
2287
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002289 * vfree - release memory allocated by vmalloc()
2290 * @addr: memory base address
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002292 * Free the virtually contiguous memory area starting at @addr, as
2293 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
2294 * NULL, no operation is performed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002296 * Must not be called in NMI context (strictly speaking, only if we don't
2297 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2298 * conventions for vfree() arch-dependent would be a really bad idea).
Andrew Mortonc9fcee52013-05-07 16:18:18 -07002299 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002300 * May sleep if called *not* from interrupt context.
Andrey Ryabinin3ca4ea32018-10-26 15:07:03 -07002301 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002302 * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 */
Christoph Lameterb3bdda02008-02-04 22:28:32 -08002304void vfree(const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305{
Al Viro32fcfd42013-03-10 20:14:08 -04002306 BUG_ON(in_nmi());
Catalin Marinas89219d32009-06-11 13:23:19 +01002307
2308 kmemleak_free(addr);
2309
Andrey Ryabinina8dda162018-10-26 15:07:07 -07002310 might_sleep_if(!in_interrupt());
2311
Al Viro32fcfd42013-03-10 20:14:08 -04002312 if (!addr)
2313 return;
Roman Penyaevc67dc622019-03-05 15:43:24 -08002314
2315 __vfree(addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317EXPORT_SYMBOL(vfree);
2318
2319/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002320 * vunmap - release virtual mapping obtained by vmap()
2321 * @addr: memory base address
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002323 * Free the virtually contiguous memory area starting at @addr,
2324 * which was created from the page array passed to vmap().
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002326 * Must not be called in interrupt context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 */
Christoph Lameterb3bdda02008-02-04 22:28:32 -08002328void vunmap(const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329{
2330 BUG_ON(in_interrupt());
Peter Zijlstra34754b62009-02-25 16:04:03 +01002331 might_sleep();
Al Viro32fcfd42013-03-10 20:14:08 -04002332 if (addr)
2333 __vunmap(addr, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335EXPORT_SYMBOL(vunmap);
2336
2337/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002338 * vmap - map an array of pages into virtually contiguous space
2339 * @pages: array of page pointers
2340 * @count: number of pages to map
2341 * @flags: vm_area->flags
2342 * @prot: page protection for the mapping
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002344 * Maps @count pages from @pages into contiguous kernel virtual
2345 * space.
Mike Rapoporta862f682019-03-05 15:48:42 -08002346 *
2347 * Return: the address of the area or %NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 */
2349void *vmap(struct page **pages, unsigned int count,
Mike Rapoport92eac162019-03-05 15:48:36 -08002350 unsigned long flags, pgprot_t prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351{
2352 struct vm_struct *area;
Guillermo Julián Moreno65ee03c2016-06-03 14:55:33 -07002353 unsigned long size; /* In bytes */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354
Peter Zijlstra34754b62009-02-25 16:04:03 +01002355 might_sleep();
2356
Arun KSca79b0c2018-12-28 00:34:29 -08002357 if (count > totalram_pages())
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 return NULL;
2359
Guillermo Julián Moreno65ee03c2016-06-03 14:55:33 -07002360 size = (unsigned long)count << PAGE_SHIFT;
2361 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 if (!area)
2363 return NULL;
Christoph Lameter23016962008-04-28 02:12:42 -07002364
WANG Chaof6f8ed42014-08-06 16:06:58 -07002365 if (map_vm_area(area, prot, pages)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 vunmap(area->addr);
2367 return NULL;
2368 }
2369
2370 return area->addr;
2371}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372EXPORT_SYMBOL(vmap);
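
/*
 * Editorial sketch, not part of vmalloc.c: vmap() suits longer-lived
 * mappings of pages the caller already owns, complementing the
 * short-lived vm_map_ram() path earlier in this file.
 */
static int example_vmap_roundtrip(struct page **pages, unsigned int count)
{
	void *va = vmap(pages, count, VM_MAP, PAGE_KERNEL);

	if (!va)
		return -ENOMEM;
	/* ... long-lived use of the virtually contiguous view ... */
	vunmap(va);	/* the pages themselves stay allocated */
	return 0;
}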
2373
Michal Hocko8594a212017-05-12 15:46:41 -07002374static void *__vmalloc_node(unsigned long size, unsigned long align,
2375 gfp_t gfp_mask, pgprot_t prot,
2376 int node, const void *caller);
Adrian Bunke31d9eb2008-02-04 22:29:09 -08002377static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
Wanpeng Li3722e132013-11-12 15:07:29 -08002378 pgprot_t prot, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379{
2380 struct page **pages;
2381 unsigned int nr_pages, array_size, i;
David Rientjes930f0362014-08-06 16:06:28 -07002382 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
Laura Abbott704b8622017-08-18 15:16:27 -07002383 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
2384 const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
2385 0 :
2386 __GFP_HIGHMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387
Wanpeng Li762216a2013-09-11 14:22:42 -07002388 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389 array_size = (nr_pages * sizeof(struct page *));
2390
2391 area->nr_pages = nr_pages;
2392 /* Please note that the recursion is strictly bounded. */
Jan Kiszka8757d5f2006-07-14 00:23:56 -07002393 if (array_size > PAGE_SIZE) {
Laura Abbott704b8622017-08-18 15:16:27 -07002394 pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
Wanpeng Li3722e132013-11-12 15:07:29 -08002395 PAGE_KERNEL, node, area->caller);
Andrew Morton286e1ea2006-10-17 00:09:57 -07002396 } else {
Jan Beulich976d6df2009-12-14 17:58:39 -08002397 pages = kmalloc_node(array_size, nested_gfp, node);
Andrew Morton286e1ea2006-10-17 00:09:57 -07002398 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399 area->pages = pages;
2400 if (!area->pages) {
2401 remove_vm_area(area->addr);
2402 kfree(area);
2403 return NULL;
2404 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405
2406 for (i = 0; i < area->nr_pages; i++) {
Christoph Lameterbf53d6f2008-02-04 22:28:34 -08002407 struct page *page;
2408
Jianguo Wu4b909512013-11-12 15:07:11 -08002409 if (node == NUMA_NO_NODE)
Laura Abbott704b8622017-08-18 15:16:27 -07002410 page = alloc_page(alloc_mask|highmem_mask);
Christoph Lameter930fc452005-10-29 18:15:41 -07002411 else
Laura Abbott704b8622017-08-18 15:16:27 -07002412 page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
Christoph Lameterbf53d6f2008-02-04 22:28:34 -08002413
2414 if (unlikely(!page)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 /* Successfully allocated i pages, free them in __vunmap() */
2416 area->nr_pages = i;
2417 goto fail;
2418 }
Christoph Lameterbf53d6f2008-02-04 22:28:34 -08002419 area->pages[i] = page;
Laura Abbott704b8622017-08-18 15:16:27 -07002420 if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
Eric Dumazet660654f2014-08-06 16:06:25 -07002421 cond_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 }
2423
WANG Chaof6f8ed42014-08-06 16:06:58 -07002424 if (map_vm_area(area, prot, pages))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 goto fail;
2426 return area->addr;
2427
2428fail:
Michal Hockoa8e99252017-02-22 15:46:10 -08002429 warn_alloc(gfp_mask, NULL,
Michal Hocko7877cdc2016-10-07 17:01:55 -07002430 "vmalloc: allocation failure, allocated %ld of %ld bytes",
Dave Hansen22943ab2011-05-24 17:12:18 -07002431 (area->nr_pages*PAGE_SIZE), area->size);
Roman Penyaevc67dc622019-03-05 15:43:24 -08002432 __vfree(area->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 return NULL;
2434}
2435
David Rientjesd0a21262011-01-13 15:46:02 -08002436/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002437 * __vmalloc_node_range - allocate virtually contiguous memory
2438 * @size: allocation size
2439 * @align: desired alignment
2440 * @start: vm area range start
2441 * @end: vm area range end
2442 * @gfp_mask: flags for the page level allocator
2443 * @prot: protection mask for the allocated pages
2444 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
2445 * @node: node to use for allocation or NUMA_NO_NODE
2446 * @caller: caller's return address
David Rientjesd0a21262011-01-13 15:46:02 -08002447 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002448 * Allocate enough pages to cover @size from the page level
2449 * allocator with @gfp_mask flags. Map them into contiguous
2450 * kernel virtual space, using a pagetable protection of @prot.
Mike Rapoporta862f682019-03-05 15:48:42 -08002451 *
2452 * Return: the address of the area or %NULL on failure
David Rientjesd0a21262011-01-13 15:46:02 -08002453 */
2454void *__vmalloc_node_range(unsigned long size, unsigned long align,
2455 unsigned long start, unsigned long end, gfp_t gfp_mask,
Andrey Ryabinincb9e3c22015-02-13 14:40:07 -08002456 pgprot_t prot, unsigned long vm_flags, int node,
2457 const void *caller)
Christoph Lameter930fc452005-10-29 18:15:41 -07002458{
David Rientjesd0a21262011-01-13 15:46:02 -08002459 struct vm_struct *area;
2460 void *addr;
2461 unsigned long real_size = size;
2462
2463 size = PAGE_ALIGN(size);
Arun KSca79b0c2018-12-28 00:34:29 -08002464 if (!size || (size >> PAGE_SHIFT) > totalram_pages())
Joe Perchesde7d2b52011-10-31 17:08:48 -07002465 goto fail;
David Rientjesd0a21262011-01-13 15:46:02 -08002466
Andrey Ryabinincb9e3c22015-02-13 14:40:07 -08002467 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
2468 vm_flags, start, end, node, gfp_mask, caller);
David Rientjesd0a21262011-01-13 15:46:02 -08002469 if (!area)
Joe Perchesde7d2b52011-10-31 17:08:48 -07002470 goto fail;
David Rientjesd0a21262011-01-13 15:46:02 -08002471
Wanpeng Li3722e132013-11-12 15:07:29 -08002472 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
Mel Gorman1368edf2011-12-08 14:34:30 -08002473 if (!addr)
Wanpeng Lib82225f32013-11-12 15:07:33 -08002474 return NULL;
Catalin Marinas89219d32009-06-11 13:23:19 +01002475
2476 /*
Zhang Yanfei20fc02b2013-07-08 15:59:58 -07002477 * In this function, a newly allocated vm_struct has the VM_UNINITIALIZED
2478 * flag set, which means the vm_struct is not fully initialized.
Joonsoo Kim4341fa42013-04-29 15:07:39 -07002479 * At this point it is fully initialized, so remove the flag here.
Mitsuo Hayasakaf5252e02011-10-31 17:08:13 -07002480 */
Zhang Yanfei20fc02b2013-07-08 15:59:58 -07002481 clear_vm_uninitialized_flag(area);
Mitsuo Hayasakaf5252e02011-10-31 17:08:13 -07002482
Catalin Marinas94f4a162017-07-06 15:40:22 -07002483 kmemleak_vmalloc(area, size, gfp_mask);
Catalin Marinas89219d32009-06-11 13:23:19 +01002484
2485 return addr;
Joe Perchesde7d2b52011-10-31 17:08:48 -07002486
2487fail:
Michal Hockoa8e99252017-02-22 15:46:10 -08002488 warn_alloc(gfp_mask, NULL,
Michal Hocko7877cdc2016-10-07 17:01:55 -07002489 "vmalloc: allocation failure: %lu bytes", real_size);
Joe Perchesde7d2b52011-10-31 17:08:48 -07002490 return NULL;
Christoph Lameter930fc452005-10-29 18:15:41 -07002491}
2492
Uladzislau Rezki (Sony)153178e2019-03-05 15:43:30 -08002493/*
2494 * This is only for performance analysis of vmalloc and for stress-testing
2495 * purposes. It is required by the vmalloc test module, so do not use it
2496 * for anything else.
2497 */
2498#ifdef CONFIG_TEST_VMALLOC_MODULE
2499EXPORT_SYMBOL_GPL(__vmalloc_node_range);
2500#endif
2501
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002503 * __vmalloc_node - allocate virtually contiguous memory
2504 * @size: allocation size
2505 * @align: desired alignment
2506 * @gfp_mask: flags for the page level allocator
2507 * @prot: protection mask for the allocated pages
2508 * @node: node to use for allocation or NUMA_NO_NODE
2509 * @caller: caller's return address
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002511 * Allocate enough pages to cover @size from the page level
2512 * allocator with @gfp_mask flags. Map them into contiguous
2513 * kernel virtual space, using a pagetable protection of @prot.
Michal Hockoa7c3e902017-05-08 15:57:09 -07002514 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002515 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2516 * and __GFP_NOFAIL are not supported
Michal Hockoa7c3e902017-05-08 15:57:09 -07002517 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002518 * Any use of gfp flags outside of GFP_KERNEL should be discussed
2519 * with mm people.
Mike Rapoporta862f682019-03-05 15:48:42 -08002520 *
2521 * Return: pointer to the allocated memory or %NULL on error
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 */
Michal Hocko8594a212017-05-12 15:46:41 -07002523static void *__vmalloc_node(unsigned long size, unsigned long align,
David Miller2dca6992009-09-21 12:22:34 -07002524 gfp_t gfp_mask, pgprot_t prot,
Marek Szyprowski5e6cafc2012-04-13 12:32:09 +02002525 int node, const void *caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526{
David Rientjesd0a21262011-01-13 15:46:02 -08002527 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
Andrey Ryabinincb9e3c22015-02-13 14:40:07 -08002528 gfp_mask, prot, 0, node, caller);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529}
2530
Christoph Lameter930fc452005-10-29 18:15:41 -07002531void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
2532{
David Rientjes00ef2d22013-02-22 16:35:36 -08002533 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
Christoph Lameter23016962008-04-28 02:12:42 -07002534 __builtin_return_address(0));
Christoph Lameter930fc452005-10-29 18:15:41 -07002535}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536EXPORT_SYMBOL(__vmalloc);
2537
Michal Hocko8594a212017-05-12 15:46:41 -07002538static inline void *__vmalloc_node_flags(unsigned long size,
2539 int node, gfp_t flags)
2540{
2541 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
2542 node, __builtin_return_address(0));
2543}
2544
2545
2546void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
2547 void *caller)
2548{
2549 return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
2550}
2551
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002553 * vmalloc - allocate virtually contiguous memory
2554 * @size: allocation size
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002556 * Allocate enough pages to cover @size from the page level
2557 * allocator and map them into contiguous kernel virtual space.
2558 *
2559 * For tight control over page level allocator and protection flags
2560 * use __vmalloc() instead.
Mike Rapoporta862f682019-03-05 15:48:42 -08002561 *
2562 * Return: pointer to the allocated memory or %NULL on error
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 */
2564void *vmalloc(unsigned long size)
2565{
David Rientjes00ef2d22013-02-22 16:35:36 -08002566 return __vmalloc_node_flags(size, NUMA_NO_NODE,
Michal Hocko19809c22017-05-08 15:57:44 -07002567 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569EXPORT_SYMBOL(vmalloc);
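
/*
 * Editorial sketch, not part of vmalloc.c: the common vmalloc()/vfree()
 * pairing for a buffer that must be virtually, but not physically,
 * contiguous. vzalloc() below would make the memset() unnecessary.
 */
static int example_big_buffer(size_t len)
{
	char *buf = vmalloc(len);

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, len);
	/* ... use buf ... */
	vfree(buf);
	return 0;
}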
2570
Christoph Lameter930fc452005-10-29 18:15:41 -07002571/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002572 * vzalloc - allocate virtually contiguous memory with zero fill
2573 * @size: allocation size
Dave Younge1ca7782010-10-26 14:22:06 -07002574 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002575 * Allocate enough pages to cover @size from the page level
2576 * allocator and map them into contiguous kernel virtual space.
2577 * The memory allocated is set to zero.
2578 *
2579 * For tight control over page level allocator and protection flags
2580 * use __vmalloc() instead.
Mike Rapoporta862f682019-03-05 15:48:42 -08002581 *
2582 * Return: pointer to the allocated memory or %NULL on error
Dave Younge1ca7782010-10-26 14:22:06 -07002583 */
2584void *vzalloc(unsigned long size)
2585{
David Rientjes00ef2d22013-02-22 16:35:36 -08002586 return __vmalloc_node_flags(size, NUMA_NO_NODE,
Michal Hocko19809c22017-05-08 15:57:44 -07002587 GFP_KERNEL | __GFP_ZERO);
Dave Younge1ca7782010-10-26 14:22:06 -07002588}
2589EXPORT_SYMBOL(vzalloc);
2590
2591/**
Rolf Eike Beeread04082006-09-27 01:50:13 -07002592 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2593 * @size: allocation size
Nick Piggin83342312006-06-23 02:03:20 -07002594 *
Rolf Eike Beeread04082006-09-27 01:50:13 -07002595 * The resulting memory area is zeroed so it can be mapped to userspace
2596 * without leaking data.
Mike Rapoporta862f682019-03-05 15:48:42 -08002597 *
2598 * Return: pointer to the allocated memory or %NULL on error
Nick Piggin83342312006-06-23 02:03:20 -07002599 */
2600void *vmalloc_user(unsigned long size)
2601{
Roman Penyaevbc84c532019-03-05 15:43:27 -08002602 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2603 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
2604 VM_USERMAP, NUMA_NO_NODE,
2605 __builtin_return_address(0));
Nick Piggin83342312006-06-23 02:03:20 -07002606}
2607EXPORT_SYMBOL(vmalloc_user);
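
/*
 * Editorial sketch, not part of vmalloc.c: because vmalloc_user() sets
 * VM_USERMAP and zeroes the area, such a buffer can back a driver's
 * mmap() handler via remap_vmalloc_range() (defined later in this file).
 * 'kbuf' is assumed to be a vmalloc_user() buffer stashed in
 * file->private_data; all names here are hypothetical.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *kbuf = file->private_data;

	return remap_vmalloc_range(vma, kbuf, vma->vm_pgoff);
}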
2608
2609/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002610 * vmalloc_node - allocate memory on a specific node
2611 * @size: allocation size
2612 * @node: numa node
Christoph Lameter930fc452005-10-29 18:15:41 -07002613 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002614 * Allocate enough pages to cover @size from the page level
2615 * allocator and map them into contiguous kernel virtual space.
Christoph Lameter930fc452005-10-29 18:15:41 -07002616 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002617 * For tight control over page level allocator and protection flags
2618 * use __vmalloc() instead.
Mike Rapoporta862f682019-03-05 15:48:42 -08002619 *
2620 * Return: pointer to the allocated memory or %NULL on error
Christoph Lameter930fc452005-10-29 18:15:41 -07002621 */
2622void *vmalloc_node(unsigned long size, int node)
2623{
Michal Hocko19809c22017-05-08 15:57:44 -07002624 return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
Christoph Lameter23016962008-04-28 02:12:42 -07002625 node, __builtin_return_address(0));
Christoph Lameter930fc452005-10-29 18:15:41 -07002626}
2627EXPORT_SYMBOL(vmalloc_node);
2628
Dave Younge1ca7782010-10-26 14:22:06 -07002629/**
2630 * vzalloc_node - allocate memory on a specific node with zero fill
2631 * @size: allocation size
2632 * @node: numa node
2633 *
2634 * Allocate enough pages to cover @size from the page level
2635 * allocator and map them into contiguous kernel virtual space.
2636 * The memory allocated is set to zero.
2637 *
2638 * For tight control over page level allocator and protection flags
2639 * use __vmalloc_node() instead.
Mike Rapoporta862f682019-03-05 15:48:42 -08002640 *
2641 * Return: pointer to the allocated memory or %NULL on error
Dave Younge1ca7782010-10-26 14:22:06 -07002642 */
2643void *vzalloc_node(unsigned long size, int node)
2644{
2645 return __vmalloc_node_flags(size, node,
Michal Hocko19809c22017-05-08 15:57:44 -07002646 GFP_KERNEL | __GFP_ZERO);
Dave Younge1ca7782010-10-26 14:22:06 -07002647}
2648EXPORT_SYMBOL(vzalloc_node);
2649
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650/**
Mike Rapoport92eac162019-03-05 15:48:36 -08002651 * vmalloc_exec - allocate virtually contiguous, executable memory
2652 * @size: allocation size
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002654 * Kernel-internal function to allocate enough pages to cover @size from
2655 * the page level allocator and map them into contiguous and
2656 * executable kernel virtual space.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 *
Mike Rapoport92eac162019-03-05 15:48:36 -08002658 * For tight control over page level allocator and protection flags
2659 * use __vmalloc() instead.
Mike Rapoporta862f682019-03-05 15:48:42 -08002660 *
2661 * Return: pointer to the allocated memory or %NULL on error
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663void *vmalloc_exec(unsigned long size)
2664{
Rick Edgecombe868b1042019-04-25 17:11:36 -07002665 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2666 GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2667 NUMA_NO_NODE, __builtin_return_address(0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
/*
 * 64b systems should always have either DMA or DMA32 zones. For others
 * GFP_DMA32 should do the right thing and use the normal zone.
 */
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#endif
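
/*
 * Aside: the parentheses in the fallback definition above matter. With a
 * bare "GFP_DMA32 | GFP_KERNEL", a hypothetical expression such as
 *
 *	gfp_t gfp = GFP_VMALLOC32 & ~__GFP_FS;
 *
 * would parse as GFP_DMA32 | (GFP_KERNEL & ~__GFP_FS), because & binds
 * tighter than |, silently keeping a bit the caller meant to clear.
 */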

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32_user(unsigned long size)
{
	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32_user);
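
/*
 * Usage sketch: a buffer meant to be mmap()ed by userspace. The helper
 * name is hypothetical. vmalloc_32_user() sets VM_USERMAP, which is
 * exactly what remap_vmalloc_range_partial() checks for; see the sketch
 * after remap_vmalloc_range() below for the mapping side.
 */
static void *example_alloc_user_buffer(unsigned long bytes)
{
	return vmalloc_32_user(PAGE_ALIGN(bytes));
}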

/*
 * Small helper routine: copy contents from addr into buf.
 * If a page is not present, fill the corresponding bytes with zeroes.
 */

static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But taking a lock here means adding the overhead of
		 * vmalloc()/vfree() calls to this rarely used _debug_
		 * interface. Instead, we use kmap() and accept a small
		 * overhead in this access function.
		 */
		if (p) {
			/*
			 * We can expect USER0 is not used (see vread/vwrite's
			 * function description).
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But taking a lock here means adding the overhead of
		 * vmalloc()/vfree() calls to this rarely used _debug_
		 * interface. Instead, we use kmap() and accept a small
		 * overhead in this access function.
		 */
		if (p) {
			/*
			 * We can expect USER0 is not used (see vread/vwrite's
			 * function description).
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copies data from that area to a given buffer. If the given memory range
 * of [addr...addr+count) includes some valid address, data is copied to
 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
 * An IOREMAP area is treated as a memory hole and no copy is done.
 *
 * If [addr...addr+count) doesn't include any intersection with a live
 * vm_struct area, this returns 0. @buf should be a kernel buffer.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any prior information, such as /dev/kmem.
 *
 * Return: number of bytes for which addr and buf should be increased
 * (same number as @count) or %0 if [addr...addr+count) doesn't
 * include any intersection with a valid vmalloc area
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
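
/*
 * Usage sketch: snapshotting a vmalloc range into a kernel buffer. The
 * helper name is hypothetical. Holes and ioremap regions inside
 * [addr, addr + len) come back zero-filled instead of faulting.
 */
static char *example_snapshot_vmalloc(char *addr, unsigned long len)
{
	char *copy = kmalloc(len, GFP_KERNEL);

	if (!copy)
		return NULL;
	/* vread() returns 0 if no live vmalloc area intersects the range. */
	if (!vread(copy, addr, len)) {
		kfree(copy);
		return NULL;
	}
	return copy;
}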

/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be written.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copies data from a buffer to the given addr. If the specified range of
 * [addr...addr+count) includes some valid address, data is copied from
 * the proper area of @buf. Memory holes are skipped, and an IOREMAP area
 * is likewise treated as a hole: no copy is done there.
 *
 * If [addr...addr+count) doesn't include any intersection with a live
 * vm_struct area, this returns 0. @buf should be a kernel buffer.
 *
 * Note: In usual ops, vwrite() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any prior information, such as /dev/kmem.
 *
 * Return: number of bytes for which addr and buf should be
 * increased (same number as @count) or %0 if [addr...addr+count)
 * doesn't include any intersection with a valid vmalloc area
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma: vma to cover
 * @uaddr: target user address to start at
 * @kaddr: virtual address of vmalloc kernel memory
 * @size: size of map area
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Will return failure if those criteria aren't
 * met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (kaddr + size > area->addr + get_vm_area_size(area))
		return -EINVAL;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);
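
/*
 * Usage sketch: exposing only the second page of a VM_USERMAP buffer
 * (at least two pages long) to userspace. Names are hypothetical; in
 * real code this would run from a driver's ->mmap() handler.
 */
static int example_map_second_page(struct vm_area_struct *vma, void *kbuf)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   kbuf + PAGE_SIZE, PAGE_SIZE);
}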

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr + (pgoff << PAGE_SHIFT),
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
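
/*
 * Usage sketch: the common whole-buffer case, e.g. from a driver's
 * ->mmap() handler, with @buf obtained from vmalloc_user() or
 * vmalloc_32_user(). The helper name is hypothetical; the call fails
 * with -EINVAL unless the buffer covers the entire VMA.
 */
static int example_mmap_buffer(struct vm_area_struct *vma, void *buf)
{
	return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
}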

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/*
 * Callback for apply_to_page_range(): if the alloc_vm_area() caller asked
 * for the PTEs back, stash a pointer to each freshly allocated PTE.
 */
static int f(pte_t *pte, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 * @ptes: returns the PTEs for the address space
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created.
 *
 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 * allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
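
/*
 * Usage sketch: reserving two pages of kernel address space and getting
 * the PTEs back, in the style of the main in-tree user (Xen's grant
 * tables). Nothing is mapped yet; a real caller would make ptes_out[0]
 * and ptes_out[1] point at foreign frames. Names are hypothetical.
 */
static struct vm_struct *example_reserve_two_pages(pte_t **ptes_out)
{
	return alloc_vm_area(2 * PAGE_SIZE, ptes_out);
}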

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return rb_entry_safe(n, struct vmap_area, rb_node);
}

/**
 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
 * @addr: target address
 *
 * Returns: the vmap_area that encloses @addr, if one exists. Otherwise
 * the closest vmap_area below @addr is returned, i.e. one with
 * va->va_start <= addr and va->va_end < addr, or NULL if there are
 * no areas before @addr.
 */
static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)
{
	struct vmap_area *va, *tmp;
	struct rb_node *n;

	n = free_vmap_area_root.rb_node;
	va = NULL;

	while (n) {
		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_start <= addr) {
			va = tmp;
			if (tmp->va_end >= addr)
				break;

			n = n->rb_right;
		} else {
			n = n->rb_left;
		}
	}

	return va;
}

/**
 * pvm_determine_end_from_reverse - find the highest aligned address
 * of free block below VMALLOC_END
 * @va:
 *   in - the VA we start the search from (reverse order);
 *   out - the VA with the highest aligned end address.
 * @align: alignment the end address is rounded down to
 *
 * Returns: determined end address within vmap_area
 */
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas. This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 * be scattered pretty far, with the distance between two areas easily
 * going up to gigabytes. To avoid interacting with regular vmallocs,
 * these areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans free blocks from the end looking
 * for a matching base. While scanning, if any of the areas does not
 * fit, the base address is pulled down to fit that area. Scanning is
 * repeated until all the areas fit, and then all necessary data
 * structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end;
	bool purged = false;
	enum fit_type type;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = area + 1; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			BUG_ON(start2 < end && start < end2);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	va = pvm_find_va_enclose_addr(vmalloc_end);
	base = pvm_determine_end_from_reverse(&va, align) - end;

	while (true) {
		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * Fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start || base + end > va->va_end) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one. If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		type = classify_va_fit_type(va, start, size);
		if (WARN_ON_ONCE(type == NOTHING_FIT))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = adjust_va_to_fit_type(va, start, size, type);
		if (unlikely(ret))
			goto recovery;

		/* Allocated area. */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;

		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	}

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);

	kfree(vas);
	return vms;

recovery:
	/* Remove previously inserted areas. */
	while (area--) {
		__free_vmap_area(vas[area]);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = true;

		/* Before "retry", check if we recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
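
/*
 * Usage sketch: the percpu allocator is the only real caller, but the
 * contract is easy to demonstrate. Two two-page areas are requested four
 * pages apart; on success their kernel addresses keep exactly that
 * distance. The function name is hypothetical.
 */
static void example_congruent_pair(void)
{
	const unsigned long offsets[] = { 0, 4 * PAGE_SIZE };
	const size_t sizes[] = { 2 * PAGE_SIZE, 2 * PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
	if (!vms)
		return;
	/* Here vms[1]->addr == vms[0]->addr + 4 * PAGE_SIZE. */
	pcpu_free_vm_areas(vms, 2);
}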

#endif	/* CONFIG_SMP */

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_area_lock)
{
	spin_lock(&vmap_area_lock);
	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
{
	spin_unlock(&vmap_area_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	/*
	 * s_show can race with remove_vm_area(): !VM_VM_AREA means the
	 * vmap area is being torn down, or that it was set up by
	 * vm_map_ram(), which attaches no vm_struct.
	 */
	if (!(va->flags & VM_VM_AREA)) {
		seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start,
			va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");

		return 0;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}
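
/*
 * For reference, a line emitted above for a vmalloc'ed area looks roughly
 * like (addresses hashed by %pK and elided here, caller name invented):
 *
 *	0x...-0x...   20480 example_caller+0x24/0x100 pages=4 vmalloc N0=4
 *
 * Note the size (20480 bytes, five pages) exceeds pages=4 by one page:
 * v->size includes the guard page.
 */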

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);

#endif