/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
        cr_alignment = cr_alignment & ~mask;
        return cr_alignment;
}
#endif

static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

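/*
 * Parse the "initrd=<start>,<size>" command line argument, recording the
 * start address and size of the initrd for arm_initrd_init() below.
 */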
static int __init early_initrd(char *p)
{
        phys_addr_t start;
        unsigned long size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
        return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
        pr_warn("ATAG_INITRD is deprecated; "
                "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

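/*
 * Derive the PFN limits from memblock: [min, max_low) covers lowmem and
 * [max_low, max_high) covers highmem.
 */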
static void __init find_limits(unsigned long *min, unsigned long *max_low,
                               unsigned long *max_high)
{
        *max_low = PFN_DOWN(memblock_get_current_limit());
        *min = PFN_UP(memblock_start_of_DRAM());
        *max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

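/*
 * Carve ZONE_DMA out of the bottom of lowmem: the first dma_size pages go
 * to ZONE_DMA and the remainder to ZONE_NORMAL.
 */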
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
#endif

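/*
 * Record the machine's DMA zone size and set arm_dma_limit and
 * arm_dma_pfn_limit accordingly; without a dma_zone_size, DMA allocations
 * are limited only by the 32-bit address space.
 */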
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
                arm_dma_zone_size = mdesc->dma_zone_size;
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
        arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined.  If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef CONFIG_ZONE_DMA
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        if (arm_dma_zone_size)
                arm_adjust_dma_zone(zone_size, zhole_size,
                        arm_dma_zone_size >> PAGE_SHIFT);
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

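/*
 * Allocate a region from memblock and then drop it from the system memory
 * map entirely, so it never reaches the page allocator.  Only legal while
 * arm_memblock_steal_permitted, i.e. before arm_memblock_init() completes.
 */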
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
        phys_addr_t phys;

        BUG_ON(!arm_memblock_steal_permitted);

        phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
        memblock_free(phys, size);
        memblock_remove(phys, size);

        return phys;
}

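/*
 * Validate the initrd region discovered via FDT, ATAGs or the command line,
 * reserve it in memblock, and set up initrd_start/initrd_end.
 */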
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        phys_addr_t start;
        unsigned long size;

        /* FDT scan will populate initrd_start */
        if (initrd_start && !phys_initrd_size) {
                phys_initrd_start = __virt_to_phys(initrd_start);
                phys_initrd_size = initrd_end - initrd_start;
        }

        initrd_start = initrd_end = 0;

        if (!phys_initrd_size)
                return;

        /*
         * Round the memory region to page boundaries as per free_initrd_mem()
         * This allows us to detect whether the pages overlapping the initrd
         * are in use, but more importantly, reserves the entire set of pages
         * as we don't want these pages allocated for other purposes.
         */
        start = round_down(phys_initrd_start, PAGE_SIZE);
        size = phys_initrd_size + (phys_initrd_start - start);
        size = round_up(size, PAGE_SIZE);

        if (!memblock_is_region_memory(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
                       (u64)start, size);
                return;
        }

        if (memblock_is_region_reserved(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       (u64)start, size);
                return;
        }

        memblock_reserve(start, size);

        /* Now convert initrd to virtual addresses */
        initrd_start = __phys_to_virt(phys_initrd_start);
        initrd_end = initrd_start + phys_initrd_size;
#endif
}

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
        /* Register the kernel text, kernel data and initrd with memblock. */
        memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

        arm_initrd_init();

        arm_mm_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        early_init_fdt_reserve_self();
        early_init_fdt_scan_reserved_mem();

        /* reserve memory for DMA contiguous allocations */
        dma_contiguous_reserve(arm_dma_limit);

        arm_memblock_steal_permitted = false;
        memblock_dump_all();
}

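/*
 * Now that memblock is fully populated, initialise the sparse memory model
 * and the zone sizes, and record the global PFN limits.
 */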
void __init bootmem_init(void)
{
        unsigned long min, max_low, max_high;

        memblock_allow_resize();
        max_low = max_high = 0;

        find_limits(&min, &max_low, &max_high);

        early_memtest((phys_addr_t)min << PAGE_SHIFT,
                      (phys_addr_t)max_low << PAGE_SHIFT);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so must be done after the fixed reservations
         */
        arm_memory_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        zone_sizes_init(min, max_low, max_high);

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         */
        min_low_pfn = min;
        max_low_pfn = max_low;
        max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;
        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        phys_addr_t pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
        unsigned long start, prev_end = 0;
        struct memblock_region *reg;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in bootmem_init().
         */
        for_each_memblock(memory, reg) {
                start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                start = min(start,
                            ALIGN(prev_end, PAGES_PER_SECTION));
#else
                /*
                 * Align down here since the VM subsystem insists that the
                 * memmap entries are valid from the bank start aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_end && prev_end < start)
                        free_memmap(prev_end, start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
                                 MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
                free_memmap(prev_end,
                            ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
        for (; pfn < end; pfn++)
                free_highmem_page(pfn_to_page(pfn));
}
#endif

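/*
 * Hand every highmem page that is not reserved in memblock over to the
 * page allocator.
 */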
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn;
        struct memblock_region *mem, *res;

        /* set highmem page free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                if (memblock_is_nomap(mem))
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                free_area_high(start, res_start);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        free_area_high(start, end);
        }
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
        /* These pointers are filled in on TCM detection */
        extern u32 dtcm_end;
        extern u32 itcm_end;
#endif

        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

        /* this will put all unused low memory onto the freelists */
        free_unused_memmap();
        free_all_bootmem();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

        free_highpages();

        mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        pr_notice("Virtual kernel memory layout:\n"
                        "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
                        "    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                        "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
                        "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                        "      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
                        "      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
                        "      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
                        "       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",

                        MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
                        MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
                        MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
                        MLK(FIXADDR_START, FIXADDR_END),
                        MLM(VMALLOC_START, VMALLOC_END),
                        MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
                        MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
                                (PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
                        MLM(MODULES_VADDR, MODULES_END),
#endif

                        MLK_ROUNDUP(_text, _etext),
                        MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_sdata, _edata),
                        MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
        BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
        const char *name;
        unsigned long start;
        unsigned long end;
        pmdval_t mask;
        pmdval_t prot;
        pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
        /* Make pages tables, etc before _stext RW (set NX). */
        {
                .name   = "pre-text NX",
                .start  = PAGE_OFFSET,
                .end    = (unsigned long)_stext,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
        /* Make init RW (set NX). */
        {
                .name   = "init NX",
                .start  = (unsigned long)__init_begin,
                .end    = (unsigned long)_sdata,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
        /* Make rodata NX (set RO in ro_perms below). */
        {
                .name   = "rodata NX",
                .start  = (unsigned long)__start_rodata_section_aligned,
                .end    = (unsigned long)__init_begin,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
};

static struct section_perm ro_perms[] = {
        /* Make kernel code and rodata RX (set RO). */
        {
                .name   = "text/rodata RO",
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
                .mask   = ~L_PMD_SECT_RDONLY,
                .prot   = L_PMD_SECT_RDONLY,
#else
                .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
                .clear  = PMD_SECT_AP_WRITE,
#endif
        },
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
                                  pmdval_t prot, struct mm_struct *mm)
{
        pmd_t *pmd;

        pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
        pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
        if (addr & SECTION_SIZE)
                pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
        else
                pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
        flush_pmd_entry(pmd);
        local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
        if (cpu_architecture() < CPU_ARCH_ARMv6)
                return false;

        return !!(get_cr() & CR_XP);
}

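/*
 * Walk each entry in perms[] and update the covered section mappings in the
 * given mm, applying .prot when set is true or .clear when it is false.
 * Entries that are not section-aligned are reported and skipped.
 */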
void set_section_perms(struct section_perm *perms, int n, bool set,
                        struct mm_struct *mm)
{
        size_t i;
        unsigned long addr;

        if (!arch_has_strict_perms())
                return;

        for (i = 0; i < n; i++) {
                if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
                    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
                        pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
                                perms[i].name, perms[i].start, perms[i].end,
                                SECTION_SIZE);
                        continue;
                }

                for (addr = perms[i].start;
                     addr < perms[i].end;
                     addr += SECTION_SIZE)
                        section_update(addr, perms[i].mask,
                                       set ? perms[i].prot : perms[i].clear, mm);
        }

}

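/*
 * Apply the permission update to the mm of every user process as well as
 * the current active mm and init_mm.  Called from the stop_machine()
 * callbacks below, so the update does not race with the other CPUs.
 */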
static void update_sections_early(struct section_perm perms[], int n)
{
        struct task_struct *t, *s;

        read_lock(&tasklist_lock);
        for_each_process(t) {
                if (t->flags & PF_KTHREAD)
                        continue;
                for_each_thread(t, s)
                        set_section_perms(perms, n, true, s->mm);
        }
        read_unlock(&tasklist_lock);
        set_section_perms(perms, n, true, current->active_mm);
        set_section_perms(perms, n, true, &init_mm);
}

int __fix_kernmem_perms(void *unused)
{
        update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
        return 0;
}

void fix_kernmem_perms(void)
{
        stop_machine(__fix_kernmem_perms, NULL, NULL);
}

int __mark_rodata_ro(void *unused)
{
        update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
        return 0;
}

void mark_rodata_ro(void)
{
        stop_machine(__mark_rodata_ro, NULL, NULL);
}

void set_kernel_text_rw(void)
{
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
                                current->active_mm);
}

void set_kernel_text_ro(void)
{
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
                                current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;

        poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
        free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
}

void free_initmem(void)
{
        fix_kernmem_perms();
        free_tcmmem();

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd) {
                if (start == initrd_start)
                        start = round_down(start, PAGE_SIZE);
                if (end == initrd_end)
                        end = round_up(end, PAGE_SIZE);

                poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
                free_reserved_area((void *)start, (void *)end, -1, "initrd");
        }
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif