/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

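/*
 * Derive the page-frame limits the rest of bootmem_init() works with:
 * the first usable PFN, the top of lowmem, and the top of all RAM,
 * all taken from the memblock view of memory.
 */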
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

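/*
 * Carve the first dma_size pages of zone 0 out into ZONE_DMA, moving
 * the remainder (and the hole accounting) up into ZONE_NORMAL.
 */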
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

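/*
 * If the machine descriptor declares a DMA zone, derive arm_dma_limit
 * and arm_dma_pfn_limit from it; otherwise GFP_DMA allocations may come
 * from anywhere in the 32-bit physical address space.
 */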
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

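/*
 * Populate the zone sizes and hole sizes for the single node, then hand
 * them to the core VM via free_area_init_node().
 */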
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
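/*
 * Answer pfn_valid() straight from the memblock map, so that holes
 * between banks and regions marked nomap are reported as invalid.
 */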
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

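/*
 * Permanently remove a chunk of RAM from the kernel's view, for
 * platforms that need memory the kernel must never touch.  This is only
 * legal before arm_memblock_init() completes - typically from a
 * machine's mdesc->reserve() callback.  A (hypothetical) platform hook
 * might do something like:
 *
 *	static void __init foo_reserve(void)
 *	{
 *		// foo_fw_base is a platform-private phys_addr_t
 *		foo_fw_base = arm_memblock_steal(SZ_1M, SZ_1M);
 *	}
 */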
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

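/*
 * Reserve the physical pages covering the initrd and set up
 * initrd_start/initrd_end, or disable the initrd if it does not sit in
 * usable, unreserved RAM.
 */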
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

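/*
 * Establish the global PFN limits, run the early memory test, and bring
 * up sparsemem and the zone lists, in that order.
 */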
void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

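/*
 * Free the part of the memmap array covering the PFN range
 * [start_pfn, end_pfn) back to memblock.
 */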
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

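/*
 * Hand every usable highmem page to the page allocator, skipping nomap
 * entries and anything memblock still has reserved.
 */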
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
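/*
 * Describes one section-aligned region of the kernel image together
 * with the PMD bits to mask out and to set (or clear again) when its
 * permissions are updated.
 */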
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm).  During startup, this is the init_mm.  This is
 * only safe to call with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

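/*
 * Apply (set) or revert (!set) each permission entry to every section
 * mapping in the given mm.  Entries that are not section-aligned are
 * reported and skipped rather than partially applied.
 */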
void set_section_perms(struct section_perm *perms, int n, bool set,
		       struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
			       perms[i].name, perms[i].start, perms[i].end,
			       SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework, executed by a single CPU while all other
 * CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
			  current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
			  current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

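/* Set by the "keepinitrd" command line option (see keepinitrd_setup below). */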
static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		if (start == initrd_start)
			start = round_down(start, PAGE_SIZE);
		if (end == initrd_end)
			end = round_up(end, PAGE_SIZE);

		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif