// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
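/*
 * Clear the given bits in the kernel's cached copy of the CP15 control
 * register and return the new value; the caller is responsible for
 * writing it back to the hardware register if required.
 */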
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

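/*
 * Derive the boot-time PFN limits from memblock: *min is the first page
 * of DRAM, *max_low the top of lowmem (memblock's current allocation
 * limit), and *max_high the last page of DRAM.
 */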
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

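/*
 * Pick up the machine-specific DMA zone size, if any. When the machine
 * descriptor does not request one, DMA is unrestricted (a full 32-bit
 * limit).
 */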
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

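/*
 * Build the per-zone size and hole arrays from the memblock memory
 * regions and hand them to free_area_init_node(). A zone's hole is
 * whatever part of its PFN span is not covered by any memblock region.
 */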
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

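/*
 * With CONFIG_HAVE_ARCH_PFN_VALID, a PFN is valid only when it falls
 * inside a memblock memory region that is actually mapped; NOMAP
 * regions are rejected by memblock_is_map_memory().
 */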
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

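/*
 * Carve a region out of memblock entirely: the stolen memory is removed
 * from the kernel's view of RAM, so it gets no memmap entries and is
 * never seen by the page allocator. Stealing is only permitted before
 * arm_memblock_init() completes; afterwards it is a BUG.
 */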
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

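/*
 * Ordering here matters: the PFN limits must be known before
 * early_memtest() can probe lowmem, and sparse_init() must run before
 * zone_sizes_init() so that the mem_map arrays exist when the zones
 * are initialised.
 */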
void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	memblocks_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

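/*
 * Release the mem_map entries covering the PFN range [start_pfn,
 * end_pfn) back to memblock, taking care to free only whole pages of
 * the memmap array itself.
 */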
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

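/*
 * Hand every usable highmem page to the page allocator: walk the memory
 * regions above the lowmem limit, skip NOMAP regions, and punch out any
 * ranges that memblock has reserved.
 */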
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
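/*
 * Kernel memory is mapped with section granularity (1MiB sections, or
 * 2MiB blocks with LPAE), so permissions can only be changed per
 * section. Each entry below names a virtual address range plus the PMD
 * bits to mask out and the value to set when applying (.prot) or
 * relaxing (.clear) the permission.
 */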
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

void set_section_perms(struct section_perm *perms, int n, bool set,
		       struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
			       perms[i].name, perms[i].start, perms[i].end,
			       SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework, on a single CPU while all other CPUs spin
 * and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
			  current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
			  current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

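/*
 * Apply the final NX permissions to the kernel mapping, then poison the
 * init sections and release them to the page allocator.
 */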
void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
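/*
 * The initrd reservation was rounded to page boundaries in
 * arm_initrd_init(), so widen the range back to whole pages here before
 * poisoning and freeing it.
 */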
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif