/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

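/*
 * Parse the "initrd=<start>,<size>" command line parameter, recording the
 * physical start address and size of the initial ramdisk.
 */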
static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

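/*
 * Return the page frame number limits of the available memory: the first
 * page of DRAM, the top of lowmem (the current memblock limit) and the
 * top of all DRAM, which may lie in highmem.
 */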
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

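/*
 * Derive the DMA zone limits from the machine description.  If the machine
 * requests a DMA zone, GFP_DMA allocations are limited to the first
 * dma_zone_size bytes of RAM; otherwise no restriction is applied.
 */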
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

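/*
 * Steal memory from memblock for exclusive platform use: the region is
 * freed and then removed from memblock entirely, so the kernel no longer
 * treats it as RAM.  Only permitted before arm_memblock_init() completes.
 */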
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

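/*
 * Locate the initrd (from the FDT, ATAGs or the "initrd=" parameter),
 * check that it lies inside usable, unreserved memory, and reserve it in
 * memblock so its pages are not handed out for other purposes.
 */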
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

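/*
 * Set up the zone sizes and memory map: find the PFN limits, run the early
 * memory test, tell sparsemem which memory is present, and initialise the
 * zone data for node 0.
 */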
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	early_memtest((phys_addr_t)min << PAGE_SHIFT,
		      (phys_addr_t)max_low << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

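/*
 * Release all highmem pages that are neither reserved nor marked NOMAP
 * to the page allocator.
 */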
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",

			MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_END),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make pages tables, etc before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

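/*
 * Apply each permission update in @perms to the given mm: when @set is
 * true the .prot bits are applied, otherwise the .clear value is used.
 * Ranges that are not section-aligned are skipped with an error.
 */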
void set_section_perms(struct section_perm *perms, int n, bool set,
			struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}

}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework, and is executed by only one CPU while all
 * other CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

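/*
 * Temporarily drop (and later restore) the read-only protection on kernel
 * text/rodata for the current mm, once mark_rodata_ro() has run.
 */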
void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

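/*
 * Poison and free the initrd pages back to the page allocator, unless
 * "keepinitrd" was given on the command line.
 */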
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		if (start == initrd_start)
			start = round_down(start, PAGE_SIZE);
		if (end == initrd_end)
			end = round_up(end, PAGE_SIZE);

		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif