// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; this is where the system allocates free pages.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/padata.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/* work_structs for global per-cpu drains */
struct pcpu_drain {
	struct zone *zone;
	struct work_struct work;
};
static DEFINE_MUTEX(pcpu_drain_mutex);
static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
DEFINE_STATIC_KEY_TRUE(init_on_alloc);
#else
DEFINE_STATIC_KEY_FALSE(init_on_alloc);
#endif
EXPORT_SYMBOL(init_on_alloc);

#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
DEFINE_STATIC_KEY_TRUE(init_on_free);
#else
DEFINE_STATIC_KEY_FALSE(init_on_free);
#endif
EXPORT_SYMBOL(init_on_free);

static int __init early_init_on_alloc(char *buf)
{
	int ret;
	bool bool_result;

	if (!buf)
		return -EINVAL;
	ret = kstrtobool(buf, &bool_result);
	if (bool_result && page_poisoning_enabled())
		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_alloc\n");
	if (bool_result)
		static_branch_enable(&init_on_alloc);
	else
		static_branch_disable(&init_on_alloc);
	return ret;
}
early_param("init_on_alloc", early_init_on_alloc);

static int __init early_init_on_free(char *buf)
{
	int ret;
	bool bool_result;

	if (!buf)
		return -EINVAL;
	ret = kstrtobool(buf, &bool_result);
	if (bool_result && page_poisoning_enabled())
		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_free\n");
	if (bool_result)
		static_branch_enable(&init_on_free);
	else
		static_branch_disable(&init_on_free);
	return ret;
}
early_param("init_on_free", early_init_on_free);

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
	[NULL_COMPOUND_DTOR] = NULL,
	[COMPOUND_PAGE_DTOR] = free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	[HUGETLB_PAGE_DTOR] = free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
#ifdef CONFIG_DISCONTIGMEM
/*
 * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
 * are not on separate NUMA nodes. Functionally this works but with
 * watermark_boost_factor, it can reclaim prematurely as the ranges can be
 * quite small. By default, do not boost watermarks on discontigmem as in
 * many cases very high-order allocations like THP are likely to be
 * unsupported and the premature reclaim offsets the advantage of long-term
 * fragmentation avoidance.
 */
int watermark_boost_factor __read_mostly;
#else
int watermark_boost_factor __read_mostly = 15000;
#endif
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
static bool mirrored_kernelcore __meminitdata;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

/*
 * Call kasan_free_pages() only after deferred memory initialization has
 * completed. Poisoning pages during deferred memory init would greatly
 * lengthen the process and cause problems in large memory systems, as the
 * deferred page initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline void kasan_free_nondeferred_pages(struct page *page, int order)
{
	if (!static_branch_unlikely(&deferred_pages))
		kasan_free_pages(page, order);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	/*
	 * prev_end_pfn is static and holds the end of the previous zone.
	 * No need to protect it: this is called very early in boot, before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
#else
#define kasan_free_nondeferred_pages(p, o)	kasan_free_pages(p, o)

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
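
/*
 * Worked example (illustrative; assumes pageblock_order == 9, a common
 * hugetlb-backed value, and NR_PAGEBLOCK_BITS == 4): a pfn that is 0x2345
 * pages into its section lies in pageblock 0x2345 >> 9 == 0x11 of that
 * section, so its flags start at bit index 0x11 * 4 == 68 of the section's
 * usemap (three migratetype bits plus the skip bit per pageblock).
 */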

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits are a pointer to
 * the head page.
 *
 * The first tail page's ->compound_dtor holds the offset in array of compound
 * page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
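
/*
 * For example, an order-2 compound page spans four struct pages: the head
 * page has PG_head set, and each of the three tail pages stores a pointer
 * to the head in ->compound_head with bit 0 set so PageTail() is true.
 */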

void free_compound_page(struct page *page)
{
	mem_cgroup_uncharge(page);
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
	if (hpage_pincount_available(page))
		atomic_set(compound_pincount_ptr(page), 0);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

bool _debug_pagealloc_enabled_early __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
EXPORT_SYMBOL(_debug_pagealloc_enabled);

DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static int __init early_debug_pagealloc(char *buf)
{
	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
}
early_param("debug_pagealloc", early_debug_pagealloc);

void init_debug_pagealloc(void)
{
	if (!debug_pagealloc_enabled())
		return;

	static_branch_enable(&_debug_pagealloc_enabled);

	if (!debug_guardpage_minorder())
		return;

	static_branch_enable(&_debug_guardpage_enabled);
}

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	__SetPageGuard(page);
	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return;

	__ClearPageGuard(page);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

/*
 * This function checks whether a page is free && is the buddy, i.e. whether
 * a page and its buddy can be coalesced. That is the case if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (page_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages which are on another list */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move(&page->lru, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->lru);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case that is
 * happening, add the free page to the tail of the list so it's less
 * likely to be used soon and more likely to be merged as a higher-order
 * page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	struct page *higher_page, *higher_buddy;
	unsigned long combined_pfn;

	if (order >= MAX_ORDER - 2)
		return false;

	if (!pfn_valid_within(buddy_pfn))
		return false;

	combined_pfn = buddy_pfn & pfn;
	higher_page = page + (combined_pfn - pfn);
	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
	higher_buddy = higher_page + (buddy_pfn - combined_pfn);

	return pfn_valid_within(buddy_pfn) &&
	       page_is_buddy(higher_page, higher_buddy, order + 1);
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PageBuddy.
 * A page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
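
/*
 * Illustrative walk-through of the merging done below: freeing pfn 8 at
 * order 0 gives buddy_pfn 9 (8 ^ (1 << 0)); if that page is free, the pair
 * merges into an order-1 block at pfn 8, whose own buddy is the order-1
 * block at pfn 10 (8 ^ (1 << 1)), and merging continues upward until a
 * buddy is not free or max_order is reached.
 */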

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, bool report)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn;
	unsigned long combined_pfn;
	unsigned int max_order;
	struct page *buddy;
	bool to_tail;

	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order - 1) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}
		buddy_pfn = __find_buddy_pfn(pfn, order);
		buddy = page + (buddy_pfn - pfn);

		if (!pfn_valid_within(buddy_pfn))
			goto done_merging;
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}
	if (max_order < MAX_ORDER) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merge between freepages on isolate
		 * pageblock and normal pageblock. Without this, pageblock
		 * isolation could cause incorrect freepage or CMA accounting.
		 *
		 * We don't want to hit this code for the more frequent
		 * low-order merging.
		 */
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order++;
		goto continue_merging;
	}

done_merging:
	set_page_order(page, order);

	if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (report)
		page_reporting_notify_free(order);
}

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page->mem_cgroup |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void check_free_page_bad(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline int check_free_page(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	check_free_page_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping may be compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static void kernel_init_free_pages(struct page *page, int numpages)
{
	int i;

	for (i = 0; i < numpages; i++)
		clear_highpage(page + i);
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageDoubleMap(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(check_free_page(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageKmemcg(page))
		__memcg_kmem_uncharge_page(page, order);
	if (check_free)
		bad += check_free_page(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	if (want_init_on_free())
		kernel_init_free_pages(page, 1 << order);

	kernel_poison_pages(page, 1 << order, 0);
	/*
	 * arch_free_page() can make the page's contents inaccessible.  s390
	 * does this.  So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	if (debug_pagealloc_enabled_static())
		kernel_map_pages(page, 1 << order, 0);

	kasan_free_nondeferred_pages(page, order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
 * moved from pcp lists to free lists.
 */
static bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, true);
}

static bool bulkfree_pcp_prepare(struct page *page)
Mel Gorman4db75482016-05-19 17:14:32 -07001251{
Vlastimil Babka8e57f8a2020-01-13 16:29:20 -08001252 if (debug_pagealloc_enabled_static())
Wei Yang534fe5e2020-06-03 15:58:36 -07001253 return check_free_page(page);
Vlastimil Babka4462b322019-07-11 20:55:09 -07001254 else
1255 return false;
Mel Gorman4db75482016-05-19 17:14:32 -07001256}
1257#else
Vlastimil Babka4462b322019-07-11 20:55:09 -07001258/*
1259 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1260 * moving from pcp lists to free list in order to reduce overhead. With
1261 * debug_pagealloc enabled, they are checked also immediately when being freed
1262 * to the pcp lists.
1263 */
Mel Gorman4db75482016-05-19 17:14:32 -07001264static bool free_pcp_prepare(struct page *page)
1265{
Vlastimil Babka8e57f8a2020-01-13 16:29:20 -08001266 if (debug_pagealloc_enabled_static())
Vlastimil Babka4462b322019-07-11 20:55:09 -07001267 return free_pages_prepare(page, 0, true);
1268 else
1269 return free_pages_prepare(page, 0, false);
Mel Gorman4db75482016-05-19 17:14:32 -07001270}
1271
1272static bool bulkfree_pcp_prepare(struct page *page)
1273{
Wei Yang534fe5e2020-06-03 15:58:36 -07001274 return check_free_page(page);
Mel Gorman4db75482016-05-19 17:14:32 -07001275}
1276#endif /* CONFIG_DEBUG_VM */
1277
Aaron Lu97334162018-04-05 16:24:14 -07001278static inline void prefetch_buddy(struct page *page)
1279{
1280 unsigned long pfn = page_to_pfn(page);
1281 unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
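	/* At order 0 the buddy pfn is simply pfn ^ 1, i.e. the neighbouring page. */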
1282 struct page *buddy = page + (buddy_pfn - pfn);
1283
1284 prefetch(buddy);
1285}
1286
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287/*
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001288 * Frees a number of pages from the PCP lists
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 * Assumes all pages on list are in same zone, and of same order.
Renaud Lienhart207f36e2005-09-10 00:26:59 -07001290 * count is the number of pages to free.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 *
1292 * If the zone was previously in an "all pages pinned" state then look to
1293 * see if this freeing clears that state.
1294 *
1295 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1296 * pinned" detection logic.
1297 */
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001298static void free_pcppages_bulk(struct zone *zone, int count,
1299 struct per_cpu_pages *pcp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300{
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001301 int migratetype = 0;
Mel Gormana6f9edd62009-09-21 17:03:20 -07001302 int batch_free = 0;
Aaron Lu97334162018-04-05 16:24:14 -07001303 int prefetch_nr = 0;
Mel Gorman37779992016-05-19 17:13:58 -07001304 bool isolated_pageblocks;
Aaron Lu0a5f4e52018-04-05 16:24:10 -07001305 struct page *page, *tmp;
1306 LIST_HEAD(head);
Mel Gormanf2260e62009-06-16 15:32:13 -07001307
Mel Gormane5b31ac2016-05-19 17:14:24 -07001308 while (count) {
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001309 struct list_head *list;
Nick Piggin48db57f2006-01-08 01:00:42 -08001310
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001311 /*
Mel Gormana6f9edd62009-09-21 17:03:20 -07001312 * Remove pages from lists in a round-robin fashion. A
1313 * batch_free count is maintained that is incremented when an
1314 * empty list is encountered. This is so more pages are freed
1315 * off fuller lists instead of spinning excessively around empty
1316 * lists
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001317 */
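		/*
		 * For example, if only the MOVABLE list still has pages,
		 * batch_free climbs to MIGRATE_PCPTYPES and the whole
		 * remaining count is then taken from that single list below.
		 */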
1318 do {
Mel Gormana6f9edd62009-09-21 17:03:20 -07001319 batch_free++;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001320 if (++migratetype == MIGRATE_PCPTYPES)
1321 migratetype = 0;
1322 list = &pcp->lists[migratetype];
1323 } while (list_empty(list));
1324
Namhyung Kim1d168712011-03-22 16:32:45 -07001325 /* This is the only non-empty list. Free them all. */
1326 if (batch_free == MIGRATE_PCPTYPES)
Mel Gormane5b31ac2016-05-19 17:14:24 -07001327 batch_free = count;
Namhyung Kim1d168712011-03-22 16:32:45 -07001328
Mel Gormana6f9edd62009-09-21 17:03:20 -07001329 do {
Geliang Tanga16601c2016-01-14 15:20:30 -08001330 page = list_last_entry(list, struct page, lru);
Aaron Lu0a5f4e52018-04-05 16:24:10 -07001331 /* must delete to avoid corrupting pcp list */
Mel Gormana6f9edd62009-09-21 17:03:20 -07001332 list_del(&page->lru);
Aaron Lu77ba9062018-04-05 16:24:06 -07001333 pcp->count--;
Vlastimil Babkaaa016d12015-09-08 15:01:22 -07001334
Mel Gorman4db75482016-05-19 17:14:32 -07001335 if (bulkfree_pcp_prepare(page))
1336 continue;
1337
Aaron Lu0a5f4e52018-04-05 16:24:10 -07001338 list_add_tail(&page->lru, &head);
Aaron Lu97334162018-04-05 16:24:14 -07001339
1340 /*
 1341			 * We are going to put the page back into the global
 1342			 * pool, so prefetch its buddy to speed up later access
1343 * under zone->lock. It is believed the overhead of
1344 * an additional test and calculating buddy_pfn here
1345 * can be offset by reduced memory latency later. To
1346 * avoid excessive prefetching due to large count, only
1347 * prefetch buddy for the first pcp->batch nr of pages.
1348 */
1349 if (prefetch_nr++ < pcp->batch)
1350 prefetch_buddy(page);
Mel Gormane5b31ac2016-05-19 17:14:24 -07001351 } while (--count && --batch_free && !list_empty(list));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 }
Aaron Lu0a5f4e52018-04-05 16:24:10 -07001353
1354 spin_lock(&zone->lock);
1355 isolated_pageblocks = has_isolate_pageblock(zone);
1356
1357 /*
1358 * Use safe version since after __free_one_page(),
1359 * page->lru.next will not point to original list.
1360 */
1361 list_for_each_entry_safe(page, tmp, &head, lru) {
1362 int mt = get_pcppage_migratetype(page);
1363 /* MIGRATE_ISOLATE page should not go to pcplists */
1364 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1365 /* Pageblock could have been isolated meanwhile */
1366 if (unlikely(isolated_pageblocks))
1367 mt = get_pageblock_migratetype(page);
1368
Alexander Duyck36e66c52020-04-06 20:04:56 -07001369 __free_one_page(page, page_to_pfn(page), zone, 0, mt, true);
Aaron Lu0a5f4e52018-04-05 16:24:10 -07001370 trace_mm_page_pcpu_drain(page, 0, mt);
1371 }
Mel Gormand34b0732017-04-20 14:37:43 -07001372 spin_unlock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373}
1374
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001375static void free_one_page(struct zone *zone,
1376 struct page *page, unsigned long pfn,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001377 unsigned int order,
Mel Gormaned0ae212009-06-16 15:32:07 -07001378 int migratetype)
Nick Piggin48db57f2006-01-08 01:00:42 -08001379{
Mel Gormand34b0732017-04-20 14:37:43 -07001380 spin_lock(&zone->lock);
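	/*
	 * The pageblock's migratetype may have been changed by isolation after
	 * the caller looked it up, so re-read it under zone->lock in that case.
	 */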
Joonsoo Kimad53f922014-11-13 15:19:11 -08001381 if (unlikely(has_isolate_pageblock(zone) ||
1382 is_migrate_isolate(migratetype))) {
1383 migratetype = get_pfnblock_migratetype(page, pfn);
Joonsoo Kimad53f922014-11-13 15:19:11 -08001384 }
Alexander Duyck36e66c52020-04-06 20:04:56 -07001385 __free_one_page(page, pfn, zone, order, migratetype, true);
Mel Gormand34b0732017-04-20 14:37:43 -07001386 spin_unlock(&zone->lock);
Nick Piggin48db57f2006-01-08 01:00:42 -08001387}
1388
Robin Holt1e8ce832015-06-30 14:56:45 -07001389static void __meminit __init_single_page(struct page *page, unsigned long pfn,
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07001390 unsigned long zone, int nid)
Robin Holt1e8ce832015-06-30 14:56:45 -07001391{
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07001392 mm_zero_struct_page(page);
Robin Holt1e8ce832015-06-30 14:56:45 -07001393 set_page_links(page, zone, nid, pfn);
Robin Holt1e8ce832015-06-30 14:56:45 -07001394 init_page_count(page);
1395 page_mapcount_reset(page);
1396 page_cpupid_reset_last(page);
Andrey Konovalov2813b9c2018-12-28 00:30:57 -08001397 page_kasan_tag_reset(page);
Robin Holt1e8ce832015-06-30 14:56:45 -07001398
Robin Holt1e8ce832015-06-30 14:56:45 -07001399 INIT_LIST_HEAD(&page->lru);
1400#ifdef WANT_PAGE_VIRTUAL
1401 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1402 if (!is_highmem_idx(zone))
1403 set_page_address(page, __va(pfn << PAGE_SHIFT));
1404#endif
1405}
1406
Mel Gorman7e18adb2015-06-30 14:57:05 -07001407#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Arnd Bergmann57148a62017-10-03 16:15:10 -07001408static void __meminit init_reserved_page(unsigned long pfn)
Mel Gorman7e18adb2015-06-30 14:57:05 -07001409{
1410 pg_data_t *pgdat;
1411 int nid, zid;
1412
1413 if (!early_page_uninitialised(pfn))
1414 return;
1415
1416 nid = early_pfn_to_nid(pfn);
1417 pgdat = NODE_DATA(nid);
1418
1419 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1420 struct zone *zone = &pgdat->node_zones[zid];
1421
1422 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1423 break;
1424 }
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07001425 __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001426}
1427#else
1428static inline void init_reserved_page(unsigned long pfn)
1429{
1430}
1431#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1432
Nathan Zimmer92923ca32015-06-30 14:56:48 -07001433/*
1434 * Initialised pages do not have PageReserved set. This function is
1435 * called for each range allocated by the bootmem allocator and
1436 * marks the pages PageReserved. The remaining valid pages are later
1437 * sent to the buddy page allocator.
1438 */
Stefan Bader4b50bcc2016-05-20 16:58:38 -07001439void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
Nathan Zimmer92923ca32015-06-30 14:56:48 -07001440{
1441 unsigned long start_pfn = PFN_DOWN(start);
1442 unsigned long end_pfn = PFN_UP(end);
1443
Mel Gorman7e18adb2015-06-30 14:57:05 -07001444 for (; start_pfn < end_pfn; start_pfn++) {
1445 if (pfn_valid(start_pfn)) {
1446 struct page *page = pfn_to_page(start_pfn);
1447
1448 init_reserved_page(start_pfn);
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -08001449
1450 /* Avoid false-positive PageTail() */
1451 INIT_LIST_HEAD(&page->lru);
1452
Alexander Duyckd483da52018-10-26 15:07:48 -07001453 /*
1454 * no need for atomic set_bit because the struct
1455 * page is not visible yet so nobody should
1456 * access it yet.
1457 */
1458 __SetPageReserved(page);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001459 }
1460 }
Nathan Zimmer92923ca32015-06-30 14:56:48 -07001461}
1462
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001463static void __free_pages_ok(struct page *page, unsigned int order)
1464{
Mel Gormand34b0732017-04-20 14:37:43 -07001465 unsigned long flags;
Minchan Kim95e34412012-10-08 16:32:11 -07001466 int migratetype;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001467 unsigned long pfn = page_to_pfn(page);
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001468
Mel Gormane2769db2016-05-19 17:14:38 -07001469 if (!free_pages_prepare(page, order, true))
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001470 return;
1471
Mel Gormancfc47a22014-06-04 16:10:19 -07001472 migratetype = get_pfnblock_migratetype(page, pfn);
Mel Gormand34b0732017-04-20 14:37:43 -07001473 local_irq_save(flags);
1474 __count_vm_events(PGFREE, 1 << order);
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001475 free_one_page(page_zone(page), page, pfn, order, migratetype);
Mel Gormand34b0732017-04-20 14:37:43 -07001476 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477}
1478
Arun KSa9cd4102019-03-05 15:42:14 -08001479void __free_pages_core(struct page *page, unsigned int order)
David Howellsa226f6c2006-01-06 00:11:08 -08001480{
Johannes Weinerc3993072012-01-10 15:08:10 -08001481 unsigned int nr_pages = 1 << order;
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001482 struct page *p = page;
Johannes Weinerc3993072012-01-10 15:08:10 -08001483 unsigned int loop;
David Howellsa226f6c2006-01-06 00:11:08 -08001484
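	/* Prefetch the next struct page for write while the current one is being cleared. */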
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001485 prefetchw(p);
1486 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1487 prefetchw(p + 1);
Johannes Weinerc3993072012-01-10 15:08:10 -08001488 __ClearPageReserved(p);
1489 set_page_count(p, 0);
David Howellsa226f6c2006-01-06 00:11:08 -08001490 }
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001491 __ClearPageReserved(p);
1492 set_page_count(p, 0);
Johannes Weinerc3993072012-01-10 15:08:10 -08001493
Arun KS9705bea2018-12-28 00:34:24 -08001494 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
Johannes Weinerc3993072012-01-10 15:08:10 -08001495 set_page_refcounted(page);
1496 __free_pages(page, order);
David Howellsa226f6c2006-01-06 00:11:08 -08001497}
1498
Mike Rapoport3f08a302020-06-03 15:57:02 -07001499#ifdef CONFIG_NEED_MULTIPLE_NODES
Mel Gorman7ace9912015-08-06 15:46:13 -07001500
Mel Gorman75a592a2015-06-30 14:56:59 -07001501static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1502
Mike Rapoport6f24fbd2020-06-03 15:56:57 -07001503#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
1504
1505/*
1506 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1507 */
1508int __meminit __early_pfn_to_nid(unsigned long pfn,
1509 struct mminit_pfnnid_cache *state)
1510{
1511 unsigned long start_pfn, end_pfn;
1512 int nid;
1513
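	/* Fast path: the last memblock range found is cached and usually still covers this pfn. */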
1514 if (state->last_start <= pfn && pfn < state->last_end)
1515 return state->last_nid;
1516
1517 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1518 if (nid != NUMA_NO_NODE) {
1519 state->last_start = start_pfn;
1520 state->last_end = end_pfn;
1521 state->last_nid = nid;
1522 }
1523
1524 return nid;
1525}
1526#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
1527
Mel Gorman75a592a2015-06-30 14:56:59 -07001528int __meminit early_pfn_to_nid(unsigned long pfn)
1529{
Mel Gorman7ace9912015-08-06 15:46:13 -07001530 static DEFINE_SPINLOCK(early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001531 int nid;
1532
Mel Gorman7ace9912015-08-06 15:46:13 -07001533 spin_lock(&early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001534 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
Mel Gorman7ace9912015-08-06 15:46:13 -07001535 if (nid < 0)
Mel Gormane4568d32016-07-14 12:07:20 -07001536 nid = first_online_node;
Mel Gorman7ace9912015-08-06 15:46:13 -07001537 spin_unlock(&early_pfn_lock);
1538
1539 return nid;
Mel Gorman75a592a2015-06-30 14:56:59 -07001540}
Mike Rapoport3f08a302020-06-03 15:57:02 -07001541#endif /* CONFIG_NEED_MULTIPLE_NODES */
Mel Gorman75a592a2015-06-30 14:56:59 -07001542
Mike Rapoport7c2ee342018-10-30 15:09:36 -07001543void __init memblock_free_pages(struct page *page, unsigned long pfn,
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001544 unsigned int order)
1545{
1546 if (early_page_uninitialised(pfn))
1547 return;
Arun KSa9cd4102019-03-05 15:42:14 -08001548 __free_pages_core(page, order);
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001549}
1550
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001551/*
 1552 * Check that the whole pageblock (or the subset of it) given by the interval
 1553 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 1554 * with the migration or free compaction scanner. The scanners then need to
1555 * use only pfn_valid_within() check for arches that allow holes within
1556 * pageblocks.
1557 *
1558 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1559 *
1560 * It's possible on some configurations to have a setup like node0 node1 node0
1561 * i.e. it's possible that all pages within a zones range of pages do not
1562 * belong to a single zone. We assume that a border between node0 and node1
1563 * can occur within a single pageblock, but not a node0 node1 node0
1564 * interleaving within a single pageblock. It is therefore sufficient to check
1565 * the first and last page of a pageblock and avoid checking each individual
1566 * page in a pageblock.
1567 */
1568struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1569 unsigned long end_pfn, struct zone *zone)
1570{
1571 struct page *start_page;
1572 struct page *end_page;
1573
1574 /* end_pfn is one past the range we are checking */
1575 end_pfn--;
1576
1577 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1578 return NULL;
1579
Michal Hocko2d070ea2017-07-06 15:37:56 -07001580 start_page = pfn_to_online_page(start_pfn);
1581 if (!start_page)
1582 return NULL;
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001583
1584 if (page_zone(start_page) != zone)
1585 return NULL;
1586
1587 end_page = pfn_to_page(end_pfn);
1588
1589 /* This gives a shorter code than deriving page_zone(end_page) */
1590 if (page_zone_id(start_page) != page_zone_id(end_page))
1591 return NULL;
1592
1593 return start_page;
1594}
1595
1596void set_zone_contiguous(struct zone *zone)
1597{
1598 unsigned long block_start_pfn = zone->zone_start_pfn;
1599 unsigned long block_end_pfn;
1600
1601 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1602 for (; block_start_pfn < zone_end_pfn(zone);
1603 block_start_pfn = block_end_pfn,
1604 block_end_pfn += pageblock_nr_pages) {
1605
1606 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1607
1608 if (!__pageblock_pfn_to_page(block_start_pfn,
1609 block_end_pfn, zone))
1610 return;
David Hildenbrande84fe992020-05-07 18:35:46 -07001611 cond_resched();
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001612 }
1613
1614 /* We confirm that there is no hole */
1615 zone->contiguous = true;
1616}
1617
1618void clear_zone_contiguous(struct zone *zone)
1619{
1620 zone->contiguous = false;
1621}
1622
Mel Gorman7e18adb2015-06-30 14:57:05 -07001623#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001624static void __init deferred_free_range(unsigned long pfn,
1625 unsigned long nr_pages)
Mel Gormana4de83d2015-06-30 14:57:16 -07001626{
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001627 struct page *page;
1628 unsigned long i;
Mel Gormana4de83d2015-06-30 14:57:16 -07001629
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001630 if (!nr_pages)
Mel Gormana4de83d2015-06-30 14:57:16 -07001631 return;
1632
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001633 page = pfn_to_page(pfn);
1634
Mel Gormana4de83d2015-06-30 14:57:16 -07001635 /* Free a large naturally-aligned chunk if possible */
Xishi Qiue7801492016-10-07 16:58:09 -07001636 if (nr_pages == pageblock_nr_pages &&
1637 (pfn & (pageblock_nr_pages - 1)) == 0) {
Mel Gormanac5d2532015-06-30 14:57:20 -07001638 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Arun KSa9cd4102019-03-05 15:42:14 -08001639 __free_pages_core(page, pageblock_order);
Mel Gormana4de83d2015-06-30 14:57:16 -07001640 return;
1641 }
1642
Xishi Qiue7801492016-10-07 16:58:09 -07001643 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1644 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1645 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Arun KSa9cd4102019-03-05 15:42:14 -08001646 __free_pages_core(page, 0);
Xishi Qiue7801492016-10-07 16:58:09 -07001647 }
Mel Gormana4de83d2015-06-30 14:57:16 -07001648}
1649
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001650/* Completion tracking for deferred_init_memmap() threads */
1651static atomic_t pgdat_init_n_undone __initdata;
1652static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1653
1654static inline void __init pgdat_init_report_one_done(void)
1655{
1656 if (atomic_dec_and_test(&pgdat_init_n_undone))
1657 complete(&pgdat_init_all_done_comp);
1658}
Mel Gorman0e1cc952015-06-30 14:57:27 -07001659
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001660/*
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001661 * Returns true if page needs to be initialized or freed to buddy allocator.
1662 *
1663 * First we check if pfn is valid on architectures where it is possible to have
1664 * holes within pageblock_nr_pages. On systems where it is not possible, this
1665 * function is optimized out.
1666 *
 1667 * Then, we check whether the current large page is valid by checking only
 1668 * the validity of its head pfn.
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001669 */
Alexander Duyck56ec43d2019-05-13 17:21:13 -07001670static inline bool __init deferred_pfn_valid(unsigned long pfn)
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001671{
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001672 if (!pfn_valid_within(pfn))
1673 return false;
1674 if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1675 return false;
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001676 return true;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001677}
1678
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001679/*
1680 * Free pages to buddy allocator. Try to free aligned pages in
1681 * pageblock_nr_pages sizes.
1682 */
Alexander Duyck56ec43d2019-05-13 17:21:13 -07001683static void __init deferred_free_pages(unsigned long pfn,
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001684 unsigned long end_pfn)
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001685{
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001686 unsigned long nr_pgmask = pageblock_nr_pages - 1;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001687 unsigned long nr_free = 0;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001688
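	/*
	 * Accumulate runs of contiguous valid pfns in nr_free and flush the run
	 * whenever a hole or a pageblock boundary is reached, so that whole
	 * pageblocks can be freed at pageblock_order.
	 */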
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001689 for (; pfn < end_pfn; pfn++) {
Alexander Duyck56ec43d2019-05-13 17:21:13 -07001690 if (!deferred_pfn_valid(pfn)) {
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001691 deferred_free_range(pfn - nr_free, nr_free);
1692 nr_free = 0;
1693 } else if (!(pfn & nr_pgmask)) {
1694 deferred_free_range(pfn - nr_free, nr_free);
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001695 nr_free = 1;
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001696 } else {
1697 nr_free++;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001698 }
1699 }
1700 /* Free the last block of pages to allocator */
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001701 deferred_free_range(pfn - nr_free, nr_free);
1702}
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001703
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001704/*
1705 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
1706 * by performing it only once every pageblock_nr_pages.
1707 * Return number of pages initialized.
1708 */
Alexander Duyck56ec43d2019-05-13 17:21:13 -07001709static unsigned long __init deferred_init_pages(struct zone *zone,
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001710 unsigned long pfn,
1711 unsigned long end_pfn)
1712{
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001713 unsigned long nr_pgmask = pageblock_nr_pages - 1;
Alexander Duyck56ec43d2019-05-13 17:21:13 -07001714 int nid = zone_to_nid(zone);
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001715 unsigned long nr_pages = 0;
Alexander Duyck56ec43d2019-05-13 17:21:13 -07001716 int zid = zone_idx(zone);
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001717 struct page *page = NULL;
1718
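	/*
	 * Walk the pfns, recomputing the struct page pointer only after holes
	 * and at pageblock boundaries; otherwise just advance the pointer.
	 */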
1719 for (; pfn < end_pfn; pfn++) {
Alexander Duyck56ec43d2019-05-13 17:21:13 -07001720 if (!deferred_pfn_valid(pfn)) {
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001721 page = NULL;
1722 continue;
1723 } else if (!page || !(pfn & nr_pgmask)) {
1724 page = pfn_to_page(pfn);
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001725 } else {
1726 page++;
1727 }
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07001728 __init_single_page(page, pfn, zid, nid);
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001729 nr_pages++;
1730 }
1731 return (nr_pages);
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001732}
1733
Alexander Duyck0e56aca2019-05-13 17:21:20 -07001734/*
1735 * This function is meant to pre-load the iterator for the zone init.
1736 * Specifically it walks through the ranges until we are caught up to the
1737 * first_init_pfn value and exits there. If we never encounter the value we
1738 * return false indicating there are no valid ranges left.
1739 */
1740static bool __init
1741deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1742 unsigned long *spfn, unsigned long *epfn,
1743 unsigned long first_init_pfn)
1744{
1745 u64 j;
1746
1747 /*
1748 * Start out by walking through the ranges in this zone that have
1749 * already been initialized. We don't need to do anything with them
1750 * so we just need to flush them out of the system.
1751 */
1752 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1753 if (*epfn <= first_init_pfn)
1754 continue;
1755 if (*spfn < first_init_pfn)
1756 *spfn = first_init_pfn;
1757 *i = j;
1758 return true;
1759 }
1760
1761 return false;
1762}
1763
1764/*
1765 * Initialize and free pages. We do it in two loops: first we initialize
1766 * struct page, then free to buddy allocator, because while we are
1767 * freeing pages we can access pages that are ahead (computing buddy
1768 * page in __free_one_page()).
1769 *
1770 * In order to try and keep some memory in the cache we have the loop
1771 * broken along max page order boundaries. This way we will not cause
1772 * any issues with the buddy page computation.
1773 */
1774static unsigned long __init
1775deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1776 unsigned long *end_pfn)
1777{
1778 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
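	/*
	 * mo_pfn is the next MAX_ORDER aligned boundary strictly above
	 * *start_pfn; both loops below stop at it.
	 */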
1779 unsigned long spfn = *start_pfn, epfn = *end_pfn;
1780 unsigned long nr_pages = 0;
1781 u64 j = *i;
1782
1783 /* First we loop through and initialize the page values */
1784 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1785 unsigned long t;
1786
1787 if (mo_pfn <= *start_pfn)
1788 break;
1789
1790 t = min(mo_pfn, *end_pfn);
1791 nr_pages += deferred_init_pages(zone, *start_pfn, t);
1792
1793 if (mo_pfn < *end_pfn) {
1794 *start_pfn = mo_pfn;
1795 break;
1796 }
1797 }
1798
1799 /* Reset values and now loop through freeing pages as needed */
1800 swap(j, *i);
1801
1802 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
1803 unsigned long t;
1804
1805 if (mo_pfn <= spfn)
1806 break;
1807
1808 t = min(mo_pfn, epfn);
1809 deferred_free_pages(spfn, t);
1810
1811 if (mo_pfn <= epfn)
1812 break;
1813 }
1814
1815 return nr_pages;
1816}
1817
Daniel Jordane4443142020-06-03 15:59:51 -07001818static void __init
1819deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
1820 void *arg)
1821{
1822 unsigned long spfn, epfn;
1823 struct zone *zone = arg;
1824 u64 i;
1825
1826 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
1827
1828 /*
1829 * Initialize and free pages in MAX_ORDER sized increments so that we
1830 * can avoid introducing any issues with the buddy allocator.
1831 */
1832 while (spfn < end_pfn) {
1833 deferred_init_maxorder(&i, zone, &spfn, &epfn);
1834 cond_resched();
1835 }
1836}
1837
Daniel Jordanecd09652020-06-03 15:59:55 -07001838/* An arch may override for more concurrency. */
1839__weak int __init
1840deferred_page_init_max_threads(const struct cpumask *node_cpumask)
1841{
1842 return 1;
1843}
1844
Mel Gorman7e18adb2015-06-30 14:57:05 -07001845/* Initialise remaining memory on a node */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001846static int __init deferred_init_memmap(void *data)
Mel Gorman7e18adb2015-06-30 14:57:05 -07001847{
Mel Gorman0e1cc952015-06-30 14:57:27 -07001848 pg_data_t *pgdat = data;
Mel Gorman0e1cc952015-06-30 14:57:27 -07001849 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
Daniel Jordan89c7c402020-06-03 15:59:47 -07001850 unsigned long spfn = 0, epfn = 0;
Alexander Duyck0e56aca2019-05-13 17:21:20 -07001851 unsigned long first_init_pfn, flags;
1852 unsigned long start = jiffies;
1853 struct zone *zone;
Daniel Jordane4443142020-06-03 15:59:51 -07001854 int zid, max_threads;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001855 u64 i;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001856
Mel Gorman0e1cc952015-06-30 14:57:27 -07001857 /* Bind memory initialisation thread to a local node if possible */
1858 if (!cpumask_empty(cpumask))
1859 set_cpus_allowed_ptr(current, cpumask);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001860
Pavel Tatashin3a2d7fa2018-04-05 16:22:27 -07001861 pgdat_resize_lock(pgdat, &flags);
1862 first_init_pfn = pgdat->first_deferred_pfn;
1863 if (first_init_pfn == ULONG_MAX) {
1864 pgdat_resize_unlock(pgdat, &flags);
1865 pgdat_init_report_one_done();
1866 return 0;
1867 }
1868
Mel Gorman7e18adb2015-06-30 14:57:05 -07001869 /* Sanity check boundaries */
1870 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1871 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1872 pgdat->first_deferred_pfn = ULONG_MAX;
1873
Pavel Tatashin3d060852020-06-03 15:59:24 -07001874 /*
1875 * Once we unlock here, the zone cannot be grown anymore, thus if an
1876 * interrupt thread must allocate this early in boot, zone must be
1877 * pre-grown prior to start of deferred page initialization.
1878 */
1879 pgdat_resize_unlock(pgdat, &flags);
1880
Mel Gorman7e18adb2015-06-30 14:57:05 -07001881 /* Only the highest zone is deferred so find it */
1882 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1883 zone = pgdat->node_zones + zid;
1884 if (first_init_pfn < zone_end_pfn(zone))
1885 break;
1886 }
Alexander Duyck0e56aca2019-05-13 17:21:20 -07001887
1888 /* If the zone is empty somebody else may have cleared out the zone */
1889 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1890 first_init_pfn))
1891 goto zone_empty;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001892
Daniel Jordanecd09652020-06-03 15:59:55 -07001893 max_threads = deferred_page_init_max_threads(cpumask);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001894
Daniel Jordan117003c2020-06-03 15:59:20 -07001895 while (spfn < epfn) {
Daniel Jordane4443142020-06-03 15:59:51 -07001896 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
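		/*
		 * Hand [spfn, epfn_align) to padata, which splits the range
		 * into section-aligned chunks worked on by up to max_threads
		 * threads.
		 */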
1897 struct padata_mt_job job = {
1898 .thread_fn = deferred_init_memmap_chunk,
1899 .fn_arg = zone,
1900 .start = spfn,
1901 .size = epfn_align - spfn,
1902 .align = PAGES_PER_SECTION,
1903 .min_chunk = PAGES_PER_SECTION,
1904 .max_threads = max_threads,
1905 };
1906
1907 padata_do_multithreaded(&job);
1908 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1909 epfn_align);
Daniel Jordan117003c2020-06-03 15:59:20 -07001910 }
Mel Gorman7e18adb2015-06-30 14:57:05 -07001911zone_empty:
Mel Gorman7e18adb2015-06-30 14:57:05 -07001912 /* Sanity check that the next zone really is unpopulated */
1913 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1914
Daniel Jordan89c7c402020-06-03 15:59:47 -07001915 pr_info("node %d deferred pages initialised in %ums\n",
1916 pgdat->node_id, jiffies_to_msecs(jiffies - start));
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001917
1918 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001919 return 0;
1920}
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001921
1922/*
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001923 * If this zone has deferred pages, try to grow it by initializing enough
1924 * deferred pages to satisfy the allocation specified by order, rounded up to
1925 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
1926 * of SECTION_SIZE bytes by initializing struct pages in increments of
1927 * PAGES_PER_SECTION * sizeof(struct page) bytes.
1928 *
1929 * Return true when zone was grown, otherwise return false. We return true even
1930 * when we grow less than requested, to let the caller decide if there are
1931 * enough pages to satisfy the allocation.
1932 *
1933 * Note: We use noinline because this function is needed only during boot, and
1934 * it is called from a __ref function _deferred_grow_zone. This way we are
1935 * making sure that it is not inlined into permanent text section.
1936 */
1937static noinline bool __init
1938deferred_grow_zone(struct zone *zone, unsigned int order)
1939{
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001940 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
Alexander Duyck837566e2019-05-13 17:21:17 -07001941 pg_data_t *pgdat = zone->zone_pgdat;
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001942 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
Alexander Duyck0e56aca2019-05-13 17:21:20 -07001943 unsigned long spfn, epfn, flags;
1944 unsigned long nr_pages = 0;
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001945 u64 i;
1946
1947 /* Only the last zone may have deferred pages */
1948 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
1949 return false;
1950
1951 pgdat_resize_lock(pgdat, &flags);
1952
1953 /*
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001954 * If someone grew this zone while we were waiting for spinlock, return
1955 * true, as there might be enough pages already.
1956 */
1957 if (first_deferred_pfn != pgdat->first_deferred_pfn) {
1958 pgdat_resize_unlock(pgdat, &flags);
1959 return true;
1960 }
1961
Alexander Duyck0e56aca2019-05-13 17:21:20 -07001962 /* If the zone is empty somebody else may have cleared out the zone */
1963 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1964 first_deferred_pfn)) {
1965 pgdat->first_deferred_pfn = ULONG_MAX;
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001966 pgdat_resize_unlock(pgdat, &flags);
Juergen Grossb9705d82019-07-04 15:14:36 -07001967 /* Retry only once. */
1968 return first_deferred_pfn != ULONG_MAX;
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001969 }
1970
Alexander Duyck0e56aca2019-05-13 17:21:20 -07001971 /*
1972 * Initialize and free pages in MAX_ORDER sized increments so
1973 * that we can avoid introducing any issues with the buddy
1974 * allocator.
1975 */
1976 while (spfn < epfn) {
1977 /* update our first deferred PFN for this section */
1978 first_deferred_pfn = spfn;
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001979
Alexander Duyck0e56aca2019-05-13 17:21:20 -07001980 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
Daniel Jordan117003c2020-06-03 15:59:20 -07001981 touch_nmi_watchdog();
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001982
Alexander Duyck0e56aca2019-05-13 17:21:20 -07001983 /* We should only stop along section boundaries */
1984 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
1985 continue;
1986
1987 /* If our quota has been met we can stop here */
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001988 if (nr_pages >= nr_pages_needed)
1989 break;
1990 }
1991
Alexander Duyck0e56aca2019-05-13 17:21:20 -07001992 pgdat->first_deferred_pfn = spfn;
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001993 pgdat_resize_unlock(pgdat, &flags);
1994
1995 return nr_pages > 0;
1996}
1997
1998/*
1999 * deferred_grow_zone() is __init, but it is called from
2000 * get_page_from_freelist() during early boot until deferred_pages permanently
 2000 * disables this call. This is why we have a __ref wrapper to avoid a warning,
2002 * and to ensure that the function body gets unloaded.
2003 */
2004static bool __ref
2005_deferred_grow_zone(struct zone *zone, unsigned int order)
2006{
2007 return deferred_grow_zone(zone, order);
2008}
2009
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07002010#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
Mel Gorman0e1cc952015-06-30 14:57:27 -07002011
2012void __init page_alloc_init_late(void)
2013{
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07002014 struct zone *zone;
Dan Williamse900a912019-05-14 15:41:28 -07002015 int nid;
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07002016
2017#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Mel Gorman0e1cc952015-06-30 14:57:27 -07002018
Nicolai Stanged3cd1312015-08-06 15:46:16 -07002019 /* There will be num_node_state(N_MEMORY) threads */
2020 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
Mel Gorman0e1cc952015-06-30 14:57:27 -07002021 for_each_node_state(nid, N_MEMORY) {
Mel Gorman0e1cc952015-06-30 14:57:27 -07002022 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2023 }
2024
2025 /* Block until all are initialised */
Nicolai Stanged3cd1312015-08-06 15:46:16 -07002026 wait_for_completion(&pgdat_init_all_done_comp);
Mel Gorman4248b0d2015-08-06 15:46:20 -07002027
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07002028 /*
Mel Gorman3e8fc002019-11-05 21:16:27 -08002029 * The number of managed pages has changed due to the initialisation
 2030	 * so the pcpu batch and high limits need to be updated or the limits
2031 * will be artificially small.
2032 */
2033 for_each_populated_zone(zone)
2034 zone_pcp_update(zone);
2035
2036 /*
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07002037 * We initialized the rest of the deferred pages. Permanently disable
2038 * on-demand struct page initialization.
2039 */
2040 static_branch_disable(&deferred_pages);
2041
Mel Gorman4248b0d2015-08-06 15:46:20 -07002042 /* Reinit limits that are based on free pages after the kernel is up */
2043 files_maxfiles_init();
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07002044#endif
Mike Rapoport350e88b2019-05-13 17:22:59 -07002045
Pavel Tatashin3010f872017-08-18 15:16:05 -07002046 /* Discard memblock private memory */
2047 memblock_discard();
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07002048
Dan Williamse900a912019-05-14 15:41:28 -07002049 for_each_node_state(nid, N_MEMORY)
2050 shuffle_free_memory(NODE_DATA(nid));
2051
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07002052 for_each_populated_zone(zone)
2053 set_zone_contiguous(zone);
Mel Gorman7e18adb2015-06-30 14:57:05 -07002054}
Mel Gorman7e18adb2015-06-30 14:57:05 -07002055
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002056#ifdef CONFIG_CMA
Li Zhong9cf510a2013-08-23 13:52:52 +08002057/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002058void __init init_cma_reserved_pageblock(struct page *page)
2059{
2060 unsigned i = pageblock_nr_pages;
2061 struct page *p = page;
2062
2063 do {
2064 __ClearPageReserved(p);
2065 set_page_count(p, 0);
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09002066 } while (++p, --i);
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002067
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002068 set_pageblock_migratetype(page, MIGRATE_CMA);
Michal Nazarewiczdc783272014-07-02 15:22:35 -07002069
2070 if (pageblock_order >= MAX_ORDER) {
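		/*
		 * pageblock_order can exceed MAX_ORDER - 1 on architectures
		 * with very large huge pages; free the block in MAX_ORDER - 1
		 * sized pieces in that case.
		 */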
2071 i = pageblock_nr_pages;
2072 p = page;
2073 do {
2074 set_page_refcounted(p);
2075 __free_pages(p, MAX_ORDER - 1);
2076 p += MAX_ORDER_NR_PAGES;
2077 } while (i -= MAX_ORDER_NR_PAGES);
2078 } else {
2079 set_page_refcounted(page);
2080 __free_pages(page, pageblock_order);
2081 }
2082
Jiang Liu3dcc0572013-07-03 15:03:21 -07002083 adjust_managed_page_count(page, pageblock_nr_pages);
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002084}
2085#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086
2087/*
2088 * The order of subdivision here is critical for the IO subsystem.
2089 * Please do not alter this order without good reasons and regression
2090 * testing. Specifically, as large blocks of memory are subdivided,
2091 * the order in which smaller blocks are delivered depends on the order
2092 * they're subdivided in this function. This is the primary factor
2093 * influencing the order in which pages are delivered to the IO
2094 * subsystem according to empirical testing, and this is also justified
2095 * by considering the behavior of a buddy system containing a single
2096 * large block of memory acted on by a series of small allocations.
2097 * This behavior is a critical factor in sglist merging's success.
2098 *
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +01002099 * -- nyc
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 */
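/*
 * Example: satisfying an order-2 request from an order-4 block splits off one
 * order-3 buddy and one order-2 buddy back onto the free lists and hands the
 * remaining order-2 chunk to the caller.
 */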
Nick Piggin085cc7d2006-01-06 00:11:01 -08002101static inline void expand(struct zone *zone, struct page *page,
Alexander Duyck6ab01362020-04-06 20:04:49 -07002102 int low, int high, int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103{
2104 unsigned long size = 1 << high;
2105
2106 while (high > low) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 high--;
2108 size >>= 1;
Sasha Levin309381fea2014-01-23 15:52:54 -08002109 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08002110
Joonsoo Kimacbc15a2016-10-07 16:58:15 -07002111 /*
 2112			 * Mark as guard page(s); this allows the pages to be
 2113			 * merged back into the allocator when the buddy is freed.
 2114			 * The corresponding page table entries are not touched,
 2115			 * so the pages stay not present in the virtual address space
2116 */
2117 if (set_page_guard(zone, &page[size], high, migratetype))
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08002118 continue;
Joonsoo Kimacbc15a2016-10-07 16:58:15 -07002119
Alexander Duyck6ab01362020-04-06 20:04:49 -07002120 add_to_free_list(&page[size], zone, high, migratetype);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 set_page_order(&page[size], high);
2122 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123}
2124
Vlastimil Babka4e611802016-05-19 17:14:41 -07002125static void check_new_page_bad(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126{
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07002127 if (unlikely(page->flags & __PG_HWPOISON)) {
Naoya Horiguchie570f562016-05-20 16:58:50 -07002128 /* Don't complain about hwpoisoned pages */
2129 page_mapcount_reset(page); /* remove PageBuddy */
2130 return;
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07002131 }
Wei Yang58b7f112020-06-03 15:58:39 -07002132
2133 bad_page(page,
2134 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
Vlastimil Babka4e611802016-05-19 17:14:41 -07002135}
2136
2137/*
2138 * This page is about to be returned from the page allocator
2139 */
2140static inline int check_new_page(struct page *page)
2141{
2142 if (likely(page_expected_state(page,
2143 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2144 return 0;
2145
2146 check_new_page_bad(page);
2147 return 1;
Wu Fengguang2a7684a2009-09-16 11:50:12 +02002148}
2149
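/*
 * Pages are already zeroed at free time when zero-poisoning or init_on_free is
 * in effect, so prep_new_page() does not need to clear them again.
 */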
Vinayak Menonbd33ef32017-05-03 14:54:42 -07002150static inline bool free_pages_prezeroed(void)
Laura Abbott1414c7f2016-03-15 14:56:30 -07002151{
Alexander Potapenko64713842019-07-11 20:59:19 -07002152 return (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
2153 page_poisoning_enabled()) || want_init_on_free();
Laura Abbott1414c7f2016-03-15 14:56:30 -07002154}
2155
Mel Gorman479f8542016-05-19 17:14:35 -07002156#ifdef CONFIG_DEBUG_VM
Vlastimil Babka4462b322019-07-11 20:55:09 -07002157/*
2158 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2159 * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2160 * also checked when pcp lists are refilled from the free lists.
2161 */
2162static inline bool check_pcp_refill(struct page *page)
Mel Gorman479f8542016-05-19 17:14:35 -07002163{
Vlastimil Babka8e57f8a2020-01-13 16:29:20 -08002164 if (debug_pagealloc_enabled_static())
Vlastimil Babka4462b322019-07-11 20:55:09 -07002165 return check_new_page(page);
2166 else
2167 return false;
Mel Gorman479f8542016-05-19 17:14:35 -07002168}
2169
Vlastimil Babka4462b322019-07-11 20:55:09 -07002170static inline bool check_new_pcp(struct page *page)
Mel Gorman479f8542016-05-19 17:14:35 -07002171{
2172 return check_new_page(page);
2173}
2174#else
Vlastimil Babka4462b322019-07-11 20:55:09 -07002175/*
2176 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2177 * when pcp lists are being refilled from the free lists. With debug_pagealloc
2178 * enabled, they are also checked when being allocated from the pcp lists.
2179 */
2180static inline bool check_pcp_refill(struct page *page)
Mel Gorman479f8542016-05-19 17:14:35 -07002181{
2182 return check_new_page(page);
2183}
Vlastimil Babka4462b322019-07-11 20:55:09 -07002184static inline bool check_new_pcp(struct page *page)
Mel Gorman479f8542016-05-19 17:14:35 -07002185{
Vlastimil Babka8e57f8a2020-01-13 16:29:20 -08002186 if (debug_pagealloc_enabled_static())
Vlastimil Babka4462b322019-07-11 20:55:09 -07002187 return check_new_page(page);
2188 else
2189 return false;
Mel Gorman479f8542016-05-19 17:14:35 -07002190}
2191#endif /* CONFIG_DEBUG_VM */
2192
2193static bool check_new_pages(struct page *page, unsigned int order)
2194{
2195 int i;
2196 for (i = 0; i < (1 << order); i++) {
2197 struct page *p = page + i;
2198
2199 if (unlikely(check_new_page(p)))
2200 return true;
2201 }
2202
2203 return false;
2204}
2205
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07002206inline void post_alloc_hook(struct page *page, unsigned int order,
2207 gfp_t gfp_flags)
2208{
2209 set_page_private(page, 0);
2210 set_page_refcounted(page);
2211
2212 arch_alloc_page(page, order);
Vlastimil Babka8e57f8a2020-01-13 16:29:20 -08002213 if (debug_pagealloc_enabled_static())
Rick Edgecombed6332692019-04-25 17:11:35 -07002214 kernel_map_pages(page, 1 << order, 1);
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07002215 kasan_alloc_pages(page, order);
Qian Cai41179922019-03-05 15:41:24 -08002216 kernel_poison_pages(page, 1 << order, 1);
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07002217 set_page_owner(page, order, gfp_flags);
2218}
2219
Mel Gorman479f8542016-05-19 17:14:35 -07002220static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
Mel Gormanc6038442016-05-19 17:13:38 -07002221 unsigned int alloc_flags)
Wu Fengguang2a7684a2009-09-16 11:50:12 +02002222{
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07002223 post_alloc_hook(page, order, gfp_flags);
Nick Piggin17cf4402006-03-22 00:08:41 -08002224
Alexander Potapenko64713842019-07-11 20:59:19 -07002225 if (!free_pages_prezeroed() && want_init_on_alloc(gfp_flags))
2226 kernel_init_free_pages(page, 1 << order);
Nick Piggin17cf4402006-03-22 00:08:41 -08002227
2228 if (order && (gfp_flags & __GFP_COMP))
2229 prep_compound_page(page, order);
2230
Vlastimil Babka75379192015-02-11 15:25:38 -08002231 /*
Michal Hocko2f064f32015-08-21 14:11:51 -07002232 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
Vlastimil Babka75379192015-02-11 15:25:38 -08002233 * allocate the page. The expectation is that the caller is taking
2234 * steps that will free more memory. The caller should avoid the page
2235 * being used for !PFMEMALLOC purposes.
2236 */
Michal Hocko2f064f32015-08-21 14:11:51 -07002237 if (alloc_flags & ALLOC_NO_WATERMARKS)
2238 set_page_pfmemalloc(page);
2239 else
2240 clear_page_pfmemalloc(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241}
2242
Mel Gorman56fd56b2007-10-16 01:25:58 -07002243/*
2244 * Go through the free lists for the given migratetype and remove
2245 * the smallest available page from the freelists
2246 */
Aaron Lu85ccc8f2017-11-15 17:36:53 -08002247static __always_inline
Mel Gorman728ec982009-06-16 15:32:04 -07002248struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
Mel Gorman56fd56b2007-10-16 01:25:58 -07002249 int migratetype)
2250{
2251 unsigned int current_order;
Pintu Kumarb8af2942013-09-11 14:20:34 -07002252 struct free_area *area;
Mel Gorman56fd56b2007-10-16 01:25:58 -07002253 struct page *page;
2254
2255 /* Find a page of the appropriate size in the preferred list */
2256 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2257 area = &(zone->free_area[current_order]);
Dan Williamsb03641a2019-05-14 15:41:32 -07002258 page = get_page_from_free_area(area, migratetype);
Geliang Tanga16601c2016-01-14 15:20:30 -08002259 if (!page)
2260 continue;
Alexander Duyck6ab01362020-04-06 20:04:49 -07002261 del_page_from_free_list(page, zone, current_order);
2262 expand(zone, page, order, current_order, migratetype);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002263 set_pcppage_migratetype(page, migratetype);
Mel Gorman56fd56b2007-10-16 01:25:58 -07002264 return page;
2265 }
2266
2267 return NULL;
2268}
2269
2270
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002271/*
2272 * This array describes the order lists are fallen back to when
2273 * the free lists for the desirable migrate type are depleted
2274 */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002275static int fallbacks[MIGRATE_TYPES][4] = {
Mel Gorman974a7862015-11-06 16:28:34 -08002276 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
Mel Gorman974a7862015-11-06 16:28:34 -08002277 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
Huang Shijie7ead3342018-12-28 00:34:46 -08002278 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
Joonsoo Kimdc676472015-04-14 15:45:15 -07002279#ifdef CONFIG_CMA
Mel Gorman974a7862015-11-06 16:28:34 -08002280 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002281#endif
Minchan Kim194159f2013-02-22 16:33:58 -08002282#ifdef CONFIG_MEMORY_ISOLATION
Mel Gorman974a7862015-11-06 16:28:34 -08002283 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
Minchan Kim194159f2013-02-22 16:33:58 -08002284#endif
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002285};
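/* For example, an UNMOVABLE allocation falls back to RECLAIMABLE first, then MOVABLE. */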
2286
Joonsoo Kimdc676472015-04-14 15:45:15 -07002287#ifdef CONFIG_CMA
Aaron Lu85ccc8f2017-11-15 17:36:53 -08002288static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
Joonsoo Kimdc676472015-04-14 15:45:15 -07002289 unsigned int order)
2290{
2291 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2292}
2293#else
2294static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2295 unsigned int order) { return NULL; }
2296#endif
2297
Mel Gormanc361be52007-10-16 01:25:51 -07002298/*
2299 * Move the free pages in a range to the free lists of the requested type.
Mel Gormand9c23402007-10-16 01:26:01 -07002300 * Note that start_page and end_page are not aligned on a pageblock
Mel Gormanc361be52007-10-16 01:25:51 -07002301 * boundary. If alignment is required, use move_freepages_block()
2302 */
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002303static int move_freepages(struct zone *zone,
Adrian Bunkb69a7282008-07-23 21:28:12 -07002304 struct page *start_page, struct page *end_page,
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002305 int migratetype, int *num_movable)
Mel Gormanc361be52007-10-16 01:25:51 -07002306{
2307 struct page *page;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08002308 unsigned int order;
Mel Gormand1003132007-10-16 01:26:00 -07002309 int pages_moved = 0;
Mel Gormanc361be52007-10-16 01:25:51 -07002310
Mel Gormanc361be52007-10-16 01:25:51 -07002311 for (page = start_page; page <= end_page;) {
2312 if (!pfn_valid_within(page_to_pfn(page))) {
2313 page++;
2314 continue;
2315 }
2316
2317 if (!PageBuddy(page)) {
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002318 /*
2319 * We assume that pages that could be isolated for
2320 * migration are movable. But we don't actually try
2321 * isolating, as that would be expensive.
2322 */
2323 if (num_movable &&
2324 (PageLRU(page) || __PageMovable(page)))
2325 (*num_movable)++;
2326
Mel Gormanc361be52007-10-16 01:25:51 -07002327 page++;
2328 continue;
2329 }
2330
David Rientjescd961032019-08-24 17:54:40 -07002331 /* Make sure we are not inadvertently changing nodes */
2332 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2333 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2334
Mel Gormanc361be52007-10-16 01:25:51 -07002335 order = page_order(page);
Alexander Duyck6ab01362020-04-06 20:04:49 -07002336 move_to_free_list(page, zone, order, migratetype);
Mel Gormanc361be52007-10-16 01:25:51 -07002337 page += 1 << order;
Mel Gormand1003132007-10-16 01:26:00 -07002338 pages_moved += 1 << order;
Mel Gormanc361be52007-10-16 01:25:51 -07002339 }
2340
Mel Gormand1003132007-10-16 01:26:00 -07002341 return pages_moved;
Mel Gormanc361be52007-10-16 01:25:51 -07002342}
2343
Minchan Kimee6f5092012-07-31 16:43:50 -07002344int move_freepages_block(struct zone *zone, struct page *page,
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002345 int migratetype, int *num_movable)
Mel Gormanc361be52007-10-16 01:25:51 -07002346{
2347 unsigned long start_pfn, end_pfn;
2348 struct page *start_page, *end_page;
2349
David Rientjes4a222122018-10-26 15:09:24 -07002350 if (num_movable)
2351 *num_movable = 0;
2352
Mel Gormanc361be52007-10-16 01:25:51 -07002353 start_pfn = page_to_pfn(page);
Mel Gormand9c23402007-10-16 01:26:01 -07002354 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
Mel Gormanc361be52007-10-16 01:25:51 -07002355 start_page = pfn_to_page(start_pfn);
Mel Gormand9c23402007-10-16 01:26:01 -07002356 end_page = start_page + pageblock_nr_pages - 1;
2357 end_pfn = start_pfn + pageblock_nr_pages - 1;
Mel Gormanc361be52007-10-16 01:25:51 -07002358
2359 /* Do not cross zone boundaries */
Cody P Schafer108bcc92013-02-22 16:35:23 -08002360 if (!zone_spans_pfn(zone, start_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07002361 start_page = page;
Cody P Schafer108bcc92013-02-22 16:35:23 -08002362 if (!zone_spans_pfn(zone, end_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07002363 return 0;
2364
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002365 return move_freepages(zone, start_page, end_page, migratetype,
2366 num_movable);
Mel Gormanc361be52007-10-16 01:25:51 -07002367}
2368
Mel Gorman2f66a682009-09-21 17:02:31 -07002369static void change_pageblock_range(struct page *pageblock_page,
2370 int start_order, int migratetype)
2371{
2372 int nr_pageblocks = 1 << (start_order - pageblock_order);
2373
2374 while (nr_pageblocks--) {
2375 set_pageblock_migratetype(pageblock_page, migratetype);
2376 pageblock_page += pageblock_nr_pages;
2377 }
2378}
2379
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002380/*
Vlastimil Babka9c0415e2015-02-11 15:28:21 -08002381 * When we are falling back to another migratetype during allocation, try to
2382 * steal extra free pages from the same pageblocks to satisfy further
2383 * allocations, instead of polluting multiple pageblocks.
2384 *
2385 * If we are stealing a relatively large buddy page, it is likely there will
2386 * be more free pages in the pageblock, so try to steal them all. For
2387 * reclaimable and unmovable allocations, we steal regardless of page size,
2388 * as fragmentation caused by those allocations polluting movable pageblocks
2389 * is worse than movable allocations stealing from unmovable and reclaimable
2390 * pageblocks.
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002391 */
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002392static bool can_steal_fallback(unsigned int order, int start_mt)
2393{
2394 /*
2395	 * This order check is intentionally kept even though the next
2396	 * check uses a more relaxed order. The reason is that if this
2397	 * condition is met we can steal the whole pageblock, whereas the
2398	 * check below does not guarantee that and is only a heuristic,
2399	 * so it could be changed at any time.
2400 */
2401 if (order >= pageblock_order)
2402 return true;
2403
2404 if (order >= pageblock_order / 2 ||
2405 start_mt == MIGRATE_RECLAIMABLE ||
2406 start_mt == MIGRATE_UNMOVABLE ||
2407 page_group_by_mobility_disabled)
2408 return true;
2409
2410 return false;
2411}
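/*
 * Illustrative example (editor's sketch, not in the original source): with
 * the common pageblock_order of 9, an order-9 request always qualifies to
 * steal the whole pageblock; a MOVABLE request of order 4 or higher
 * (>= pageblock_order / 2) also qualifies, while a MOVABLE order-2 request
 * does not; RECLAIMABLE and UNMOVABLE requests qualify regardless of order.
 */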
2412
Mel Gorman1c308442018-12-28 00:35:52 -08002413static inline void boost_watermark(struct zone *zone)
2414{
2415 unsigned long max_boost;
2416
2417 if (!watermark_boost_factor)
2418 return;
Henry Willard14f69142020-05-07 18:36:27 -07002419 /*
2420 * Don't bother in zones that are unlikely to produce results.
2421 * On small machines, including kdump capture kernels running
2422 * in a small area, boosting the watermark can cause an out of
2423 * memory situation immediately.
2424 */
2425 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2426 return;
Mel Gorman1c308442018-12-28 00:35:52 -08002427
2428 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2429 watermark_boost_factor, 10000);
Mel Gorman94b33342019-02-20 22:19:49 -08002430
2431 /*
2432 * high watermark may be uninitialised if fragmentation occurs
2433 * very early in boot so do not boost. We do not fall
2434 * through and boost by pageblock_nr_pages as failing
2435 * allocations that early means that reclaim is not going
2436 * to help and it may even be impossible to reclaim the
2437 * boosted watermark resulting in a hang.
2438 */
2439 if (!max_boost)
2440 return;
2441
Mel Gorman1c308442018-12-28 00:35:52 -08002442 max_boost = max(pageblock_nr_pages, max_boost);
2443
2444 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2445 max_boost);
2446}
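/*
 * Worked example (editor's sketch, values are illustrative): with
 * watermark_boost_factor = 15000 and a high watermark of 1000 pages,
 * max_boost = mult_frac(1000, 15000, 10000) = 1500 pages. Each call then
 * raises zone->watermark_boost by pageblock_nr_pages (512 pages with 2MB
 * pageblocks), capped at those 1500 pages.
 */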
2447
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002448/*
2449 * This function implements the actual steal behaviour. If order is large enough,
2450 * we can steal the whole pageblock. If not, we first move freepages in this
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002451 * pageblock to our migratetype and determine how many already-allocated pages
2452 * are there in the pageblock with a compatible migratetype. If at least half
2453 * of pages are free or compatible, we can change migratetype of the pageblock
2454 * itself, so pages freed in the future will be put on the correct free list.
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002455 */
2456static void steal_suitable_fallback(struct zone *zone, struct page *page,
Mel Gorman1c308442018-12-28 00:35:52 -08002457 unsigned int alloc_flags, int start_type, bool whole_block)
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002458{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08002459 unsigned int current_order = page_order(page);
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002460 int free_pages, movable_pages, alike_pages;
2461 int old_block_type;
2462
2463 old_block_type = get_pageblock_migratetype(page);
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002464
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002465 /*
2466 * This can happen due to races and we want to prevent broken
2467 * highatomic accounting.
2468 */
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002469 if (is_migrate_highatomic(old_block_type))
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002470 goto single_page;
2471
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002472 /* Take ownership for orders >= pageblock_order */
2473 if (current_order >= pageblock_order) {
2474 change_pageblock_range(page, current_order, start_type);
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002475 goto single_page;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002476 }
2477
Mel Gorman1c308442018-12-28 00:35:52 -08002478 /*
2479 * Boost watermarks to increase reclaim pressure to reduce the
2480 * likelihood of future fallbacks. Wake kswapd now as the node
2481 * may be balanced overall and kswapd will not wake naturally.
2482 */
2483 boost_watermark(zone);
2484 if (alloc_flags & ALLOC_KSWAPD)
Mel Gorman73444bc2019-01-08 15:23:39 -08002485 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
Mel Gorman1c308442018-12-28 00:35:52 -08002486
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002487 /* We are not allowed to try stealing from the whole block */
2488 if (!whole_block)
2489 goto single_page;
2490
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002491 free_pages = move_freepages_block(zone, page, start_type,
2492 &movable_pages);
2493 /*
2494 * Determine how many pages are compatible with our allocation.
2495 * For movable allocation, it's the number of movable pages which
2496 * we just obtained. For other types it's a bit more tricky.
2497 */
2498 if (start_type == MIGRATE_MOVABLE) {
2499 alike_pages = movable_pages;
2500 } else {
2501 /*
2502 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2503 * to MOVABLE pageblock, consider all non-movable pages as
2504 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2505 * vice versa, be conservative since we can't distinguish the
2506 * exact migratetype of non-movable pages.
2507 */
2508 if (old_block_type == MIGRATE_MOVABLE)
2509 alike_pages = pageblock_nr_pages
2510 - (free_pages + movable_pages);
2511 else
2512 alike_pages = 0;
2513 }
2514
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002515 /* moving whole block can fail due to zone boundary conditions */
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002516 if (!free_pages)
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002517 goto single_page;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002518
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002519 /*
2520 * If a sufficient number of pages in the block are either free or of
2521 * comparable migratability as our allocation, claim the whole block.
2522 */
2523 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002524 page_group_by_mobility_disabled)
2525 set_pageblock_migratetype(page, start_type);
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002526
2527 return;
2528
2529single_page:
Alexander Duyck6ab01362020-04-06 20:04:49 -07002530 move_to_free_list(page, zone, current_order, start_type);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002531}
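/*
 * Worked example (editor's sketch): an UNMOVABLE request falls back into a
 * MOVABLE pageblock of 512 pages. move_freepages_block() moves
 * free_pages = 200 to the UNMOVABLE freelist and reports
 * movable_pages = 250 allocated movable pages, so
 * alike_pages = 512 - (200 + 250) = 62. Since 200 + 62 >= 256 (half a
 * pageblock), the whole pageblock is then claimed as UNMOVABLE.
 */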
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002532
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002533/*
2534 * Check whether there is a suitable fallback freepage with requested order.
2535 * If only_stealable is true, this function returns fallback_mt only if
2536 * we can steal all the other freepages together. This helps to reduce
2537 * fragmentation due to mixed migratetype pages in one pageblock.
2538 */
2539int find_suitable_fallback(struct free_area *area, unsigned int order,
2540 int migratetype, bool only_stealable, bool *can_steal)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002541{
2542 int i;
2543 int fallback_mt;
2544
2545 if (area->nr_free == 0)
2546 return -1;
2547
2548 *can_steal = false;
2549 for (i = 0;; i++) {
2550 fallback_mt = fallbacks[migratetype][i];
Mel Gorman974a7862015-11-06 16:28:34 -08002551 if (fallback_mt == MIGRATE_TYPES)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002552 break;
2553
Dan Williamsb03641a2019-05-14 15:41:32 -07002554 if (free_area_empty(area, fallback_mt))
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002555 continue;
2556
2557 if (can_steal_fallback(order, migratetype))
2558 *can_steal = true;
2559
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002560 if (!only_stealable)
2561 return fallback_mt;
2562
2563 if (*can_steal)
2564 return fallback_mt;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002565 }
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002566
2567 return -1;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002568}
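/*
 * Illustrative note (editor's sketch): the fallbacks[] table (defined
 * elsewhere in this file) lists, per migratetype, which other freelists to
 * scan, terminated by MIGRATE_TYPES. For example, a MOVABLE request walks
 * its fallback entries in order, skipping empty free areas, and reports
 * through *can_steal whether whole-pageblock stealing is allowed for this
 * order and migratetype.
 */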
2569
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002570/*
2571 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2572 * there are no empty page blocks that contain a page with a suitable order
2573 */
2574static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2575 unsigned int alloc_order)
2576{
2577 int mt;
2578 unsigned long max_managed, flags;
2579
2580 /*
2581 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2582 * Check is race-prone but harmless.
2583 */
Arun KS9705bea2018-12-28 00:34:24 -08002584 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002585 if (zone->nr_reserved_highatomic >= max_managed)
2586 return;
2587
2588 spin_lock_irqsave(&zone->lock, flags);
2589
2590 /* Recheck the nr_reserved_highatomic limit under the lock */
2591 if (zone->nr_reserved_highatomic >= max_managed)
2592 goto out_unlock;
2593
2594 /* Yoink! */
2595 mt = get_pageblock_migratetype(page);
Xishi Qiua6ffdc02017-05-03 14:52:52 -07002596 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2597 && !is_migrate_cma(mt)) {
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002598 zone->nr_reserved_highatomic += pageblock_nr_pages;
2599 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002600 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002601 }
2602
2603out_unlock:
2604 spin_unlock_irqrestore(&zone->lock, flags);
2605}
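/*
 * Worked example (editor's sketch): for a zone with 1,000,000 managed pages
 * and 512-page pageblocks, max_managed = 10,000 + 512 = 10,512 pages, so at
 * most about 1% of the zone (on the order of 20 pageblocks) ends up
 * reserved as MIGRATE_HIGHATOMIC.
 */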
2606
2607/*
2608 * Used when an allocation is about to fail under memory pressure. This
2609 * potentially hurts the reliability of high-order allocations when under
2610 * intense memory pressure but failed atomic allocations should be easier
2611 * to recover from than an OOM.
Minchan Kim29fac032016-12-12 16:42:14 -08002612 *
2613 * If @force is true, try to unreserve a pageblock even though highatomic
2614 * pageblock is exhausted.
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002615 */
Minchan Kim29fac032016-12-12 16:42:14 -08002616static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2617 bool force)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002618{
2619 struct zonelist *zonelist = ac->zonelist;
2620 unsigned long flags;
2621 struct zoneref *z;
2622 struct zone *zone;
2623 struct page *page;
2624 int order;
Minchan Kim04c87162016-12-12 16:42:11 -08002625 bool ret;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002626
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002627 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002628 ac->nodemask) {
Minchan Kim29fac032016-12-12 16:42:14 -08002629 /*
2630 * Preserve at least one pageblock unless memory pressure
2631 * is really high.
2632 */
2633 if (!force && zone->nr_reserved_highatomic <=
2634 pageblock_nr_pages)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002635 continue;
2636
2637 spin_lock_irqsave(&zone->lock, flags);
2638 for (order = 0; order < MAX_ORDER; order++) {
2639 struct free_area *area = &(zone->free_area[order]);
2640
Dan Williamsb03641a2019-05-14 15:41:32 -07002641 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
Geliang Tanga16601c2016-01-14 15:20:30 -08002642 if (!page)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002643 continue;
2644
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002645 /*
Minchan Kim4855e4a2016-12-12 16:42:08 -08002646 * In page freeing path, migratetype change is racy so
2647			 * we can encounter several free pages in a pageblock
2648			 * in this loop although we changed the pageblock type
2649 * from highatomic to ac->migratetype. So we should
2650 * adjust the count once.
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002651 */
Xishi Qiua6ffdc02017-05-03 14:52:52 -07002652 if (is_migrate_highatomic_page(page)) {
Minchan Kim4855e4a2016-12-12 16:42:08 -08002653 /*
2654 * It should never happen but changes to
2655 * locking could inadvertently allow a per-cpu
2656 * drain to add pages to MIGRATE_HIGHATOMIC
2657 * while unreserving so be safe and watch for
2658 * underflows.
2659 */
2660 zone->nr_reserved_highatomic -= min(
2661 pageblock_nr_pages,
2662 zone->nr_reserved_highatomic);
2663 }
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002664
2665 /*
2666 * Convert to ac->migratetype and avoid the normal
2667 * pageblock stealing heuristics. Minimally, the caller
2668 * is doing the work and needs the pages. More
2669 * importantly, if the block was always converted to
2670 * MIGRATE_UNMOVABLE or another type then the number
2671 * of pageblocks that cannot be completely freed
2672 * may increase.
2673 */
2674 set_pageblock_migratetype(page, ac->migratetype);
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002675 ret = move_freepages_block(zone, page, ac->migratetype,
2676 NULL);
Minchan Kim29fac032016-12-12 16:42:14 -08002677 if (ret) {
2678 spin_unlock_irqrestore(&zone->lock, flags);
2679 return ret;
2680 }
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002681 }
2682 spin_unlock_irqrestore(&zone->lock, flags);
2683 }
Minchan Kim04c87162016-12-12 16:42:11 -08002684
2685 return false;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002686}
2687
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002688/*
2689 * Try finding a free buddy page on the fallback list and put it on the free
2690 * list of requested migratetype, possibly along with other pages from the same
2691 * block, depending on fragmentation avoidance heuristics. Returns true if
2692 * fallback was found so that __rmqueue_smallest() can grab it.
Rasmus Villemoesb0025292017-07-10 15:49:26 -07002693 *
2694 * The use of signed ints for order and current_order is a deliberate
2695 * deviation from the rest of this file, to make the for loop
2696 * condition simpler.
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002697 */
Aaron Lu85ccc8f2017-11-15 17:36:53 -08002698static __always_inline bool
Mel Gorman6bb15452018-12-28 00:35:41 -08002699__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2700 unsigned int alloc_flags)
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002701{
Pintu Kumarb8af2942013-09-11 14:20:34 -07002702 struct free_area *area;
Rasmus Villemoesb0025292017-07-10 15:49:26 -07002703 int current_order;
Mel Gorman6bb15452018-12-28 00:35:41 -08002704 int min_order = order;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002705 struct page *page;
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002706 int fallback_mt;
2707 bool can_steal;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002708
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002709 /*
Mel Gorman6bb15452018-12-28 00:35:41 -08002710 * Do not steal pages from freelists belonging to other pageblocks
2711 * i.e. orders < pageblock_order. If there are no local zones free,
2712 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2713 */
2714 if (alloc_flags & ALLOC_NOFRAGMENT)
2715 min_order = pageblock_order;
2716
2717 /*
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002718 * Find the largest available free page in the other list. This roughly
2719 * approximates finding the pageblock with the most free pages, which
2720 * would be too costly to do exactly.
2721 */
Mel Gorman6bb15452018-12-28 00:35:41 -08002722 for (current_order = MAX_ORDER - 1; current_order >= min_order;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002723 --current_order) {
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002724 area = &(zone->free_area[current_order]);
2725 fallback_mt = find_suitable_fallback(area, current_order,
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002726 start_migratetype, false, &can_steal);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002727 if (fallback_mt == -1)
2728 continue;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002729
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002730 /*
2731 * We cannot steal all free pages from the pageblock and the
2732 * requested migratetype is movable. In that case it's better to
2733 * steal and split the smallest available page instead of the
2734 * largest available page, because even if the next movable
2735 * allocation falls back into a different pageblock than this
2736 * one, it won't cause permanent fragmentation.
2737 */
2738 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2739 && current_order > order)
2740 goto find_smallest;
Mel Gormane0104872007-10-16 01:25:53 -07002741
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002742 goto do_steal;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002743 }
2744
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002745 return false;
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002746
2747find_smallest:
2748 for (current_order = order; current_order < MAX_ORDER;
2749 current_order++) {
2750 area = &(zone->free_area[current_order]);
2751 fallback_mt = find_suitable_fallback(area, current_order,
2752 start_migratetype, false, &can_steal);
2753 if (fallback_mt != -1)
2754 break;
2755 }
2756
2757 /*
2758 * This should not happen - we already found a suitable fallback
2759 * when looking for the largest page.
2760 */
2761 VM_BUG_ON(current_order == MAX_ORDER);
2762
2763do_steal:
Dan Williamsb03641a2019-05-14 15:41:32 -07002764 page = get_page_from_free_area(area, fallback_mt);
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002765
Mel Gorman1c308442018-12-28 00:35:52 -08002766 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2767 can_steal);
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002768
2769 trace_mm_page_alloc_extfrag(page, order, current_order,
2770 start_migratetype, fallback_mt);
2771
2772 return true;
2773
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002774}
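/*
 * Illustrative example (editor's sketch): a MOVABLE order-3 request with no
 * MOVABLE pages left first scans from MAX_ORDER - 1 downwards for the
 * largest fallback page. If whole-block stealing is not allowed and the
 * page found is larger than order 3, it instead restarts from order 3
 * upwards and takes the smallest suitable fallback page, so a future
 * fallback fragments as little extra memory as possible.
 */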
2775
Mel Gorman56fd56b2007-10-16 01:25:58 -07002776/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777 * Do the hard work of removing an element from the buddy allocator.
2778 * Call me with the zone->lock already held.
2779 */
Aaron Lu85ccc8f2017-11-15 17:36:53 -08002780static __always_inline struct page *
Mel Gorman6bb15452018-12-28 00:35:41 -08002781__rmqueue(struct zone *zone, unsigned int order, int migratetype,
2782 unsigned int alloc_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 struct page *page;
2785
Roman Gushchin16867662020-06-03 15:58:42 -07002786#ifdef CONFIG_CMA
2787 /*
2788 * Balance movable allocations between regular and CMA areas by
2789 * allocating from CMA when over half of the zone's free memory
2790 * is in the CMA area.
2791 */
2792 if (migratetype == MIGRATE_MOVABLE &&
2793 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2794 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2795 page = __rmqueue_cma_fallback(zone, order);
2796 if (page)
2797 return page;
2798 }
2799#endif
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002800retry:
Mel Gorman56fd56b2007-10-16 01:25:58 -07002801 page = __rmqueue_smallest(zone, order, migratetype);
Mel Gorman974a7862015-11-06 16:28:34 -08002802 if (unlikely(!page)) {
Joonsoo Kimdc676472015-04-14 15:45:15 -07002803 if (migratetype == MIGRATE_MOVABLE)
2804 page = __rmqueue_cma_fallback(zone, order);
2805
Mel Gorman6bb15452018-12-28 00:35:41 -08002806 if (!page && __rmqueue_fallback(zone, order, migratetype,
2807 alloc_flags))
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002808 goto retry;
Mel Gorman728ec982009-06-16 15:32:04 -07002809 }
2810
Mel Gorman0d3d0622009-09-21 17:02:44 -07002811 trace_mm_page_alloc_zone_locked(page, order, migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002812 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813}
2814
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002815/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816 * Obtain a specified number of elements from the buddy allocator, all under
2817 * a single hold of the lock, for efficiency. Add them to the supplied list.
2818 * Returns the number of new pages which were placed at *list.
2819 */
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002820static int rmqueue_bulk(struct zone *zone, unsigned int order,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002821 unsigned long count, struct list_head *list,
Mel Gorman6bb15452018-12-28 00:35:41 -08002822 int migratetype, unsigned int alloc_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823{
Mel Gormana6de7342016-12-12 16:44:41 -08002824 int i, alloced = 0;
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002825
Mel Gormand34b0732017-04-20 14:37:43 -07002826 spin_lock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827 for (i = 0; i < count; ++i) {
Mel Gorman6bb15452018-12-28 00:35:41 -08002828 struct page *page = __rmqueue(zone, order, migratetype,
2829 alloc_flags);
Nick Piggin085cc7d2006-01-06 00:11:01 -08002830 if (unlikely(page == NULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 break;
Mel Gorman81eabcb2007-12-17 16:20:05 -08002832
Mel Gorman479f8542016-05-19 17:14:35 -07002833 if (unlikely(check_pcp_refill(page)))
2834 continue;
2835
Mel Gorman81eabcb2007-12-17 16:20:05 -08002836 /*
Vlastimil Babka0fac3ba2017-11-15 17:38:07 -08002837 * Split buddy pages returned by expand() are received here in
2838 * physical page order. The page is added to the tail of
2839		 * caller's list. From the caller's perspective, the linked list
2840		 * is ordered by page number under some conditions. This is
2841		 * useful for IO devices that walk the list from the head, since
2842		 * they then also see pages in physical order, and for IO devices
2843		 * that can merge IO requests when the physical pages are
2844		 * ordered properly.
Mel Gorman81eabcb2007-12-17 16:20:05 -08002845 */
Vlastimil Babka0fac3ba2017-11-15 17:38:07 -08002846 list_add_tail(&page->lru, list);
Mel Gormana6de7342016-12-12 16:44:41 -08002847 alloced++;
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002848 if (is_migrate_cma(get_pcppage_migratetype(page)))
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07002849 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2850 -(1 << order));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851 }
Mel Gormana6de7342016-12-12 16:44:41 -08002852
2853 /*
2854 * i pages were removed from the buddy list even if some leak due
2855 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2856 * on i. Do not confuse with 'alloced' which is the number of
2857 * pages added to the pcp list.
2858 */
Mel Gormanf2260e62009-06-16 15:32:13 -07002859 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
Mel Gormand34b0732017-04-20 14:37:43 -07002860 spin_unlock(&zone->lock);
Mel Gormana6de7342016-12-12 16:44:41 -08002861 return alloced;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862}
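/*
 * Example of intended use (editor's sketch): the per-cpu list refill in
 * __rmqueue_pcplist() below calls
 *	rmqueue_bulk(zone, 0, pcp->batch, list, migratetype, alloc_flags);
 * pulling pcp->batch order-0 pages under a single zone->lock acquisition
 * rather than taking the lock once per page.
 */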
2863
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002864#ifdef CONFIG_NUMA
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002865/*
Christoph Lameter4037d452007-05-09 02:35:14 -07002866 * Called from the vmstat counter updater to drain pagesets of this
2867 * currently executing processor on remote nodes after they have
2868 * expired.
2869 *
Christoph Lameter879336c2006-03-22 00:09:08 -08002870 * Note that this function must be called with the thread pinned to
2871 * a single processor.
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002872 */
Christoph Lameter4037d452007-05-09 02:35:14 -07002873void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002874{
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002875 unsigned long flags;
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002876 int to_drain, batch;
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002877
Christoph Lameter4037d452007-05-09 02:35:14 -07002878 local_irq_save(flags);
Jason Low4db0c3c2015-04-15 16:14:08 -07002879 batch = READ_ONCE(pcp->batch);
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002880 to_drain = min(pcp->count, batch);
Aaron Lu77ba9062018-04-05 16:24:06 -07002881 if (to_drain > 0)
KOSAKI Motohiro2a135152012-07-31 16:42:53 -07002882 free_pcppages_bulk(zone, to_drain, pcp);
Christoph Lameter4037d452007-05-09 02:35:14 -07002883 local_irq_restore(flags);
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002884}
2885#endif
2886
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002887/*
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002888 * Drain pcplists of the indicated processor and zone.
2889 *
2890 * The processor must either be the current processor and the
2891 * thread pinned to the current processor or a processor that
2892 * is not online.
2893 */
2894static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2895{
2896 unsigned long flags;
2897 struct per_cpu_pageset *pset;
2898 struct per_cpu_pages *pcp;
2899
2900 local_irq_save(flags);
2901 pset = per_cpu_ptr(zone->pageset, cpu);
2902
2903 pcp = &pset->pcp;
Aaron Lu77ba9062018-04-05 16:24:06 -07002904 if (pcp->count)
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002905 free_pcppages_bulk(zone, pcp->count, pcp);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002906 local_irq_restore(flags);
2907}
2908
2909/*
2910 * Drain pcplists of all zones on the indicated processor.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002911 *
2912 * The processor must either be the current processor and the
2913 * thread pinned to the current processor or a processor that
2914 * is not online.
2915 */
2916static void drain_pages(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917{
2918 struct zone *zone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07002920 for_each_populated_zone(zone) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002921 drain_pages_zone(cpu, zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 }
2923}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002925/*
2926 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002927 *
2928 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2929 * the single zone's pages.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002930 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002931void drain_local_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002932{
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002933 int cpu = smp_processor_id();
2934
2935 if (zone)
2936 drain_pages_zone(cpu, zone);
2937 else
2938 drain_pages(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002939}
2940
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002941static void drain_local_pages_wq(struct work_struct *work)
2942{
Wei Yangd9367bd2018-12-28 00:38:58 -08002943 struct pcpu_drain *drain;
2944
2945 drain = container_of(work, struct pcpu_drain, work);
2946
Michal Hockoa459eeb2017-02-24 14:56:35 -08002947 /*
2948 * drain_all_pages doesn't use proper cpu hotplug protection so
2949 * we can race with cpu offline when the WQ can move this from
2950 * a cpu pinned worker to an unbound one. We can operate on a different
2951	 * cpu, which is all right, but we also have to make sure not to move to
2952 * a different one.
2953 */
2954 preempt_disable();
Wei Yangd9367bd2018-12-28 00:38:58 -08002955 drain_local_pages(drain->zone);
Michal Hockoa459eeb2017-02-24 14:56:35 -08002956 preempt_enable();
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002957}
2958
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002959/*
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002960 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2961 *
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002962 * When zone parameter is non-NULL, spill just the single zone's pages.
2963 *
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002964 * Note that this can be extremely slow as the draining happens in a workqueue.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002965 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002966void drain_all_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002967{
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002968 int cpu;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002969
2970 /*
2971	 * Allocate in the BSS so we won't require allocation in
2972 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2973 */
2974 static cpumask_t cpus_with_pcps;
2975
Michal Hockoce612872017-04-07 16:05:05 -07002976 /*
2977 * Make sure nobody triggers this path before mm_percpu_wq is fully
2978 * initialized.
2979 */
2980 if (WARN_ON_ONCE(!mm_percpu_wq))
2981 return;
2982
Mel Gormanbd233f52017-02-24 14:56:56 -08002983 /*
2984 * Do not drain if one is already in progress unless it's specific to
2985 * a zone. Such callers are primarily CMA and memory hotplug and need
2986 * the drain to be complete when the call returns.
2987 */
2988 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2989 if (!zone)
2990 return;
2991 mutex_lock(&pcpu_drain_mutex);
2992 }
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002993
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002994 /*
2995 * We don't care about racing with CPU hotplug event
2996 * as offline notification will cause the notified
2997 * cpu to drain that CPU pcps and on_each_cpu_mask
2998 * disables preemption as part of its processing
2999 */
3000 for_each_online_cpu(cpu) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08003001 struct per_cpu_pageset *pcp;
3002 struct zone *z;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07003003 bool has_pcps = false;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08003004
3005 if (zone) {
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07003006 pcp = per_cpu_ptr(zone->pageset, cpu);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08003007 if (pcp->pcp.count)
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07003008 has_pcps = true;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08003009 } else {
3010 for_each_populated_zone(z) {
3011 pcp = per_cpu_ptr(z->pageset, cpu);
3012 if (pcp->pcp.count) {
3013 has_pcps = true;
3014 break;
3015 }
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07003016 }
3017 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08003018
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07003019 if (has_pcps)
3020 cpumask_set_cpu(cpu, &cpus_with_pcps);
3021 else
3022 cpumask_clear_cpu(cpu, &cpus_with_pcps);
3023 }
Mel Gorman0ccce3b2017-02-24 14:56:32 -08003024
Mel Gormanbd233f52017-02-24 14:56:56 -08003025 for_each_cpu(cpu, &cpus_with_pcps) {
Wei Yangd9367bd2018-12-28 00:38:58 -08003026 struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3027
3028 drain->zone = zone;
3029 INIT_WORK(&drain->work, drain_local_pages_wq);
3030 queue_work_on(cpu, mm_percpu_wq, &drain->work);
Mel Gorman0ccce3b2017-02-24 14:56:32 -08003031 }
Mel Gormanbd233f52017-02-24 14:56:56 -08003032 for_each_cpu(cpu, &cpus_with_pcps)
Wei Yangd9367bd2018-12-28 00:38:58 -08003033 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
Mel Gormanbd233f52017-02-24 14:56:56 -08003034
3035 mutex_unlock(&pcpu_drain_mutex);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08003036}
3037
Rafael J. Wysocki296699d2007-07-29 23:27:18 +02003038#ifdef CONFIG_HIBERNATION
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039
Chen Yu556b9692017-08-25 15:55:30 -07003040/*
3041 * Touch the watchdog for every WD_PAGE_COUNT pages.
3042 */
3043#define WD_PAGE_COUNT (128*1024)
3044
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045void mark_free_pages(struct zone *zone)
3046{
Chen Yu556b9692017-08-25 15:55:30 -07003047 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07003048 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07003049 unsigned int order, t;
Geliang Tang86760a22016-01-14 15:20:33 -08003050 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051
Xishi Qiu8080fc02013-09-11 14:21:45 -07003052 if (zone_is_empty(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 return;
3054
3055 spin_lock_irqsave(&zone->lock, flags);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07003056
Cody P Schafer108bcc92013-02-22 16:35:23 -08003057 max_zone_pfn = zone_end_pfn(zone);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07003058 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3059 if (pfn_valid(pfn)) {
Geliang Tang86760a22016-01-14 15:20:33 -08003060 page = pfn_to_page(pfn);
Joonsoo Kimba6b0972016-05-19 17:12:16 -07003061
Chen Yu556b9692017-08-25 15:55:30 -07003062 if (!--page_count) {
3063 touch_nmi_watchdog();
3064 page_count = WD_PAGE_COUNT;
3065 }
3066
Joonsoo Kimba6b0972016-05-19 17:12:16 -07003067 if (page_zone(page) != zone)
3068 continue;
3069
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07003070 if (!swsusp_page_is_forbidden(page))
3071 swsusp_unset_page_free(page);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07003072 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073
Mel Gormanb2a0ac82007-10-16 01:25:48 -07003074 for_each_migratetype_order(order, t) {
Geliang Tang86760a22016-01-14 15:20:33 -08003075 list_for_each_entry(page,
3076 &zone->free_area[order].free_list[t], lru) {
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07003077 unsigned long i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078
Geliang Tang86760a22016-01-14 15:20:33 -08003079 pfn = page_to_pfn(page);
Chen Yu556b9692017-08-25 15:55:30 -07003080 for (i = 0; i < (1UL << order); i++) {
3081 if (!--page_count) {
3082 touch_nmi_watchdog();
3083 page_count = WD_PAGE_COUNT;
3084 }
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07003085 swsusp_set_page_free(pfn_to_page(pfn + i));
Chen Yu556b9692017-08-25 15:55:30 -07003086 }
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07003087 }
Mel Gormanb2a0ac82007-10-16 01:25:48 -07003088 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089 spin_unlock_irqrestore(&zone->lock, flags);
3090}
Mel Gormane2c55dc2007-10-16 01:25:50 -07003091#endif /* CONFIG_PM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092
Mel Gorman2d4894b2017-11-15 17:37:59 -08003093static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094{
Mel Gorman5f8dcc22009-09-21 17:03:19 -07003095 int migratetype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096
Mel Gorman4db75482016-05-19 17:14:32 -07003097 if (!free_pcp_prepare(page))
Mel Gorman9cca35d42017-11-15 17:37:37 -08003098 return false;
Hugh Dickins689bceb2005-11-21 21:32:20 -08003099
Mel Gormandc4b0ca2014-06-04 16:10:17 -07003100 migratetype = get_pfnblock_migratetype(page, pfn);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07003101 set_pcppage_migratetype(page, migratetype);
Mel Gorman9cca35d42017-11-15 17:37:37 -08003102 return true;
3103}
3104
Mel Gorman2d4894b2017-11-15 17:37:59 -08003105static void free_unref_page_commit(struct page *page, unsigned long pfn)
Mel Gorman9cca35d42017-11-15 17:37:37 -08003106{
3107 struct zone *zone = page_zone(page);
3108 struct per_cpu_pages *pcp;
3109 int migratetype;
3110
3111 migratetype = get_pcppage_migratetype(page);
Mel Gormand34b0732017-04-20 14:37:43 -07003112 __count_vm_event(PGFREE);
Mel Gormanda456f12009-06-16 15:32:08 -07003113
Mel Gorman5f8dcc22009-09-21 17:03:19 -07003114 /*
3115 * We only track unmovable, reclaimable and movable on pcp lists.
3116 * Free ISOLATE pages back to the allocator because they are being
Xishi Qiua6ffdc02017-05-03 14:52:52 -07003117 * offlined but treat HIGHATOMIC as movable pages so we can get those
Mel Gorman5f8dcc22009-09-21 17:03:19 -07003118 * areas back if necessary. Otherwise, we may have to free
3119 * excessively into the page allocator
3120 */
3121 if (migratetype >= MIGRATE_PCPTYPES) {
Minchan Kim194159f2013-02-22 16:33:58 -08003122 if (unlikely(is_migrate_isolate(migratetype))) {
Mel Gormandc4b0ca2014-06-04 16:10:17 -07003123 free_one_page(zone, page, pfn, 0, migratetype);
Mel Gorman9cca35d42017-11-15 17:37:37 -08003124 return;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07003125 }
3126 migratetype = MIGRATE_MOVABLE;
3127 }
3128
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09003129 pcp = &this_cpu_ptr(zone->pageset)->pcp;
Mel Gorman2d4894b2017-11-15 17:37:59 -08003130 list_add(&page->lru, &pcp->lists[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131 pcp->count++;
Nick Piggin48db57f2006-01-08 01:00:42 -08003132 if (pcp->count >= pcp->high) {
Jason Low4db0c3c2015-04-15 16:14:08 -07003133 unsigned long batch = READ_ONCE(pcp->batch);
Cody P Schafer998d39cb2013-07-03 15:01:32 -07003134 free_pcppages_bulk(zone, batch, pcp);
Nick Piggin48db57f2006-01-08 01:00:42 -08003135 }
Mel Gorman9cca35d42017-11-15 17:37:37 -08003136}
Mel Gorman5f8dcc22009-09-21 17:03:19 -07003137
Mel Gorman9cca35d42017-11-15 17:37:37 -08003138/*
3139 * Free a 0-order page
Mel Gorman9cca35d42017-11-15 17:37:37 -08003140 */
Mel Gorman2d4894b2017-11-15 17:37:59 -08003141void free_unref_page(struct page *page)
Mel Gorman9cca35d42017-11-15 17:37:37 -08003142{
3143 unsigned long flags;
3144 unsigned long pfn = page_to_pfn(page);
3145
Mel Gorman2d4894b2017-11-15 17:37:59 -08003146 if (!free_unref_page_prepare(page, pfn))
Mel Gorman9cca35d42017-11-15 17:37:37 -08003147 return;
3148
3149 local_irq_save(flags);
Mel Gorman2d4894b2017-11-15 17:37:59 -08003150 free_unref_page_commit(page, pfn);
Mel Gormand34b0732017-04-20 14:37:43 -07003151 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152}
3153
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08003154/*
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08003155 * Free a list of 0-order pages
3156 */
Mel Gorman2d4894b2017-11-15 17:37:59 -08003157void free_unref_page_list(struct list_head *list)
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08003158{
3159 struct page *page, *next;
Mel Gorman9cca35d42017-11-15 17:37:37 -08003160 unsigned long flags, pfn;
Lucas Stachc24ad772017-12-14 15:32:55 -08003161 int batch_count = 0;
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08003162
Mel Gorman9cca35d42017-11-15 17:37:37 -08003163 /* Prepare pages for freeing */
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08003164 list_for_each_entry_safe(page, next, list, lru) {
Mel Gorman9cca35d42017-11-15 17:37:37 -08003165 pfn = page_to_pfn(page);
Mel Gorman2d4894b2017-11-15 17:37:59 -08003166 if (!free_unref_page_prepare(page, pfn))
Mel Gorman9cca35d42017-11-15 17:37:37 -08003167 list_del(&page->lru);
3168 set_page_private(page, pfn);
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08003169 }
Mel Gorman9cca35d42017-11-15 17:37:37 -08003170
3171 local_irq_save(flags);
3172 list_for_each_entry_safe(page, next, list, lru) {
3173 unsigned long pfn = page_private(page);
3174
3175 set_page_private(page, 0);
Mel Gorman2d4894b2017-11-15 17:37:59 -08003176 trace_mm_page_free_batched(page);
3177 free_unref_page_commit(page, pfn);
Lucas Stachc24ad772017-12-14 15:32:55 -08003178
3179 /*
3180 * Guard against excessive IRQ disabled times when we get
3181 * a large list of pages to free.
3182 */
3183 if (++batch_count == SWAP_CLUSTER_MAX) {
3184 local_irq_restore(flags);
3185 batch_count = 0;
3186 local_irq_save(flags);
3187 }
Mel Gorman9cca35d42017-11-15 17:37:37 -08003188 }
3189 local_irq_restore(flags);
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08003190}
3191
3192/*
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08003193 * split_page takes a non-compound higher-order page, and splits it into
3194 * n (1<<order) sub-pages: page[0..n-1]
3195 * Each sub-page must be freed individually.
3196 *
3197 * Note: this is probably too low level an operation for use in drivers.
3198 * Please consult with lkml before using this in your driver.
3199 */
3200void split_page(struct page *page, unsigned int order)
3201{
3202 int i;
3203
Sasha Levin309381fea2014-01-23 15:52:54 -08003204 VM_BUG_ON_PAGE(PageCompound(page), page);
3205 VM_BUG_ON_PAGE(!page_count(page), page);
Vegard Nossumb1eeab62008-11-25 16:55:53 +01003206
Joonsoo Kima9627bc2016-07-26 15:23:49 -07003207 for (i = 1; i < (1 << order); i++)
Nick Piggin7835e982006-03-22 00:08:40 -08003208 set_page_refcounted(page + i);
Joonsoo Kima9627bc2016-07-26 15:23:49 -07003209 split_page_owner(page, order);
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08003210}
K. Y. Srinivasan5853ff22013-03-25 15:47:38 -07003211EXPORT_SYMBOL_GPL(split_page);
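/*
 * Usage sketch (editor's example, not from the original source): a caller
 * that needs four physically contiguous but individually refcounted pages
 * might do:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		split_page(page, 2);
 *		... hand out page, page + 1, page + 2, page + 3; each
 *		    sub-page is later released with __free_page() ...
 *	}
 */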
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08003212
Joonsoo Kim3c605092014-11-13 15:19:21 -08003213int __isolate_free_page(struct page *page, unsigned int order)
Mel Gorman748446b2010-05-24 14:32:27 -07003214{
Mel Gorman748446b2010-05-24 14:32:27 -07003215 unsigned long watermark;
3216 struct zone *zone;
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07003217 int mt;
Mel Gorman748446b2010-05-24 14:32:27 -07003218
3219 BUG_ON(!PageBuddy(page));
3220
3221 zone = page_zone(page);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08003222 mt = get_pageblock_migratetype(page);
Mel Gorman748446b2010-05-24 14:32:27 -07003223
Minchan Kim194159f2013-02-22 16:33:58 -08003224 if (!is_migrate_isolate(mt)) {
Vlastimil Babka8348faf2016-10-07 16:58:00 -07003225 /*
3226 * Obey watermarks as if the page was being allocated. We can
3227 * emulate a high-order watermark check with a raised order-0
3228 * watermark, because we already know our high-order page
3229 * exists.
3230 */
Mel Gormanfd1444b2019-03-05 15:44:50 -08003231 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09003232 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08003233 return 0;
3234
Mel Gorman8fb74b92013-01-11 14:32:16 -08003235 __mod_zone_freepage_state(zone, -(1UL << order), mt);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08003236 }
Mel Gorman748446b2010-05-24 14:32:27 -07003237
3238 /* Remove page from free list */
Dan Williamsb03641a2019-05-14 15:41:32 -07003239
Alexander Duyck6ab01362020-04-06 20:04:49 -07003240 del_page_from_free_list(page, zone, order);
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07003241
zhong jiang400bc7f2016-07-28 15:45:07 -07003242 /*
3243	 * If the isolated page covers at least half of a pageblock, set the
3244	 * migratetype of the covered pageblock(s) to MIGRATE_MOVABLE
3245 */
Mel Gorman748446b2010-05-24 14:32:27 -07003246 if (order >= pageblock_order - 1) {
3247 struct page *endpage = page + (1 << order) - 1;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01003248 for (; page < endpage; page += pageblock_nr_pages) {
3249 int mt = get_pageblock_migratetype(page);
Minchan Kim88ed3652016-12-12 16:42:05 -08003250 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
Xishi Qiua6ffdc02017-05-03 14:52:52 -07003251 && !is_migrate_highatomic(mt))
Michal Nazarewicz47118af2011-12-29 13:09:50 +01003252 set_pageblock_migratetype(page,
3253 MIGRATE_MOVABLE);
3254 }
Mel Gorman748446b2010-05-24 14:32:27 -07003255 }
3256
Joonsoo Kimf3a14ce2015-07-17 16:24:15 -07003257
Mel Gorman8fb74b92013-01-11 14:32:16 -08003258 return 1UL << order;
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07003259}
3260
Alexander Duyck624f58d2020-04-06 20:04:53 -07003261/**
3262 * __putback_isolated_page - Return a now-isolated page back where we got it
3263 * @page: Page that was isolated
3264 * @order: Order of the isolated page
Randy Dunlape6a0a7a2020-04-10 14:32:29 -07003265 * @mt: The page's pageblock's migratetype
Alexander Duyck624f58d2020-04-06 20:04:53 -07003266 *
3267 * This function is meant to return a page pulled from the free lists via
3268 * __isolate_free_page back to the free list it was pulled from.
3269 */
3270void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3271{
3272 struct zone *zone = page_zone(page);
3273
3274 /* zone lock should be held when this function is called */
3275 lockdep_assert_held(&zone->lock);
3276
3277 /* Return isolated page to tail of freelist. */
Alexander Duyck36e66c52020-04-06 20:04:56 -07003278 __free_one_page(page, page_to_pfn(page), zone, order, mt, false);
Alexander Duyck624f58d2020-04-06 20:04:53 -07003279}
3280
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07003281/*
Mel Gorman060e7412016-05-19 17:13:27 -07003282 * Update NUMA hit/miss statistics
3283 *
3284 * Must be called with interrupts disabled.
Mel Gorman060e7412016-05-19 17:13:27 -07003285 */
Michal Hocko41b61672017-01-10 16:57:42 -08003286static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
Mel Gorman060e7412016-05-19 17:13:27 -07003287{
3288#ifdef CONFIG_NUMA
Kemi Wang3a321d22017-09-08 16:12:48 -07003289 enum numa_stat_item local_stat = NUMA_LOCAL;
Mel Gorman060e7412016-05-19 17:13:27 -07003290
Kemi Wang45180852017-11-15 17:38:22 -08003291	/* skip NUMA counter updates if NUMA stats are disabled */
3292 if (!static_branch_likely(&vm_numa_stat_key))
3293 return;
3294
Pavel Tatashinc1093b72018-08-21 21:53:32 -07003295 if (zone_to_nid(z) != numa_node_id())
Mel Gorman060e7412016-05-19 17:13:27 -07003296 local_stat = NUMA_OTHER;
Mel Gorman060e7412016-05-19 17:13:27 -07003297
Pavel Tatashinc1093b72018-08-21 21:53:32 -07003298 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
Kemi Wang3a321d22017-09-08 16:12:48 -07003299 __inc_numa_state(z, NUMA_HIT);
Michal Hocko2df26632017-01-10 16:57:39 -08003300 else {
Kemi Wang3a321d22017-09-08 16:12:48 -07003301 __inc_numa_state(z, NUMA_MISS);
3302 __inc_numa_state(preferred_zone, NUMA_FOREIGN);
Mel Gorman060e7412016-05-19 17:13:27 -07003303 }
Kemi Wang3a321d22017-09-08 16:12:48 -07003304 __inc_numa_state(z, local_stat);
Mel Gorman060e7412016-05-19 17:13:27 -07003305#endif
3306}
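/*
 * Illustrative example (editor's sketch): a task running on node 0 whose
 * preferred zone is on node 0 but which gets its page from a node-1 zone
 * increments NUMA_MISS on the node-1 zone, NUMA_FOREIGN on the preferred
 * node-0 zone, and NUMA_OTHER on the node-1 zone; had the page come from
 * node 0, it would count NUMA_HIT and NUMA_LOCAL instead.
 */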
3307
Mel Gorman066b2392017-02-24 14:56:26 -08003308/* Remove page from the per-cpu list, caller must protect the list */
3309static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
Mel Gorman6bb15452018-12-28 00:35:41 -08003310 unsigned int alloc_flags,
Mel Gorman453f85d2017-11-15 17:38:03 -08003311 struct per_cpu_pages *pcp,
Mel Gorman066b2392017-02-24 14:56:26 -08003312 struct list_head *list)
3313{
3314 struct page *page;
3315
3316 do {
3317 if (list_empty(list)) {
3318 pcp->count += rmqueue_bulk(zone, 0,
3319 pcp->batch, list,
Mel Gorman6bb15452018-12-28 00:35:41 -08003320 migratetype, alloc_flags);
Mel Gorman066b2392017-02-24 14:56:26 -08003321 if (unlikely(list_empty(list)))
3322 return NULL;
3323 }
3324
Mel Gorman453f85d2017-11-15 17:38:03 -08003325 page = list_first_entry(list, struct page, lru);
Mel Gorman066b2392017-02-24 14:56:26 -08003326 list_del(&page->lru);
3327 pcp->count--;
3328 } while (check_new_pcp(page));
3329
3330 return page;
3331}
3332
3333/* Lock and remove page from the per-cpu list */
3334static struct page *rmqueue_pcplist(struct zone *preferred_zone,
Yafang Shao1c52e6d2019-05-13 17:22:40 -07003335 struct zone *zone, gfp_t gfp_flags,
3336 int migratetype, unsigned int alloc_flags)
Mel Gorman066b2392017-02-24 14:56:26 -08003337{
3338 struct per_cpu_pages *pcp;
3339 struct list_head *list;
Mel Gorman066b2392017-02-24 14:56:26 -08003340 struct page *page;
Mel Gormand34b0732017-04-20 14:37:43 -07003341 unsigned long flags;
Mel Gorman066b2392017-02-24 14:56:26 -08003342
Mel Gormand34b0732017-04-20 14:37:43 -07003343 local_irq_save(flags);
Mel Gorman066b2392017-02-24 14:56:26 -08003344 pcp = &this_cpu_ptr(zone->pageset)->pcp;
3345 list = &pcp->lists[migratetype];
Mel Gorman6bb15452018-12-28 00:35:41 -08003346 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
Mel Gorman066b2392017-02-24 14:56:26 -08003347 if (page) {
Yafang Shao1c52e6d2019-05-13 17:22:40 -07003348 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
Mel Gorman066b2392017-02-24 14:56:26 -08003349 zone_statistics(preferred_zone, zone);
3350 }
Mel Gormand34b0732017-04-20 14:37:43 -07003351 local_irq_restore(flags);
Mel Gorman066b2392017-02-24 14:56:26 -08003352 return page;
3353}
3354
Mel Gorman060e7412016-05-19 17:13:27 -07003355/*
Vlastimil Babka75379192015-02-11 15:25:38 -08003356 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357 */
Mel Gorman0a15c3e2009-06-16 15:32:05 -07003358static inline
Mel Gorman066b2392017-02-24 14:56:26 -08003359struct page *rmqueue(struct zone *preferred_zone,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07003360 struct zone *zone, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003361 gfp_t gfp_flags, unsigned int alloc_flags,
3362 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363{
3364 unsigned long flags;
Hugh Dickins689bceb2005-11-21 21:32:20 -08003365 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366
Mel Gormand34b0732017-04-20 14:37:43 -07003367 if (likely(order == 0)) {
Yafang Shao1c52e6d2019-05-13 17:22:40 -07003368 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
3369 migratetype, alloc_flags);
Mel Gorman066b2392017-02-24 14:56:26 -08003370 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371 }
3372
Mel Gorman066b2392017-02-24 14:56:26 -08003373 /*
3374 * We most definitely don't want callers attempting to
3375 * allocate greater than order-1 page units with __GFP_NOFAIL.
3376 */
3377 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3378 spin_lock_irqsave(&zone->lock, flags);
3379
3380 do {
3381 page = NULL;
3382 if (alloc_flags & ALLOC_HARDER) {
3383 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3384 if (page)
3385 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3386 }
3387 if (!page)
Mel Gorman6bb15452018-12-28 00:35:41 -08003388 page = __rmqueue(zone, order, migratetype, alloc_flags);
Mel Gorman066b2392017-02-24 14:56:26 -08003389 } while (page && check_new_pages(page, order));
3390 spin_unlock(&zone->lock);
3391 if (!page)
3392 goto failed;
3393 __mod_zone_freepage_state(zone, -(1 << order),
3394 get_pcppage_migratetype(page));
3395
Mel Gorman16709d12016-07-28 15:46:56 -07003396 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
Michal Hocko41b61672017-01-10 16:57:42 -08003397 zone_statistics(preferred_zone, zone);
Nick Piggina74609f2006-01-06 00:11:20 -08003398 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399
Mel Gorman066b2392017-02-24 14:56:26 -08003400out:
Mel Gorman73444bc2019-01-08 15:23:39 -08003401 /* Separate test+clear to avoid unnecessary atomics */
3402 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3403 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3404 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3405 }
3406
Mel Gorman066b2392017-02-24 14:56:26 -08003407 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408 return page;
Nick Piggina74609f2006-01-06 00:11:20 -08003409
3410failed:
3411 local_irq_restore(flags);
Nick Piggina74609f2006-01-06 00:11:20 -08003412 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413}
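/*
 * Summary note (editor's sketch): order-0 requests are served from the
 * per-cpu lists with interrupts disabled but without zone->lock, while
 * order > 0 requests take zone->lock; ALLOC_HARDER requests additionally
 * try the MIGRATE_HIGHATOMIC freelist before the regular __rmqueue() path.
 */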
3414
Akinobu Mita933e3122006-12-08 02:39:45 -08003415#ifdef CONFIG_FAIL_PAGE_ALLOC
3416
Akinobu Mitab2588c42011-07-26 16:09:03 -07003417static struct {
Akinobu Mita933e3122006-12-08 02:39:45 -08003418 struct fault_attr attr;
3419
Viresh Kumar621a5f72015-09-26 15:04:07 -07003420 bool ignore_gfp_highmem;
Mel Gorman71baba42015-11-06 16:28:28 -08003421 bool ignore_gfp_reclaim;
Akinobu Mita54114992007-07-15 23:40:23 -07003422 u32 min_order;
Akinobu Mita933e3122006-12-08 02:39:45 -08003423} fail_page_alloc = {
3424 .attr = FAULT_ATTR_INITIALIZER,
Mel Gorman71baba42015-11-06 16:28:28 -08003425 .ignore_gfp_reclaim = true,
Viresh Kumar621a5f72015-09-26 15:04:07 -07003426 .ignore_gfp_highmem = true,
Akinobu Mita54114992007-07-15 23:40:23 -07003427 .min_order = 1,
Akinobu Mita933e3122006-12-08 02:39:45 -08003428};
3429
3430static int __init setup_fail_page_alloc(char *str)
3431{
3432 return setup_fault_attr(&fail_page_alloc.attr, str);
3433}
3434__setup("fail_page_alloc=", setup_fail_page_alloc);
3435
Benjamin Poirieraf3b8542018-12-28 00:39:23 -08003436static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08003437{
Akinobu Mita54114992007-07-15 23:40:23 -07003438 if (order < fail_page_alloc.min_order)
Gavin Shandeaf3862012-07-31 16:41:51 -07003439 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08003440 if (gfp_mask & __GFP_NOFAIL)
Gavin Shandeaf3862012-07-31 16:41:51 -07003441 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08003442 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
Gavin Shandeaf3862012-07-31 16:41:51 -07003443 return false;
Mel Gorman71baba42015-11-06 16:28:28 -08003444 if (fail_page_alloc.ignore_gfp_reclaim &&
3445 (gfp_mask & __GFP_DIRECT_RECLAIM))
Gavin Shandeaf3862012-07-31 16:41:51 -07003446 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08003447
3448 return should_fail(&fail_page_alloc.attr, 1 << order);
3449}
3450
3451#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3452
3453static int __init fail_page_alloc_debugfs(void)
3454{
Joe Perches0825a6f2018-06-14 15:27:58 -07003455 umode_t mode = S_IFREG | 0600;
Akinobu Mita933e3122006-12-08 02:39:45 -08003456 struct dentry *dir;
Akinobu Mita933e3122006-12-08 02:39:45 -08003457
Akinobu Mitadd48c082011-08-03 16:21:01 -07003458 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3459 &fail_page_alloc.attr);
Akinobu Mita933e3122006-12-08 02:39:45 -08003460
Greg Kroah-Hartmand9f79792019-03-05 15:46:09 -08003461 debugfs_create_bool("ignore-gfp-wait", mode, dir,
3462 &fail_page_alloc.ignore_gfp_reclaim);
3463 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3464 &fail_page_alloc.ignore_gfp_highmem);
3465 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
Akinobu Mita933e3122006-12-08 02:39:45 -08003466
Akinobu Mitab2588c42011-07-26 16:09:03 -07003467 return 0;
Akinobu Mita933e3122006-12-08 02:39:45 -08003468}
3469
3470late_initcall(fail_page_alloc_debugfs);
3471
3472#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3473
3474#else /* CONFIG_FAIL_PAGE_ALLOC */
3475
Benjamin Poirieraf3b8542018-12-28 00:39:23 -08003476static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08003477{
Gavin Shandeaf3862012-07-31 16:41:51 -07003478 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08003479}
3480
3481#endif /* CONFIG_FAIL_PAGE_ALLOC */
3482
Benjamin Poirieraf3b8542018-12-28 00:39:23 -08003483static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3484{
3485 return __should_fail_alloc_page(gfp_mask, order);
3486}
3487ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
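/*
 * Usage sketch (editor's note; exact paths and format follow the generic
 * fault injection framework and may vary by kernel version/config):
 * failures can be requested at boot, e.g.
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 * or tuned at runtime via debugfs, e.g.
 *	echo 10 > /sys/kernel/debug/fail_page_alloc/probability
 *	echo 2  > /sys/kernel/debug/fail_page_alloc/min-order
 */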
3488
Jaewon Kimf27ce0e2020-08-06 23:25:20 -07003489static inline long __zone_watermark_unusable_free(struct zone *z,
3490 unsigned int order, unsigned int alloc_flags)
3491{
3492 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3493 long unusable_free = (1 << order) - 1;
3494
3495 /*
3496 * If the caller does not have rights to ALLOC_HARDER then subtract
3497 * the high-atomic reserves. This will over-estimate the size of the
3498 * atomic reserve but it avoids a search.
3499 */
3500 if (likely(!alloc_harder))
3501 unusable_free += z->nr_reserved_highatomic;
3502
3503#ifdef CONFIG_CMA
3504 /* If allocation can't use CMA areas don't use free CMA pages */
3505 if (!(alloc_flags & ALLOC_CMA))
3506 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3507#endif
3508
3509 return unusable_free;
3510}
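/*
 * Worked example (editor's sketch): for an order-2 request without
 * ALLOC_HARDER/ALLOC_OOM and without ALLOC_CMA, unusable_free is
 * (1 << 2) - 1 = 3 pages, plus the zone's nr_reserved_highatomic and its
 * free CMA pages; the watermark checks below subtract this from the
 * observed free page count.
 */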
3511
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512/*
Mel Gorman97a16fc2015-11-06 16:28:40 -08003513 * Return true if free base pages are above 'mark'. For high-order checks it
3514 * will return true if the order-0 watermark is reached and there is at least
3515 * one free page of a suitable size. Checking now avoids taking the zone lock
3516 * to check in the allocation paths if no pages are free.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517 */
Michal Hocko86a294a2016-05-20 16:57:12 -07003518bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003519 int highest_zoneidx, unsigned int alloc_flags,
Michal Hocko86a294a2016-05-20 16:57:12 -07003520 long free_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521{
Christoph Lameterd23ad422007-02-10 01:43:02 -08003522 long min = mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523 int o;
Michal Hockocd04ae12017-09-06 16:24:50 -07003524 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003526 /* free_pages may go negative - that's OK */
Jaewon Kimf27ce0e2020-08-06 23:25:20 -07003527 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003528
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003529 if (alloc_flags & ALLOC_HIGH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530 min -= min / 2;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003531
Jaewon Kimf27ce0e2020-08-06 23:25:20 -07003532 if (unlikely(alloc_harder)) {
Michal Hockocd04ae12017-09-06 16:24:50 -07003533 /*
3534 * OOM victims can try even harder than normal ALLOC_HARDER
3535 * users on the grounds that it's definitely going to be in
3536 * the exit path shortly and free memory. Any allocation it
3537 * makes during the free path will be small and short-lived.
3538 */
3539 if (alloc_flags & ALLOC_OOM)
3540 min -= min / 2;
3541 else
3542 min -= min / 4;
3543 }
3544
Mel Gorman97a16fc2015-11-06 16:28:40 -08003545 /*
3546 * Check watermarks for an order-0 allocation request. If these
3547 * are not met, then a high-order request also cannot go ahead
3548 * even if a suitable page happened to be free.
3549 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003550 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
Mel Gorman88f5acf2011-01-13 15:45:41 -08003551 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552
Mel Gorman97a16fc2015-11-06 16:28:40 -08003553 /* If this is an order-0 request then the watermark is fine */
3554 if (!order)
3555 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003556
Mel Gorman97a16fc2015-11-06 16:28:40 -08003557 /* For a high-order request, check at least one suitable page is free */
3558 for (o = order; o < MAX_ORDER; o++) {
3559 struct free_area *area = &z->free_area[o];
3560 int mt;
3561
3562 if (!area->nr_free)
3563 continue;
3564
Mel Gorman97a16fc2015-11-06 16:28:40 -08003565 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
Dan Williamsb03641a2019-05-14 15:41:32 -07003566 if (!free_area_empty(area, mt))
Mel Gorman97a16fc2015-11-06 16:28:40 -08003567 return true;
3568 }
3569
3570#ifdef CONFIG_CMA
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09003571 if ((alloc_flags & ALLOC_CMA) &&
Dan Williamsb03641a2019-05-14 15:41:32 -07003572 !free_area_empty(area, MIGRATE_CMA)) {
Mel Gorman97a16fc2015-11-06 16:28:40 -08003573 return true;
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09003574 }
Mel Gorman97a16fc2015-11-06 16:28:40 -08003575#endif
chenqiwu76089d02020-04-01 21:09:50 -07003576 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
Vlastimil Babkab050e372017-11-15 17:38:30 -08003577 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578 }
Mel Gorman97a16fc2015-11-06 16:28:40 -08003579 return false;
Mel Gorman88f5acf2011-01-13 15:45:41 -08003580}
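
/*
 * Worked example (hypothetical values): with mark == 1000, ALLOC_HIGH
 * drops min to 500; an ALLOC_OOM caller then drops it further to 250,
 * while a plain ALLOC_HARDER caller would end up at 375. The order-0
 * check then requires free_pages (already reduced by the unusable
 * portion above) to exceed min + lowmem_reserve[highest_zoneidx] before
 * any per-order free list scan is attempted.
 */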
3581
Mel Gorman7aeb09f2014-06-04 16:10:21 -07003582bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003583 int highest_zoneidx, unsigned int alloc_flags)
Mel Gorman88f5acf2011-01-13 15:45:41 -08003584{
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003585 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
Mel Gorman88f5acf2011-01-13 15:45:41 -08003586 zone_page_state(z, NR_FREE_PAGES));
3587}
3588
Mel Gorman48ee5f32016-05-19 17:14:07 -07003589static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003590 unsigned long mark, int highest_zoneidx,
3591 unsigned int alloc_flags)
Mel Gorman48ee5f32016-05-19 17:14:07 -07003592{
Jaewon Kimf27ce0e2020-08-06 23:25:20 -07003593 long free_pages;
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09003594
Jaewon Kimf27ce0e2020-08-06 23:25:20 -07003595 free_pages = zone_page_state(z, NR_FREE_PAGES);
Mel Gorman48ee5f32016-05-19 17:14:07 -07003596
3597 /*
3598 * Fast check for order-0 only. If this fails then the reserves
Jaewon Kimf27ce0e2020-08-06 23:25:20 -07003599 * need to be calculated.
Mel Gorman48ee5f32016-05-19 17:14:07 -07003600 */
Jaewon Kimf27ce0e2020-08-06 23:25:20 -07003601 if (!order) {
3602 long fast_free;
3603
3604 fast_free = free_pages;
3605 fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
3606 if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
3607 return true;
3608 }
Mel Gorman48ee5f32016-05-19 17:14:07 -07003609
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003610 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
Mel Gorman48ee5f32016-05-19 17:14:07 -07003611 free_pages);
3612}
3613
Mel Gorman7aeb09f2014-06-04 16:10:21 -07003614bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003615 unsigned long mark, int highest_zoneidx)
Mel Gorman88f5acf2011-01-13 15:45:41 -08003616{
3617 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3618
3619 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3620 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3621
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003622 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
Mel Gorman88f5acf2011-01-13 15:45:41 -08003623 free_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624}
3625
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003626#ifdef CONFIG_NUMA
David Rientjes957f8222012-10-08 16:33:24 -07003627static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3628{
Gavin Shane02dc012017-02-24 14:59:33 -08003629 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
Matt Fleminga55c7452019-08-08 20:53:01 +01003630 node_reclaim_distance;
David Rientjes957f8222012-10-08 16:33:24 -07003631}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003632#else /* CONFIG_NUMA */
David Rientjes957f8222012-10-08 16:33:24 -07003633static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3634{
3635 return true;
3636}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003637#endif /* CONFIG_NUMA */
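
/*
 * For example (assuming the default node_reclaim_distance of
 * RECLAIM_DISTANCE == 30): with a SLIT distance of 21 between two nodes,
 * zone_allows_reclaim() returns true and node_reclaim() may be attempted
 * for the remote zone, whereas a distance of 40 makes it return false and
 * the zone is simply skipped in get_page_from_freelist().
 */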
3638
Mel Gorman6bb15452018-12-28 00:35:41 -08003639/*
3640 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3641 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3642 * premature use of a lower zone may cause lowmem pressure problems that
3643 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3644 * probably too small. It only makes sense to spread allocations to avoid
3645 * fragmentation between the Normal and DMA32 zones.
3646 */
3647static inline unsigned int
Mel Gorman0a79cda2018-12-28 00:35:48 -08003648alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
Mel Gorman6bb15452018-12-28 00:35:41 -08003649{
Mateusz Nosek736838e2020-04-01 21:09:47 -07003650 unsigned int alloc_flags;
Mel Gorman0a79cda2018-12-28 00:35:48 -08003651
Mateusz Nosek736838e2020-04-01 21:09:47 -07003652 /*
3653 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3654 * to save a branch.
3655 */
3656 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
Mel Gorman0a79cda2018-12-28 00:35:48 -08003657
3658#ifdef CONFIG_ZONE_DMA32
Andrey Ryabinin8139ad02019-04-25 22:23:58 -07003659 if (!zone)
3660 return alloc_flags;
3661
Mel Gorman6bb15452018-12-28 00:35:41 -08003662 if (zone_idx(zone) != ZONE_NORMAL)
Andrey Ryabinin8118b822019-04-25 22:24:01 -07003663 return alloc_flags;
Mel Gorman6bb15452018-12-28 00:35:41 -08003664
3665 /*
3666 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3667 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3668 * on UMA that if Normal is populated then so is DMA32.
3669 */
3670 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3671 if (nr_online_nodes > 1 && !populated_zone(--zone))
Andrey Ryabinin8118b822019-04-25 22:24:01 -07003672 return alloc_flags;
Mel Gorman6bb15452018-12-28 00:35:41 -08003673
Andrey Ryabinin8118b822019-04-25 22:24:01 -07003674 alloc_flags |= ALLOC_NOFRAGMENT;
Mel Gorman0a79cda2018-12-28 00:35:48 -08003675#endif /* CONFIG_ZONE_DMA32 */
3676 return alloc_flags;
Mel Gorman6bb15452018-12-28 00:35:41 -08003677}
Mel Gorman6bb15452018-12-28 00:35:41 -08003678
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003679/*
Paul Jackson0798e512006-12-06 20:31:38 -08003680 * get_page_from_freelist goes through the zonelist trying to allocate
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003681 * a page.
3682 */
3683static struct page *
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003684get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3685 const struct alloc_context *ac)
Martin Hicks753ee722005-06-21 17:14:41 -07003686{
Mel Gorman6bb15452018-12-28 00:35:41 -08003687 struct zoneref *z;
Mel Gorman5117f452009-06-16 15:31:59 -07003688 struct zone *zone;
Mel Gorman3b8c0be2016-07-28 15:46:53 -07003689 struct pglist_data *last_pgdat_dirty_limit = NULL;
Mel Gorman6bb15452018-12-28 00:35:41 -08003690 bool no_fallback;
Mel Gorman3b8c0be2016-07-28 15:46:53 -07003691
Mel Gorman6bb15452018-12-28 00:35:41 -08003692retry:
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003693 /*
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003694 * Scan zonelist, looking for a zone with enough free pages.
Vladimir Davydov344736f2014-10-20 15:50:30 +04003695 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003696 */
Mel Gorman6bb15452018-12-28 00:35:41 -08003697 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3698 z = ac->preferred_zoneref;
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003699 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist,
3700 ac->highest_zoneidx, ac->nodemask) {
Mel Gormanbe06af02016-05-19 17:13:47 -07003701 struct page *page;
Johannes Weinere085dbc2013-09-11 14:20:46 -07003702 unsigned long mark;
3703
Mel Gorman664eedd2014-06-04 16:10:08 -07003704 if (cpusets_enabled() &&
3705 (alloc_flags & ALLOC_CPUSET) &&
Vlastimil Babka002f2902016-05-19 17:14:30 -07003706 !__cpuset_zone_allowed(zone, gfp_mask))
Mel Gormancd38b112011-07-25 17:12:29 -07003707 continue;
Johannes Weinera756cf52012-01-10 15:07:49 -08003708 /*
3709 * When allocating a page cache page for writing, we
Mel Gorman281e3722016-07-28 15:46:11 -07003710 * want to get it from a node that is within its dirty
3711 * limit, such that no single node holds more than its
Johannes Weinera756cf52012-01-10 15:07:49 -08003712 * proportional share of globally allowed dirty pages.
Mel Gorman281e3722016-07-28 15:46:11 -07003713 * The dirty limits take into account the node's
Johannes Weinera756cf52012-01-10 15:07:49 -08003714 * lowmem reserves and high watermark so that kswapd
3715 * should be able to balance it without having to
3716 * write pages from its LRU list.
3717 *
Johannes Weinera756cf52012-01-10 15:07:49 -08003718 * XXX: For now, allow allocations to potentially
Mel Gorman281e3722016-07-28 15:46:11 -07003719 * exceed the per-node dirty limit in the slowpath
Mel Gormanc9ab0c42015-11-06 16:28:12 -08003720 * (spread_dirty_pages unset) before going into reclaim,
Johannes Weinera756cf52012-01-10 15:07:49 -08003721 * which is important when on a NUMA setup the allowed
Mel Gorman281e3722016-07-28 15:46:11 -07003722 * nodes are together not big enough to reach the
Johannes Weinera756cf52012-01-10 15:07:49 -08003723 * global limit. The proper fix for these situations
Mel Gorman281e3722016-07-28 15:46:11 -07003724 * will require awareness of nodes in the
Johannes Weinera756cf52012-01-10 15:07:49 -08003725 * dirty-throttling and the flusher threads.
3726 */
Mel Gorman3b8c0be2016-07-28 15:46:53 -07003727 if (ac->spread_dirty_pages) {
3728 if (last_pgdat_dirty_limit == zone->zone_pgdat)
3729 continue;
3730
3731 if (!node_dirty_ok(zone->zone_pgdat)) {
3732 last_pgdat_dirty_limit = zone->zone_pgdat;
3733 continue;
3734 }
3735 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003736
Mel Gorman6bb15452018-12-28 00:35:41 -08003737 if (no_fallback && nr_online_nodes > 1 &&
3738 zone != ac->preferred_zoneref->zone) {
3739 int local_nid;
3740
3741 /*
3742 * If moving to a remote node, retry but allow
3743 * fragmenting fallbacks. Locality is more important
3744 * than fragmentation avoidance.
3745 */
3746 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3747 if (zone_to_nid(zone) != local_nid) {
3748 alloc_flags &= ~ALLOC_NOFRAGMENT;
3749 goto retry;
3750 }
3751 }
3752
Mel Gormana9214442018-12-28 00:35:44 -08003753 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
Mel Gorman48ee5f32016-05-19 17:14:07 -07003754 if (!zone_watermark_fast(zone, order, mark,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003755 ac->highest_zoneidx, alloc_flags)) {
Mel Gormanfa5e0842009-06-16 15:33:22 -07003756 int ret;
3757
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07003758#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3759 /*
3760 * Watermark failed for this zone, but see if we can
3761 * grow this zone if it contains deferred pages.
3762 */
3763 if (static_branch_unlikely(&deferred_pages)) {
3764 if (_deferred_grow_zone(zone, order))
3765 goto try_this_zone;
3766 }
3767#endif
Mel Gorman5dab2912014-06-04 16:10:14 -07003768 /* Checked here to keep the fast path fast */
3769 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3770 if (alloc_flags & ALLOC_NO_WATERMARKS)
3771 goto try_this_zone;
3772
Mel Gormana5f5f912016-07-28 15:46:32 -07003773 if (node_reclaim_mode == 0 ||
Mel Gormanc33d6c02016-05-19 17:14:10 -07003774 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
Mel Gormancd38b112011-07-25 17:12:29 -07003775 continue;
3776
Mel Gormana5f5f912016-07-28 15:46:32 -07003777 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
Mel Gormanfa5e0842009-06-16 15:33:22 -07003778 switch (ret) {
Mel Gormana5f5f912016-07-28 15:46:32 -07003779 case NODE_RECLAIM_NOSCAN:
Mel Gormanfa5e0842009-06-16 15:33:22 -07003780 /* did not scan */
Mel Gormancd38b112011-07-25 17:12:29 -07003781 continue;
Mel Gormana5f5f912016-07-28 15:46:32 -07003782 case NODE_RECLAIM_FULL:
Mel Gormanfa5e0842009-06-16 15:33:22 -07003783 /* scanned but unreclaimable */
Mel Gormancd38b112011-07-25 17:12:29 -07003784 continue;
Mel Gormanfa5e0842009-06-16 15:33:22 -07003785 default:
3786 /* did we reclaim enough */
Mel Gormanfed27192013-04-29 15:07:57 -07003787 if (zone_watermark_ok(zone, order, mark,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003788 ac->highest_zoneidx, alloc_flags))
Mel Gormanfed27192013-04-29 15:07:57 -07003789 goto try_this_zone;
3790
Mel Gormanfed27192013-04-29 15:07:57 -07003791 continue;
Paul Jackson0798e512006-12-06 20:31:38 -08003792 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003793 }
3794
Mel Gormanfa5e0842009-06-16 15:33:22 -07003795try_this_zone:
Mel Gorman066b2392017-02-24 14:56:26 -08003796 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003797 gfp_mask, alloc_flags, ac->migratetype);
Vlastimil Babka75379192015-02-11 15:25:38 -08003798 if (page) {
Mel Gorman479f8542016-05-19 17:14:35 -07003799 prep_new_page(page, order, gfp_mask, alloc_flags);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003800
3801 /*
3802 * If this is a high-order atomic allocation then check
3803 * if the pageblock should be reserved for the future
3804 */
3805 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3806 reserve_highatomic_pageblock(page, zone, order);
3807
Vlastimil Babka75379192015-02-11 15:25:38 -08003808 return page;
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07003809 } else {
3810#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3811 /* Try again if zone has deferred pages */
3812 if (static_branch_unlikely(&deferred_pages)) {
3813 if (_deferred_grow_zone(zone, order))
3814 goto try_this_zone;
3815 }
3816#endif
Vlastimil Babka75379192015-02-11 15:25:38 -08003817 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07003818 }
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003819
Mel Gorman6bb15452018-12-28 00:35:41 -08003820 /*
3821 * It's possible on a UMA machine to get through all zones that are
3822 * fragmented. If avoiding fragmentation, reset and try again.
3823 */
3824 if (no_fallback) {
3825 alloc_flags &= ~ALLOC_NOFRAGMENT;
3826 goto retry;
3827 }
3828
Mel Gorman4ffeaf32014-08-06 16:07:22 -07003829 return NULL;
Martin Hicks753ee722005-06-21 17:14:41 -07003830}
3831
Michal Hocko9af744d2017-02-22 15:46:16 -08003832static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
Dave Hansena238ab52011-05-24 17:12:16 -07003833{
Dave Hansena238ab52011-05-24 17:12:16 -07003834 unsigned int filter = SHOW_MEM_FILTER_NODES;
Dave Hansena238ab52011-05-24 17:12:16 -07003835
3836 /*
3837 * This documents exceptions given to allocations in certain
3838 * contexts that are allowed to allocate outside current's set
3839 * of allowed nodes.
3840 */
3841 if (!(gfp_mask & __GFP_NOMEMALLOC))
Michal Hockocd04ae12017-09-06 16:24:50 -07003842 if (tsk_is_oom_victim(current) ||
Dave Hansena238ab52011-05-24 17:12:16 -07003843 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3844 filter &= ~SHOW_MEM_FILTER_NODES;
Mel Gormand0164ad2015-11-06 16:28:21 -08003845 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
Dave Hansena238ab52011-05-24 17:12:16 -07003846 filter &= ~SHOW_MEM_FILTER_NODES;
3847
Michal Hocko9af744d2017-02-22 15:46:16 -08003848 show_mem(filter, nodemask);
Michal Hockoaa187502017-02-22 15:41:45 -08003849}
3850
Michal Hockoa8e99252017-02-22 15:46:10 -08003851void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
Michal Hockoaa187502017-02-22 15:41:45 -08003852{
3853 struct va_format vaf;
3854 va_list args;
Johannes Weiner1be334e2019-11-05 21:16:51 -08003855 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
Michal Hockoaa187502017-02-22 15:41:45 -08003856
Tetsuo Handa0f7896f2017-05-03 14:55:34 -07003857 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
Michal Hockoaa187502017-02-22 15:41:45 -08003858 return;
3859
Michal Hocko7877cdc2016-10-07 17:01:55 -07003860 va_start(args, fmt);
3861 vaf.fmt = fmt;
3862 vaf.va = &args;
yuzhoujianef8444e2018-12-28 00:36:07 -08003863 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
Michal Hocko0205f752017-11-15 17:39:14 -08003864 current->comm, &vaf, gfp_mask, &gfp_mask,
3865 nodemask_pr_args(nodemask));
Michal Hocko7877cdc2016-10-07 17:01:55 -07003866 va_end(args);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003867
Michal Hockoa8e99252017-02-22 15:46:10 -08003868 cpuset_print_current_mems_allowed();
yuzhoujianef8444e2018-12-28 00:36:07 -08003869 pr_cont("\n");
Dave Hansena238ab52011-05-24 17:12:16 -07003870 dump_stack();
David Rientjes685dbf62017-02-22 15:46:28 -08003871 warn_alloc_show_mem(gfp_mask, nodemask);
Dave Hansena238ab52011-05-24 17:12:16 -07003872}
3873
Mel Gorman11e33f62009-06-16 15:31:57 -07003874static inline struct page *
Michal Hocko6c18ba72017-02-22 15:46:25 -08003875__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3876 unsigned int alloc_flags,
3877 const struct alloc_context *ac)
3878{
3879 struct page *page;
3880
3881 page = get_page_from_freelist(gfp_mask, order,
3882 alloc_flags|ALLOC_CPUSET, ac);
3883 /*
3884 * fallback to ignore cpuset restriction if our nodes
3885 * are depleted
3886 */
3887 if (!page)
3888 page = get_page_from_freelist(gfp_mask, order,
3889 alloc_flags, ac);
3890
3891 return page;
3892}
3893
3894static inline struct page *
Mel Gorman11e33f62009-06-16 15:31:57 -07003895__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003896 const struct alloc_context *ac, unsigned long *did_some_progress)
Mel Gorman11e33f62009-06-16 15:31:57 -07003897{
David Rientjes6e0fc462015-09-08 15:00:36 -07003898 struct oom_control oc = {
3899 .zonelist = ac->zonelist,
3900 .nodemask = ac->nodemask,
Vladimir Davydov2a966b72016-07-26 15:22:33 -07003901 .memcg = NULL,
David Rientjes6e0fc462015-09-08 15:00:36 -07003902 .gfp_mask = gfp_mask,
3903 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07003904 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906
Johannes Weiner9879de72015-01-26 12:58:32 -08003907 *did_some_progress = 0;
3908
Johannes Weiner9879de72015-01-26 12:58:32 -08003909 /*
Johannes Weinerdc564012015-06-24 16:57:19 -07003910 * Acquire the oom lock. If that fails, somebody else is
3911 * making progress for us.
Johannes Weiner9879de72015-01-26 12:58:32 -08003912 */
Johannes Weinerdc564012015-06-24 16:57:19 -07003913 if (!mutex_trylock(&oom_lock)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08003914 *did_some_progress = 1;
Mel Gorman11e33f62009-06-16 15:31:57 -07003915 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916 return NULL;
3917 }
Jens Axboe6b1de912005-11-17 21:35:02 +01003918
Mel Gorman11e33f62009-06-16 15:31:57 -07003919 /*
3920 * Go through the zonelist one more time with a very high watermark;
3921 * this is only to catch a parallel oom killing, and we must fail if
Tetsuo Handae746bf72017-08-31 16:15:20 -07003922 * we're still under heavy pressure. But make sure that this reclaim
3923 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
3924 * allocation which will never fail due to oom_lock already held.
Mel Gorman11e33f62009-06-16 15:31:57 -07003925 */
Tetsuo Handae746bf72017-08-31 16:15:20 -07003926 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3927 ~__GFP_DIRECT_RECLAIM, order,
3928 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003929 if (page)
Mel Gorman11e33f62009-06-16 15:31:57 -07003930 goto out;
3931
Michal Hocko06ad2762017-02-22 15:46:22 -08003932 /* Coredumps can quickly deplete all memory reserves */
3933 if (current->flags & PF_DUMPCORE)
3934 goto out;
3935 /* The OOM killer will not help higher order allocs */
3936 if (order > PAGE_ALLOC_COSTLY_ORDER)
3937 goto out;
Michal Hockodcda9b02017-07-12 14:36:45 -07003938 /*
3939 * We have already exhausted all our reclaim opportunities without any
3940 * success so it is time to admit defeat. We will skip the OOM killer
3941 * because it is very likely that the caller has a more reasonable
3942 * fallback than shooting a random task.
3943 */
3944 if (gfp_mask & __GFP_RETRY_MAYFAIL)
3945 goto out;
Michal Hocko06ad2762017-02-22 15:46:22 -08003946 /* The OOM killer does not needlessly kill tasks for lowmem */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003947 if (ac->highest_zoneidx < ZONE_NORMAL)
Michal Hocko06ad2762017-02-22 15:46:22 -08003948 goto out;
3949 if (pm_suspended_storage())
3950 goto out;
3951 /*
3952 * XXX: GFP_NOFS allocations should rather fail than rely on
3953 * other request to make a forward progress.
3954 * We are in an unfortunate situation where out_of_memory cannot
3955 * do much for this context but let's try it to at least get
3956 * access to memory reserved if the current task is killed (see
3957 * out_of_memory). Once filesystems are ready to handle allocation
3958 * failures more gracefully we should just bail out here.
3959 */
Michal Hocko3da88fb32016-05-19 17:13:09 -07003960
Michal Hocko06ad2762017-02-22 15:46:22 -08003961 /* The OOM killer may not free memory on a specific node */
3962 if (gfp_mask & __GFP_THISNODE)
3963 goto out;
3964
Shile Zhang3c2c6482018-01-31 16:20:07 -08003965 /* Exhausted what can be done so it's blame time */
Michal Hocko5020e282016-01-14 15:20:36 -08003966 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
Michal Hockoc32b3cb2015-02-11 15:26:24 -08003967 *did_some_progress = 1;
Michal Hocko5020e282016-01-14 15:20:36 -08003968
Michal Hocko6c18ba72017-02-22 15:46:25 -08003969 /*
3970 * Help non-failing allocations by giving them access to memory
3971 * reserves
3972 */
3973 if (gfp_mask & __GFP_NOFAIL)
3974 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
Michal Hocko5020e282016-01-14 15:20:36 -08003975 ALLOC_NO_WATERMARKS, ac);
Michal Hocko5020e282016-01-14 15:20:36 -08003976 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003977out:
Johannes Weinerdc564012015-06-24 16:57:19 -07003978 mutex_unlock(&oom_lock);
Mel Gorman11e33f62009-06-16 15:31:57 -07003979 return page;
3980}
3981
Michal Hocko33c2d212016-05-20 16:57:06 -07003982/*
3983 * Maximum number of compaction retries with progress before the OOM
3984 * killer is considered the only way to move forward.
3985 */
3986#define MAX_COMPACT_RETRIES 16
3987
Mel Gorman56de7262010-05-24 14:32:30 -07003988#ifdef CONFIG_COMPACTION
3989/* Try memory compaction for high-order allocations before reclaim */
3990static struct page *
3991__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003992 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003993 enum compact_priority prio, enum compact_result *compact_result)
Mel Gorman56de7262010-05-24 14:32:30 -07003994{
Mel Gorman5e1f0f02019-03-05 15:45:41 -08003995 struct page *page = NULL;
Johannes Weinereb414682018-10-26 15:06:27 -07003996 unsigned long pflags;
Vlastimil Babka499118e2017-05-08 15:59:50 -07003997 unsigned int noreclaim_flag;
Vlastimil Babka53853e22014-10-09 15:27:02 -07003998
Mel Gorman66199712012-01-12 17:19:41 -08003999 if (!order)
Mel Gorman56de7262010-05-24 14:32:30 -07004000 return NULL;
4001
Johannes Weinereb414682018-10-26 15:06:27 -07004002 psi_memstall_enter(&pflags);
Vlastimil Babka499118e2017-05-08 15:59:50 -07004003 noreclaim_flag = memalloc_noreclaim_save();
Johannes Weinereb414682018-10-26 15:06:27 -07004004
Michal Hockoc5d01d02016-05-20 16:56:53 -07004005 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
Mel Gorman5e1f0f02019-03-05 15:45:41 -08004006 prio, &page);
Johannes Weinereb414682018-10-26 15:06:27 -07004007
Vlastimil Babka499118e2017-05-08 15:59:50 -07004008 memalloc_noreclaim_restore(noreclaim_flag);
Johannes Weinereb414682018-10-26 15:06:27 -07004009 psi_memstall_leave(&pflags);
Mel Gorman56de7262010-05-24 14:32:30 -07004010
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07004011 /*
4012 * In at least one zone, compaction wasn't deferred or skipped, so let's
4013 * count a compaction stall
4014 */
4015 count_vm_event(COMPACTSTALL);
4016
Mel Gorman5e1f0f02019-03-05 15:45:41 -08004017 /* Prep a captured page if available */
4018 if (page)
4019 prep_new_page(page, order, gfp_mask, alloc_flags);
4020
4021 /* Try to get a page from the freelist if available */
4022 if (!page)
4023 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07004024
4025 if (page) {
4026 struct zone *zone = page_zone(page);
4027
4028 zone->compact_blockskip_flush = false;
4029 compaction_defer_reset(zone, order, true);
4030 count_vm_event(COMPACTSUCCESS);
4031 return page;
4032 }
4033
4034 /*
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07004035 * It's bad if a compaction run occurs and fails. The most likely reason
4036 * is that pages exist, but not enough to satisfy watermarks.
4037 */
4038 count_vm_event(COMPACTFAIL);
4039
4040 cond_resched();
4041
Mel Gorman56de7262010-05-24 14:32:30 -07004042 return NULL;
4043}
Michal Hocko33c2d212016-05-20 16:57:06 -07004044
Vlastimil Babka32508452016-10-07 17:00:28 -07004045static inline bool
4046should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4047 enum compact_result compact_result,
4048 enum compact_priority *compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07004049 int *compaction_retries)
Vlastimil Babka32508452016-10-07 17:00:28 -07004050{
4051 int max_retries = MAX_COMPACT_RETRIES;
Vlastimil Babkac2033b02016-10-07 17:00:34 -07004052 int min_priority;
Michal Hocko65190cf2017-02-22 15:42:03 -08004053 bool ret = false;
4054 int retries = *compaction_retries;
4055 enum compact_priority priority = *compact_priority;
Vlastimil Babka32508452016-10-07 17:00:28 -07004056
4057 if (!order)
4058 return false;
4059
Vlastimil Babkad9436492016-10-07 17:00:31 -07004060 if (compaction_made_progress(compact_result))
4061 (*compaction_retries)++;
4062
Vlastimil Babka32508452016-10-07 17:00:28 -07004063 /*
4064 * compaction considers all the zones as desperately out of memory
4065 * so it doesn't really make much sense to retry except when the
4066 * failure could be caused by insufficient priority
4067 */
Vlastimil Babkad9436492016-10-07 17:00:31 -07004068 if (compaction_failed(compact_result))
4069 goto check_priority;
Vlastimil Babka32508452016-10-07 17:00:28 -07004070
4071 /*
Vlastimil Babka494330852019-09-23 15:37:32 -07004072 * compaction was skipped because there are not enough order-0 pages
4073 * to work with, so we retry only if it looks like reclaim can help.
Vlastimil Babka32508452016-10-07 17:00:28 -07004074 */
Vlastimil Babka494330852019-09-23 15:37:32 -07004075 if (compaction_needs_reclaim(compact_result)) {
Michal Hocko65190cf2017-02-22 15:42:03 -08004076 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4077 goto out;
4078 }
Vlastimil Babka32508452016-10-07 17:00:28 -07004079
4080 /*
Vlastimil Babka494330852019-09-23 15:37:32 -07004081 * make sure the compaction wasn't deferred or didn't bail out early
4082 * due to lock contention before we declare that we should give up.
4083 * But the next retry should use a higher priority if allowed, so
4084 * we don't just keep bailing out endlessly.
4085 */
4086 if (compaction_withdrawn(compact_result)) {
4087 goto check_priority;
4088 }
4089
4090 /*
Michal Hockodcda9b02017-07-12 14:36:45 -07004091 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
Vlastimil Babka32508452016-10-07 17:00:28 -07004092 * costly ones because they are de facto nofail and invoke the OOM
4093 * killer to move on, while costly requests can fail and users are ready
4094 * to cope with that. 1/4 of the retries is rather arbitrary but we
4095 * would need much more detailed feedback from compaction to
4096 * make a better decision.
4097 */
4098 if (order > PAGE_ALLOC_COSTLY_ORDER)
4099 max_retries /= 4;
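	/*
	 * E.g. with MAX_COMPACT_RETRIES == 16, a costly request
	 * (order > PAGE_ALLOC_COSTLY_ORDER) is limited to 16 / 4 == 4
	 * retries before falling through to the priority check below.
	 */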
Michal Hocko65190cf2017-02-22 15:42:03 -08004100 if (*compaction_retries <= max_retries) {
4101 ret = true;
4102 goto out;
4103 }
Vlastimil Babka32508452016-10-07 17:00:28 -07004104
Vlastimil Babkad9436492016-10-07 17:00:31 -07004105 /*
4106 * Make sure there are attempts at the highest priority if we exhausted
4107 * all retries or failed at the lower priorities.
4108 */
4109check_priority:
Vlastimil Babkac2033b02016-10-07 17:00:34 -07004110 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4111 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
Michal Hocko65190cf2017-02-22 15:42:03 -08004112
Vlastimil Babkac2033b02016-10-07 17:00:34 -07004113 if (*compact_priority > min_priority) {
Vlastimil Babkad9436492016-10-07 17:00:31 -07004114 (*compact_priority)--;
4115 *compaction_retries = 0;
Michal Hocko65190cf2017-02-22 15:42:03 -08004116 ret = true;
Vlastimil Babkad9436492016-10-07 17:00:31 -07004117 }
Michal Hocko65190cf2017-02-22 15:42:03 -08004118out:
4119 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4120 return ret;
Vlastimil Babka32508452016-10-07 17:00:28 -07004121}
Mel Gorman56de7262010-05-24 14:32:30 -07004122#else
4123static inline struct page *
4124__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07004125 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004126 enum compact_priority prio, enum compact_result *compact_result)
Mel Gorman56de7262010-05-24 14:32:30 -07004127{
Michal Hocko33c2d212016-05-20 16:57:06 -07004128 *compact_result = COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07004129 return NULL;
4130}
Michal Hocko33c2d212016-05-20 16:57:06 -07004131
4132static inline bool
Michal Hocko86a294a2016-05-20 16:57:12 -07004133should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4134 enum compact_result compact_result,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004135 enum compact_priority *compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07004136 int *compaction_retries)
Michal Hocko33c2d212016-05-20 16:57:06 -07004137{
Michal Hocko31e49bf2016-05-20 16:57:15 -07004138 struct zone *zone;
4139 struct zoneref *z;
4140
4141 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4142 return false;
4143
4144 /*
4145 * There are setups with compaction disabled which would prefer to loop
4146 * inside the allocator rather than hit the oom killer prematurely.
4147 * Let's give them a good hope and keep retrying while the order-0
4148 * watermarks are OK.
4149 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004150 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4151 ac->highest_zoneidx, ac->nodemask) {
Michal Hocko31e49bf2016-05-20 16:57:15 -07004152 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004153 ac->highest_zoneidx, alloc_flags))
Michal Hocko31e49bf2016-05-20 16:57:15 -07004154 return true;
4155 }
Michal Hocko33c2d212016-05-20 16:57:06 -07004156 return false;
4157}
Vlastimil Babka32508452016-10-07 17:00:28 -07004158#endif /* CONFIG_COMPACTION */
Mel Gorman56de7262010-05-24 14:32:30 -07004159
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004160#ifdef CONFIG_LOCKDEP
Omar Sandoval93781322018-06-07 17:07:02 -07004161static struct lockdep_map __fs_reclaim_map =
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004162 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4163
4164static bool __need_fs_reclaim(gfp_t gfp_mask)
4165{
4166 gfp_mask = current_gfp_context(gfp_mask);
4167
4168 /* no reclaim without waiting on it */
4169 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4170 return false;
4171
4172 /* this guy won't enter reclaim */
Tetsuo Handa2e517d682018-03-22 16:17:10 -07004173 if (current->flags & PF_MEMALLOC)
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004174 return false;
4175
4176 /* We're only interested in __GFP_FS allocations for now */
4177 if (!(gfp_mask & __GFP_FS))
4178 return false;
4179
4180 if (gfp_mask & __GFP_NOLOCKDEP)
4181 return false;
4182
4183 return true;
4184}
4185
Omar Sandoval93781322018-06-07 17:07:02 -07004186void __fs_reclaim_acquire(void)
4187{
4188 lock_map_acquire(&__fs_reclaim_map);
4189}
4190
4191void __fs_reclaim_release(void)
4192{
4193 lock_map_release(&__fs_reclaim_map);
4194}
4195
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004196void fs_reclaim_acquire(gfp_t gfp_mask)
4197{
4198 if (__need_fs_reclaim(gfp_mask))
Omar Sandoval93781322018-06-07 17:07:02 -07004199 __fs_reclaim_acquire();
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004200}
4201EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4202
4203void fs_reclaim_release(gfp_t gfp_mask)
4204{
4205 if (__need_fs_reclaim(gfp_mask))
Omar Sandoval93781322018-06-07 17:07:02 -07004206 __fs_reclaim_release();
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004207}
4208EXPORT_SYMBOL_GPL(fs_reclaim_release);
4209#endif
4210
Marek Szyprowskibba90712012-01-25 12:09:52 +01004211/* Perform direct synchronous page reclaim */
4212static int
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004213__perform_reclaim(gfp_t gfp_mask, unsigned int order,
4214 const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07004215{
Marek Szyprowskibba90712012-01-25 12:09:52 +01004216 int progress;
Vlastimil Babka499118e2017-05-08 15:59:50 -07004217 unsigned int noreclaim_flag;
Johannes Weinereb414682018-10-26 15:06:27 -07004218 unsigned long pflags;
Mel Gorman11e33f62009-06-16 15:31:57 -07004219
4220 cond_resched();
4221
4222 /* We now go into synchronous reclaim */
4223 cpuset_memory_pressure_bump();
Johannes Weinereb414682018-10-26 15:06:27 -07004224 psi_memstall_enter(&pflags);
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004225 fs_reclaim_acquire(gfp_mask);
Omar Sandoval93781322018-06-07 17:07:02 -07004226 noreclaim_flag = memalloc_noreclaim_save();
Mel Gorman11e33f62009-06-16 15:31:57 -07004227
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004228 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4229 ac->nodemask);
Mel Gorman11e33f62009-06-16 15:31:57 -07004230
Vlastimil Babka499118e2017-05-08 15:59:50 -07004231 memalloc_noreclaim_restore(noreclaim_flag);
Omar Sandoval93781322018-06-07 17:07:02 -07004232 fs_reclaim_release(gfp_mask);
Johannes Weinereb414682018-10-26 15:06:27 -07004233 psi_memstall_leave(&pflags);
Mel Gorman11e33f62009-06-16 15:31:57 -07004234
4235 cond_resched();
4236
Marek Szyprowskibba90712012-01-25 12:09:52 +01004237 return progress;
4238}
4239
4240/* The really slow allocator path where we enter direct reclaim */
4241static inline struct page *
4242__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07004243 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004244 unsigned long *did_some_progress)
Marek Szyprowskibba90712012-01-25 12:09:52 +01004245{
4246 struct page *page = NULL;
4247 bool drained = false;
4248
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004249 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07004250 if (unlikely(!(*did_some_progress)))
4251 return NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07004252
Mel Gorman9ee493c2010-09-09 16:38:18 -07004253retry:
Vlastimil Babka31a6c192016-07-28 15:49:13 -07004254 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07004255
4256 /*
4257 * If an allocation failed after direct reclaim, it could be because
Mel Gorman0aaa29a2015-11-06 16:28:37 -08004258 * pages are pinned on the per-cpu lists or in high alloc reserves.
4259 * Shrink them and try again.
Mel Gorman9ee493c2010-09-09 16:38:18 -07004260 */
4261 if (!page && !drained) {
Minchan Kim29fac032016-12-12 16:42:14 -08004262 unreserve_highatomic_pageblock(ac, false);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08004263 drain_all_pages(NULL);
Mel Gorman9ee493c2010-09-09 16:38:18 -07004264 drained = true;
4265 goto retry;
4266 }
4267
Mel Gorman11e33f62009-06-16 15:31:57 -07004268 return page;
4269}
4270
David Rientjes5ecd9d42018-04-05 16:25:16 -07004271static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4272 const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07004273{
4274 struct zoneref *z;
4275 struct zone *zone;
Mel Gormane1a55632016-07-28 15:46:26 -07004276 pg_data_t *last_pgdat = NULL;
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004277 enum zone_type highest_zoneidx = ac->highest_zoneidx;
Mel Gorman11e33f62009-06-16 15:31:57 -07004278
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004279 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
David Rientjes5ecd9d42018-04-05 16:25:16 -07004280 ac->nodemask) {
Mel Gormane1a55632016-07-28 15:46:26 -07004281 if (last_pgdat != zone->zone_pgdat)
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004282 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
Mel Gormane1a55632016-07-28 15:46:26 -07004283 last_pgdat = zone->zone_pgdat;
4284 }
Mel Gorman11e33f62009-06-16 15:31:57 -07004285}
4286
Mel Gormanc6038442016-05-19 17:13:38 -07004287static inline unsigned int
Peter Zijlstra341ce062009-06-16 15:32:02 -07004288gfp_to_alloc_flags(gfp_t gfp_mask)
4289{
Mel Gormanc6038442016-05-19 17:13:38 -07004290 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
Peter Zijlstra341ce062009-06-16 15:32:02 -07004291
Mateusz Nosek736838e2020-04-01 21:09:47 -07004292 /*
4293 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4294 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4295 * to save two branches.
4296 */
Namhyung Kime6223a32010-10-26 14:21:59 -07004297 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
Mateusz Nosek736838e2020-04-01 21:09:47 -07004298 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
Mel Gormana56f57f2009-06-16 15:32:02 -07004299
Peter Zijlstra341ce062009-06-16 15:32:02 -07004300 /*
4301 * The caller may dip into page reserves a bit more if the caller
4302 * cannot run direct reclaim, or if the caller has realtime scheduling
4303 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
Mel Gormand0164ad2015-11-06 16:28:21 -08004304 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
Peter Zijlstra341ce062009-06-16 15:32:02 -07004305 */
Mateusz Nosek736838e2020-04-01 21:09:47 -07004306 alloc_flags |= (__force int)
4307 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
Peter Zijlstra341ce062009-06-16 15:32:02 -07004308
Mel Gormand0164ad2015-11-06 16:28:21 -08004309 if (gfp_mask & __GFP_ATOMIC) {
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08004310 /*
David Rientjesb104a352014-07-30 16:08:24 -07004311 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4312 * if it can't schedule.
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08004313 */
David Rientjesb104a352014-07-30 16:08:24 -07004314 if (!(gfp_mask & __GFP_NOMEMALLOC))
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08004315 alloc_flags |= ALLOC_HARDER;
Peter Zijlstra341ce062009-06-16 15:32:02 -07004316 /*
David Rientjesb104a352014-07-30 16:08:24 -07004317 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
Vladimir Davydov344736f2014-10-20 15:50:30 +04004318 * comment for __cpuset_node_allowed().
Peter Zijlstra341ce062009-06-16 15:32:02 -07004319 */
4320 alloc_flags &= ~ALLOC_CPUSET;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08004321 } else if (unlikely(rt_task(current)) && !in_interrupt())
Peter Zijlstra341ce062009-06-16 15:32:02 -07004322 alloc_flags |= ALLOC_HARDER;
4323
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09004324#ifdef CONFIG_CMA
Wei Yang01c0bfe2020-06-03 15:59:08 -07004325 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09004326 alloc_flags |= ALLOC_CMA;
4327#endif
Peter Zijlstra341ce062009-06-16 15:32:02 -07004328 return alloc_flags;
4329}
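
/*
 * Rough sketch of the mapping above (hypothetical caller; assumes
 * GFP_ATOMIC is __GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM):
 *
 *	flags = gfp_to_alloc_flags(GFP_ATOMIC);
 *
 * yields ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_KSWAPD | ALLOC_HARDER with
 * ALLOC_CPUSET cleared, so an atomic allocation is not failed merely
 * because of cpuset restrictions.
 */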
4330
Michal Hockocd04ae12017-09-06 16:24:50 -07004331static bool oom_reserves_allowed(struct task_struct *tsk)
Mel Gorman072bb0a2012-07-31 16:43:58 -07004332{
Michal Hockocd04ae12017-09-06 16:24:50 -07004333 if (!tsk_is_oom_victim(tsk))
Vlastimil Babka31a6c192016-07-28 15:49:13 -07004334 return false;
4335
Michal Hockocd04ae12017-09-06 16:24:50 -07004336 /*
4337 * !MMU doesn't have oom reaper so give access to memory reserves
4338 * only to the thread with TIF_MEMDIE set
4339 */
4340 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4341 return false;
Vlastimil Babka31a6c192016-07-28 15:49:13 -07004342
Michal Hockocd04ae12017-09-06 16:24:50 -07004343 return true;
4344}
4345
4346/*
4347 * Distinguish requests which really need access to full memory
4348 * reserves from oom victims which can live with a portion of it
4349 */
4350static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4351{
4352 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4353 return 0;
4354 if (gfp_mask & __GFP_MEMALLOC)
4355 return ALLOC_NO_WATERMARKS;
4356 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4357 return ALLOC_NO_WATERMARKS;
4358 if (!in_interrupt()) {
4359 if (current->flags & PF_MEMALLOC)
4360 return ALLOC_NO_WATERMARKS;
4361 else if (oom_reserves_allowed(current))
4362 return ALLOC_OOM;
4363 }
4364
4365 return 0;
4366}
4367
4368bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4369{
4370 return !!__gfp_pfmemalloc_flags(gfp_mask);
Mel Gorman072bb0a2012-07-31 16:43:58 -07004371}
4372
Michal Hocko0a0337e2016-05-20 16:57:00 -07004373/*
Michal Hocko0a0337e2016-05-20 16:57:00 -07004374 * Checks whether it makes sense to retry the reclaim to make a forward progress
4375 * for the given allocation request.
Johannes Weiner491d79a2017-05-03 14:52:16 -07004376 *
4377 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4378 * without success, or when we couldn't even meet the watermark if we
4379 * reclaimed all remaining pages on the LRU lists.
Michal Hocko0a0337e2016-05-20 16:57:00 -07004380 *
4381 * Returns true if a retry is viable or false to enter the oom path.
4382 */
4383static inline bool
4384should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4385 struct alloc_context *ac, int alloc_flags,
Vlastimil Babka423b4522016-10-07 17:00:40 -07004386 bool did_some_progress, int *no_progress_loops)
Michal Hocko0a0337e2016-05-20 16:57:00 -07004387{
4388 struct zone *zone;
4389 struct zoneref *z;
Michal Hocko15f570b2018-10-26 15:03:31 -07004390 bool ret = false;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004391
4392 /*
Vlastimil Babka423b4522016-10-07 17:00:40 -07004393 * Costly allocations might have made progress but this doesn't mean
4394 * their order will become available due to high fragmentation so
4395 * always increment the no progress counter for them
4396 */
4397 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4398 *no_progress_loops = 0;
4399 else
4400 (*no_progress_loops)++;
4401
4402 /*
Michal Hocko0a0337e2016-05-20 16:57:00 -07004403 * Make sure we converge to OOM if we cannot make any progress
4404 * several times in a row.
4405 */
Minchan Kim04c87162016-12-12 16:42:11 -08004406 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4407 /* Before OOM, exhaust highatomic_reserve */
Minchan Kim29fac032016-12-12 16:42:14 -08004408 return unreserve_highatomic_pageblock(ac, true);
Minchan Kim04c87162016-12-12 16:42:11 -08004409 }
Michal Hocko0a0337e2016-05-20 16:57:00 -07004410
Michal Hocko0a0337e2016-05-20 16:57:00 -07004411 /*
Mel Gormanbca67592016-07-28 15:47:05 -07004412 * Keep reclaiming pages while there is a chance this will lead
4413 * somewhere. If none of the target zones can satisfy our allocation
4414 * request even if all reclaimable pages are considered then we are
4415 * screwed and have to go OOM.
Michal Hocko0a0337e2016-05-20 16:57:00 -07004416 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004417 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4418 ac->highest_zoneidx, ac->nodemask) {
Michal Hocko0a0337e2016-05-20 16:57:00 -07004419 unsigned long available;
Michal Hockoede37712016-05-20 16:57:03 -07004420 unsigned long reclaimable;
Michal Hockod379f012017-02-22 15:42:00 -08004421 unsigned long min_wmark = min_wmark_pages(zone);
4422 bool wmark;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004423
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004424 available = reclaimable = zone_reclaimable_pages(zone);
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004425 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
Michal Hocko0a0337e2016-05-20 16:57:00 -07004426
4427 /*
Johannes Weiner491d79a2017-05-03 14:52:16 -07004428 * Would the allocation succeed if we reclaimed all
4429 * reclaimable pages?
Michal Hocko0a0337e2016-05-20 16:57:00 -07004430 */
Michal Hockod379f012017-02-22 15:42:00 -08004431 wmark = __zone_watermark_ok(zone, order, min_wmark,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004432 ac->highest_zoneidx, alloc_flags, available);
Michal Hockod379f012017-02-22 15:42:00 -08004433 trace_reclaim_retry_zone(z, order, reclaimable,
4434 available, min_wmark, *no_progress_loops, wmark);
4435 if (wmark) {
Michal Hockoede37712016-05-20 16:57:03 -07004436 /*
4437 * If we didn't make any progress and have a lot of
4438 * dirty + writeback pages then we should wait for
4439 * an IO to complete to slow down the reclaim and
4440 * prevent a premature OOM
4441 */
4442 if (!did_some_progress) {
Mel Gorman11fb9982016-07-28 15:46:20 -07004443 unsigned long write_pending;
Michal Hockoede37712016-05-20 16:57:03 -07004444
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004445 write_pending = zone_page_state_snapshot(zone,
4446 NR_ZONE_WRITE_PENDING);
Michal Hockoede37712016-05-20 16:57:03 -07004447
Mel Gorman11fb9982016-07-28 15:46:20 -07004448 if (2 * write_pending > reclaimable) {
Michal Hockoede37712016-05-20 16:57:03 -07004449 congestion_wait(BLK_RW_ASYNC, HZ/10);
4450 return true;
4451 }
4452 }
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004453
Michal Hocko15f570b2018-10-26 15:03:31 -07004454 ret = true;
4455 goto out;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004456 }
4457 }
4458
Michal Hocko15f570b2018-10-26 15:03:31 -07004459out:
4460 /*
4461 * Memory allocation/reclaim might be called from a WQ context and the
4462 * current implementation of the WQ concurrency control doesn't
4463 * recognize that a particular WQ is congested if the worker thread is
4464 * looping without ever sleeping. Therefore we have to do a short sleep
4465 * here rather than calling cond_resched().
4466 */
4467 if (current->flags & PF_WQ_WORKER)
4468 schedule_timeout_uninterruptible(1);
4469 else
4470 cond_resched();
4471 return ret;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004472}
4473
Vlastimil Babka902b6282017-07-06 15:39:56 -07004474static inline bool
4475check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4476{
4477 /*
4478 * It's possible that cpuset's mems_allowed and the nodemask from
4479 * mempolicy don't intersect. This should be normally dealt with by
4480 * policy_nodemask(), but it's possible to race with cpuset update in
4481 * such a way the check therein was true, and then it became false
4482 * before we got our cpuset_mems_cookie here.
4483 * This assumes that for all allocations, ac->nodemask can come only
4484 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4485 * when it does not intersect with the cpuset restrictions) or the
4486 * caller can deal with a violated nodemask.
4487 */
4488 if (cpusets_enabled() && ac->nodemask &&
4489 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4490 ac->nodemask = NULL;
4491 return true;
4492 }
4493
4494 /*
4495 * When updating a task's mems_allowed or mempolicy nodemask, it is
4496 * possible to race with parallel threads in such a way that our
4497 * allocation can fail while the mask is being updated. If we are about
4498 * to fail, check if the cpuset changed during allocation and if so,
4499 * retry.
4500 */
4501 if (read_mems_allowed_retry(cpuset_mems_cookie))
4502 return true;
4503
4504 return false;
4505}
4506
Mel Gorman11e33f62009-06-16 15:31:57 -07004507static inline struct page *
4508__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004509 struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07004510{
Mel Gormand0164ad2015-11-06 16:28:21 -08004511 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
Vlastimil Babka282722b2017-05-08 15:54:49 -07004512 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
Mel Gorman11e33f62009-06-16 15:31:57 -07004513 struct page *page = NULL;
Mel Gormanc6038442016-05-19 17:13:38 -07004514 unsigned int alloc_flags;
Mel Gorman11e33f62009-06-16 15:31:57 -07004515 unsigned long did_some_progress;
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004516 enum compact_priority compact_priority;
Michal Hockoc5d01d02016-05-20 16:56:53 -07004517 enum compact_result compact_result;
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004518 int compaction_retries;
4519 int no_progress_loops;
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004520 unsigned int cpuset_mems_cookie;
Michal Hockocd04ae12017-09-06 16:24:50 -07004521 int reserve_flags;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08004522
Christoph Lameter952f3b52006-12-06 20:33:26 -08004523 /*
Mel Gormand0164ad2015-11-06 16:28:21 -08004524 * We also sanity check to catch abuse of atomic reserves being used by
4525 * callers that are not in atomic context.
4526 */
4527 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4528 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4529 gfp_mask &= ~__GFP_ATOMIC;
4530
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004531retry_cpuset:
4532 compaction_retries = 0;
4533 no_progress_loops = 0;
4534 compact_priority = DEF_COMPACT_PRIORITY;
4535 cpuset_mems_cookie = read_mems_allowed_begin();
Michal Hocko9a67f642017-02-22 15:46:19 -08004536
4537 /*
4538 * The fast path uses conservative alloc_flags to succeed only until
4539 * kswapd needs to be woken up, and to avoid the cost of setting up
4540 * alloc_flags precisely. So we do that now.
4541 */
4542 alloc_flags = gfp_to_alloc_flags(gfp_mask);
4543
Vlastimil Babkae47483b2017-01-24 15:18:41 -08004544 /*
4545 * We need to recalculate the starting point for the zonelist iterator
4546 * because we might have used different nodemask in the fast path, or
4547 * there was a cpuset modification and we are retrying - otherwise we
4548 * could end up iterating over non-eligible zones endlessly.
4549 */
4550 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004551 ac->highest_zoneidx, ac->nodemask);
Vlastimil Babkae47483b2017-01-24 15:18:41 -08004552 if (!ac->preferred_zoneref->zone)
4553 goto nopage;
4554
Mel Gorman0a79cda2018-12-28 00:35:48 -08004555 if (alloc_flags & ALLOC_KSWAPD)
David Rientjes5ecd9d42018-04-05 16:25:16 -07004556 wake_all_kswapds(order, gfp_mask, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08004557
Paul Jackson9bf22292005-09-06 15:18:12 -07004558 /*
Vlastimil Babka23771232016-07-28 15:49:16 -07004559 * The adjusted alloc_flags might result in immediate success, so try
4560 * that first
4561 */
4562 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4563 if (page)
4564 goto got_pg;
4565
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004566 /*
4567 * For costly allocations, try direct compaction first, as it's likely
Vlastimil Babka282722b2017-05-08 15:54:49 -07004568 * that we have enough base pages and don't need to reclaim. For non-
4569 * movable high-order allocations, do that as well, as compaction will
4570 * try to prevent permanent fragmentation by migrating from blocks of the
4571 * same migratetype.
4572 * Don't try this for allocations that are allowed to ignore
4573 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004574 */
Vlastimil Babka282722b2017-05-08 15:54:49 -07004575 if (can_direct_reclaim &&
4576 (costly_order ||
4577 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4578 && !gfp_pfmemalloc_allowed(gfp_mask)) {
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004579 page = __alloc_pages_direct_compact(gfp_mask, order,
4580 alloc_flags, ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004581 INIT_COMPACT_PRIORITY,
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004582 &compact_result);
4583 if (page)
4584 goto got_pg;
4585
Vlastimil Babkacc638f32020-01-13 16:29:04 -08004586 /*
4587 * Checks for costly allocations with __GFP_NORETRY, which
4588 * includes some THP page fault allocations
4589 */
4590 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
David Rientjesb39d0ee2019-09-04 12:54:22 -07004591 /*
4592 * If allocating entire pageblock(s) and compaction
4593 * failed because all zones are below low watermarks
4594 * or is prohibited because it recently failed at this
David Rientjes3f36d862019-10-14 14:12:04 -07004595 * order, fail immediately unless the allocator has
4596 * requested compaction and reclaim retry.
David Rientjesb39d0ee2019-09-04 12:54:22 -07004597 *
4598 * Reclaim is
4599 * - potentially very expensive because zones are far
4600 * below their low watermarks or this is part of very
4601 * bursty high order allocations,
4602 * - not guaranteed to help because isolate_freepages()
4603 * may not iterate over freed pages as part of its
4604 * linear scan, and
4605 * - unlikely to make entire pageblocks free on its
4606 * own.
4607 */
4608 if (compact_result == COMPACT_SKIPPED ||
4609 compact_result == COMPACT_DEFERRED)
4610 goto nopage;
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004611
4612 /*
Vlastimil Babka3eb27712016-07-28 15:49:22 -07004613 * Looks like reclaim/compaction is worth trying, but
4614 * sync compaction could be very expensive, so keep
Vlastimil Babka25160352016-07-28 15:49:25 -07004615 * using async compaction.
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004616 */
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004617 compact_priority = INIT_COMPACT_PRIORITY;
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004618 }
4619 }
Vlastimil Babka23771232016-07-28 15:49:16 -07004620
4621retry:
4622 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
Mel Gorman0a79cda2018-12-28 00:35:48 -08004623 if (alloc_flags & ALLOC_KSWAPD)
David Rientjes5ecd9d42018-04-05 16:25:16 -07004624 wake_all_kswapds(order, gfp_mask, ac);
Vlastimil Babka23771232016-07-28 15:49:16 -07004625
Michal Hockocd04ae12017-09-06 16:24:50 -07004626 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4627 if (reserve_flags)
4628 alloc_flags = reserve_flags;
Vlastimil Babka23771232016-07-28 15:49:16 -07004629
4630 /*
Vlastimil Babkad6a24df2018-08-17 15:45:05 -07004631 * Reset the nodemask and zonelist iterators if memory policies can be
4632 * ignored. These allocations are high priority and system rather than
4633 * user oriented.
Mel Gormane46e7b72016-06-03 14:56:01 -07004634 */
Michal Hockocd04ae12017-09-06 16:24:50 -07004635 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
Vlastimil Babkad6a24df2018-08-17 15:45:05 -07004636 ac->nodemask = NULL;
Mel Gormane46e7b72016-06-03 14:56:01 -07004637 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004638 ac->highest_zoneidx, ac->nodemask);
Mel Gormane46e7b72016-06-03 14:56:01 -07004639 }
4640
Vlastimil Babka23771232016-07-28 15:49:16 -07004641 /* Attempt with potentially adjusted zonelist and alloc_flags */
Vlastimil Babka31a6c192016-07-28 15:49:13 -07004642 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08004643 if (page)
4644 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004645
Mel Gormand0164ad2015-11-06 16:28:21 -08004646 /* Caller is not willing to reclaim, we can't balance anything */
Michal Hocko9a67f642017-02-22 15:46:19 -08004647 if (!can_direct_reclaim)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648 goto nopage;
Michal Hocko9a67f642017-02-22 15:46:19 -08004649
Peter Zijlstra341ce062009-06-16 15:32:02 -07004650 /* Avoid recursion of direct reclaim */
Michal Hocko9a67f642017-02-22 15:46:19 -08004651 if (current->flags & PF_MEMALLOC)
Peter Zijlstra341ce062009-06-16 15:32:02 -07004652 goto nopage;
David Rientjes8fe78042014-08-06 16:07:54 -07004653
Mel Gorman11e33f62009-06-16 15:31:57 -07004654 /* Try direct reclaim and then allocating */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004655 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4656 &did_some_progress);
Mel Gorman11e33f62009-06-16 15:31:57 -07004657 if (page)
4658 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004660 /* Try direct compaction and then allocating */
4661 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004662 compact_priority, &compact_result);
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004663 if (page)
4664 goto got_pg;
4665
Johannes Weiner90839052015-06-24 16:57:21 -07004666 /* Do not loop if specifically requested */
4667 if (gfp_mask & __GFP_NORETRY)
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004668 goto nopage;
Johannes Weiner90839052015-06-24 16:57:21 -07004669
Michal Hocko0a0337e2016-05-20 16:57:00 -07004670 /*
4671 * Do not retry costly high order allocations unless they are
Michal Hockodcda9b02017-07-12 14:36:45 -07004672 * __GFP_RETRY_MAYFAIL
Michal Hocko0a0337e2016-05-20 16:57:00 -07004673 */
Michal Hockodcda9b02017-07-12 14:36:45 -07004674 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004675 goto nopage;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004676
Michal Hocko0a0337e2016-05-20 16:57:00 -07004677 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
Vlastimil Babka423b4522016-10-07 17:00:40 -07004678 did_some_progress > 0, &no_progress_loops))
Michal Hocko0a0337e2016-05-20 16:57:00 -07004679 goto retry;
4680
Michal Hocko33c2d212016-05-20 16:57:06 -07004681 /*
                                              4682	 * It doesn't make any sense to retry compaction if order-0
                                              4683	 * reclaim is not able to make any progress, because the current
                                              4684	 * implementation of compaction depends on a sufficient amount
                                              4685	 * of free memory (see __compaction_suitable)
4686 */
4687 if (did_some_progress > 0 &&
Michal Hocko86a294a2016-05-20 16:57:12 -07004688 should_compact_retry(ac, order, alloc_flags,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004689 compact_result, &compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07004690 &compaction_retries))
Michal Hocko33c2d212016-05-20 16:57:06 -07004691 goto retry;
4692
Vlastimil Babka902b6282017-07-06 15:39:56 -07004693
4694 /* Deal with possible cpuset update races before we start OOM killing */
4695 if (check_retry_cpuset(cpuset_mems_cookie, ac))
Vlastimil Babkae47483b2017-01-24 15:18:41 -08004696 goto retry_cpuset;
4697
Johannes Weiner90839052015-06-24 16:57:21 -07004698 /* Reclaim has failed us, start killing things */
4699 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4700 if (page)
4701 goto got_pg;
4702
Michal Hocko9a67f642017-02-22 15:46:19 -08004703 /* Avoid allocations with no watermarks from looping endlessly */
Michal Hockocd04ae12017-09-06 16:24:50 -07004704 if (tsk_is_oom_victim(current) &&
4705 (alloc_flags == ALLOC_OOM ||
Tetsuo Handac2889832017-06-02 14:46:31 -07004706 (gfp_mask & __GFP_NOMEMALLOC)))
Michal Hocko9a67f642017-02-22 15:46:19 -08004707 goto nopage;
4708
Johannes Weiner90839052015-06-24 16:57:21 -07004709 /* Retry as long as the OOM killer is making progress */
Michal Hocko0a0337e2016-05-20 16:57:00 -07004710 if (did_some_progress) {
4711 no_progress_loops = 0;
Johannes Weiner90839052015-06-24 16:57:21 -07004712 goto retry;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004713 }
Johannes Weiner90839052015-06-24 16:57:21 -07004714
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715nopage:
Vlastimil Babka902b6282017-07-06 15:39:56 -07004716 /* Deal with possible cpuset update races before we fail */
4717 if (check_retry_cpuset(cpuset_mems_cookie, ac))
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004718 goto retry_cpuset;
4719
Michal Hocko9a67f642017-02-22 15:46:19 -08004720 /*
                                              4721	 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
4722 * we always retry
4723 */
4724 if (gfp_mask & __GFP_NOFAIL) {
4725 /*
                                              4726		 * All existing users of __GFP_NOFAIL are blockable, so warn
                                              4727		 * about any new users that actually require GFP_NOWAIT
4728 */
4729 if (WARN_ON_ONCE(!can_direct_reclaim))
4730 goto fail;
4731
4732 /*
                                              4733		 * A PF_MEMALLOC request from this context is rather bizarre
                                              4734		 * because we cannot reclaim anything and can only loop waiting
                                              4735		 * for somebody else to do the work for us
4736 */
4737 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4738
4739 /*
                                              4740		 * Non-failing costly orders are a hard requirement which we
                                              4741		 * are not well prepared for, so let's warn about these users
                                              4742		 * so that we can identify them and convert them to something
                                              4743		 * else.
4744 */
4745 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4746
Michal Hocko6c18ba72017-02-22 15:46:25 -08004747 /*
4748 * Help non-failing allocations by giving them access to memory
4749 * reserves but do not use ALLOC_NO_WATERMARKS because this
                                              4750		 * could deplete the whole memory reserves, which would just make
4751 * the situation worse
4752 */
4753 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4754 if (page)
4755 goto got_pg;
4756
Michal Hocko9a67f642017-02-22 15:46:19 -08004757 cond_resched();
4758 goto retry;
4759 }
4760fail:
Michal Hockoa8e99252017-02-22 15:46:10 -08004761 warn_alloc(gfp_mask, ac->nodemask,
Michal Hocko7877cdc2016-10-07 17:01:55 -07004762 "page allocation failure: order:%u", order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004763got_pg:
Mel Gorman072bb0a2012-07-31 16:43:58 -07004764 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004765}
Mel Gorman11e33f62009-06-16 15:31:57 -07004766
Mel Gorman9cd75552017-02-24 14:56:29 -08004767static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004768 int preferred_nid, nodemask_t *nodemask,
Mel Gorman9cd75552017-02-24 14:56:29 -08004769 struct alloc_context *ac, gfp_t *alloc_mask,
4770 unsigned int *alloc_flags)
4771{
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004772 ac->highest_zoneidx = gfp_zone(gfp_mask);
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004773 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
Mel Gorman9cd75552017-02-24 14:56:29 -08004774 ac->nodemask = nodemask;
Wei Yang01c0bfe2020-06-03 15:59:08 -07004775 ac->migratetype = gfp_migratetype(gfp_mask);
Mel Gorman9cd75552017-02-24 14:56:29 -08004776
4777 if (cpusets_enabled()) {
4778 *alloc_mask |= __GFP_HARDWALL;
Mel Gorman9cd75552017-02-24 14:56:29 -08004779 if (!ac->nodemask)
4780 ac->nodemask = &cpuset_current_mems_allowed;
Vlastimil Babka51047822017-02-24 14:56:53 -08004781 else
4782 *alloc_flags |= ALLOC_CPUSET;
Mel Gorman9cd75552017-02-24 14:56:29 -08004783 }
4784
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004785 fs_reclaim_acquire(gfp_mask);
4786 fs_reclaim_release(gfp_mask);
Mel Gorman9cd75552017-02-24 14:56:29 -08004787
4788 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4789
4790 if (should_fail_alloc_page(gfp_mask, order))
4791 return false;
4792
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09004793 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4794 *alloc_flags |= ALLOC_CMA;
4795
Mel Gorman9cd75552017-02-24 14:56:29 -08004796 return true;
4797}
4798
                                              4799/* Determine whether to spread dirty pages and what the first usable zone is */
Huaisheng Yea380b402018-06-07 17:07:57 -07004800static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
Mel Gorman9cd75552017-02-24 14:56:29 -08004801{
4802 /* Dirty zone balancing only done in the fast path */
4803 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4804
4805 /*
4806 * The preferred zone is used for statistics but crucially it is
4807 * also used as the starting point for the zonelist iterator. It
4808 * may get reset for allocations that ignore memory policies.
4809 */
4810 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004811 ac->highest_zoneidx, ac->nodemask);
Mel Gorman9cd75552017-02-24 14:56:29 -08004812}
4813
Mel Gorman11e33f62009-06-16 15:31:57 -07004814/*
4815 * This is the 'heart' of the zoned buddy allocator.
4816 */
4817struct page *
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004818__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4819 nodemask_t *nodemask)
Mel Gorman11e33f62009-06-16 15:31:57 -07004820{
Mel Gorman5bb1b162016-05-19 17:13:50 -07004821 struct page *page;
Mel Gormane6cbd7f2016-07-28 15:46:50 -07004822 unsigned int alloc_flags = ALLOC_WMARK_LOW;
Tetsuo Handaf19360f2017-09-08 16:13:22 -07004823 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
Mel Gorman9cd75552017-02-24 14:56:29 -08004824 struct alloc_context ac = { };
Mel Gorman682a3382016-05-19 17:13:30 -07004825
Michal Hockoc63ae432018-11-16 15:08:53 -08004826 /*
                                              4827	 * There are several places where we assume that the order value is sane,
                                              4828	 * so bail out early if the request is out of bounds.
4829 */
4830 if (unlikely(order >= MAX_ORDER)) {
4831 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4832 return NULL;
4833 }
4834
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10004835 gfp_mask &= gfp_allowed_mask;
Tetsuo Handaf19360f2017-09-08 16:13:22 -07004836 alloc_mask = gfp_mask;
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004837 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
Mel Gorman11e33f62009-06-16 15:31:57 -07004838 return NULL;
4839
Huaisheng Yea380b402018-06-07 17:07:57 -07004840 finalise_ac(gfp_mask, &ac);
Mel Gorman5bb1b162016-05-19 17:13:50 -07004841
Mel Gorman6bb15452018-12-28 00:35:41 -08004842 /*
4843 * Forbid the first pass from falling back to types that fragment
4844 * memory until all local zones are considered.
4845 */
Mel Gorman0a79cda2018-12-28 00:35:48 -08004846 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
Mel Gorman6bb15452018-12-28 00:35:41 -08004847
Mel Gorman5117f452009-06-16 15:31:59 -07004848 /* First allocation attempt */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004849 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
Mel Gorman4fcb0972016-05-19 17:14:01 -07004850 if (likely(page))
4851 goto out;
Andrew Morton91fbdc02015-02-11 15:25:04 -08004852
Mel Gorman4fcb0972016-05-19 17:14:01 -07004853 /*
Michal Hocko7dea19f2017-05-03 14:53:15 -07004854 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
                                              4855	 * and GFP_NOIO, which have to be inherited by all allocation requests
4856 * from a particular context which has been marked by
4857 * memalloc_no{fs,io}_{save,restore}.
Mel Gorman4fcb0972016-05-19 17:14:01 -07004858 */
Michal Hocko7dea19f2017-05-03 14:53:15 -07004859 alloc_mask = current_gfp_context(gfp_mask);
Mel Gorman4fcb0972016-05-19 17:14:01 -07004860 ac.spread_dirty_pages = false;
Mel Gorman11e33f62009-06-16 15:31:57 -07004861
Mel Gorman47415262016-05-19 17:14:44 -07004862 /*
4863 * Restore the original nodemask if it was potentially replaced with
4864 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4865 */
Mateusz Nosek97ce86f2020-04-01 21:09:53 -07004866 ac.nodemask = nodemask;
Vlastimil Babka16096c22017-01-24 15:18:35 -08004867
Mel Gorman4fcb0972016-05-19 17:14:01 -07004868 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
Xishi Qiu23f086f2015-02-11 15:25:07 -08004869
Mel Gorman4fcb0972016-05-19 17:14:01 -07004870out:
Vladimir Davydovc4159a72016-08-08 23:03:12 +03004871 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
Roman Gushchinf4b00ea2020-04-01 21:06:46 -07004872 unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
Vladimir Davydovc4159a72016-08-08 23:03:12 +03004873 __free_pages(page, order);
4874 page = NULL;
Vladimir Davydov49491482016-07-26 15:24:24 -07004875 }
4876
Mel Gorman4fcb0972016-05-19 17:14:01 -07004877 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4878
Mel Gorman11e33f62009-06-16 15:31:57 -07004879 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004880}
Mel Gormand2391712009-06-16 15:31:52 -07004881EXPORT_SYMBOL(__alloc_pages_nodemask);
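/*
 * Illustrative sketch, not part of the original file: a typical caller
 * reaches __alloc_pages_nodemask() through the alloc_pages() wrapper and
 * releases the pages with __free_pages(). The helper names below are made
 * up for the example; error handling is reduced to the NULL check.
 */
static void *example_grab_two_pages(void)
{
	/* order-1 request: two physically contiguous pages */
	struct page *page = alloc_pages(GFP_KERNEL, 1);

	if (!page)
		return NULL;
	return page_address(page);
}

static void example_drop_two_pages(void *addr)
{
	__free_pages(virt_to_page(addr), 1);
}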
Linus Torvalds1da177e2005-04-16 15:20:36 -07004882
4883/*
Michal Hocko9ea9a682018-08-17 15:46:01 -07004884 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
4885 * address cannot represent highmem pages. Use alloc_pages and then kmap if
4886 * you need to access high mem.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887 */
Harvey Harrison920c7a52008-02-04 22:29:26 -08004888unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004889{
Akinobu Mita945a1112009-09-21 17:01:47 -07004890 struct page *page;
4891
Michal Hocko9ea9a682018-08-17 15:46:01 -07004892 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004893 if (!page)
4894 return 0;
4895 return (unsigned long) page_address(page);
4896}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897EXPORT_SYMBOL(__get_free_pages);
4898
Harvey Harrison920c7a52008-02-04 22:29:26 -08004899unsigned long get_zeroed_page(gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004900{
Akinobu Mita945a1112009-09-21 17:01:47 -07004901 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903EXPORT_SYMBOL(get_zeroed_page);
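/*
 * Illustrative sketch, not part of the original file: get_zeroed_page()
 * pairs with free_page() for a single zero-filled page addressed by its
 * kernel virtual address. The helper name is made up for the example.
 */
static int example_use_zeroed_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;
	/* ... fill and use the page at (void *)addr ... */
	free_page(addr);
	return 0;
}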
4904
Aaron Lu742aa7f2018-12-28 00:35:22 -08004905static inline void free_the_page(struct page *page, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906{
Aaron Lu742aa7f2018-12-28 00:35:22 -08004907 if (order == 0) /* Via pcp? */
4908 free_unref_page(page);
4909 else
4910 __free_pages_ok(page, order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004911}
4912
Aaron Lu742aa7f2018-12-28 00:35:22 -08004913void __free_pages(struct page *page, unsigned int order)
4914{
4915 if (put_page_testzero(page))
4916 free_the_page(page, order);
4917}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004918EXPORT_SYMBOL(__free_pages);
4919
Harvey Harrison920c7a52008-02-04 22:29:26 -08004920void free_pages(unsigned long addr, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921{
4922 if (addr != 0) {
Nick Piggin725d7042006-09-25 23:30:55 -07004923 VM_BUG_ON(!virt_addr_valid((void *)addr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 __free_pages(virt_to_page((void *)addr), order);
4925 }
4926}
4927
4928EXPORT_SYMBOL(free_pages);
4929
Glauber Costa6a1a0d32012-12-18 14:22:00 -08004930/*
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004931 * Page Fragment:
4932 * An arbitrary-length arbitrary-offset area of memory which resides
4933 * within a 0 or higher order page. Multiple fragments within that page
4934 * are individually refcounted, in the page's reference counter.
4935 *
4936 * The page_frag functions below provide a simple allocation framework for
4937 * page fragments. This is used by the network stack and network device
4938 * drivers to provide a backing region of memory for use as either an
4939 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4940 */
Alexander Duyck2976db82017-01-10 16:58:09 -08004941static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4942 gfp_t gfp_mask)
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004943{
4944 struct page *page = NULL;
4945 gfp_t gfp = gfp_mask;
4946
4947#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4948 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4949 __GFP_NOMEMALLOC;
4950 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4951 PAGE_FRAG_CACHE_MAX_ORDER);
4952 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4953#endif
4954 if (unlikely(!page))
4955 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4956
4957 nc->va = page ? page_address(page) : NULL;
4958
4959 return page;
4960}
4961
Alexander Duyck2976db82017-01-10 16:58:09 -08004962void __page_frag_cache_drain(struct page *page, unsigned int count)
Alexander Duyck44fdffd2016-12-14 15:05:26 -08004963{
4964 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4965
Aaron Lu742aa7f2018-12-28 00:35:22 -08004966 if (page_ref_sub_and_test(page, count))
4967 free_the_page(page, compound_order(page));
Alexander Duyck44fdffd2016-12-14 15:05:26 -08004968}
Alexander Duyck2976db82017-01-10 16:58:09 -08004969EXPORT_SYMBOL(__page_frag_cache_drain);
Alexander Duyck44fdffd2016-12-14 15:05:26 -08004970
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08004971void *page_frag_alloc(struct page_frag_cache *nc,
4972 unsigned int fragsz, gfp_t gfp_mask)
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004973{
4974 unsigned int size = PAGE_SIZE;
4975 struct page *page;
4976 int offset;
4977
4978 if (unlikely(!nc->va)) {
4979refill:
Alexander Duyck2976db82017-01-10 16:58:09 -08004980 page = __page_frag_cache_refill(nc, gfp_mask);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004981 if (!page)
4982 return NULL;
4983
4984#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4985 /* if size can vary use size else just use PAGE_SIZE */
4986 size = nc->size;
4987#endif
4988 /* Even if we own the page, we do not use atomic_set().
4989 * This would break get_page_unless_zero() users.
4990 */
Alexander Duyck86447722019-02-15 14:44:12 -08004991 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004992
4993 /* reset page count bias and offset to start of new frag */
Michal Hocko2f064f32015-08-21 14:11:51 -07004994 nc->pfmemalloc = page_is_pfmemalloc(page);
Alexander Duyck86447722019-02-15 14:44:12 -08004995 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004996 nc->offset = size;
4997 }
4998
4999 offset = nc->offset - fragsz;
5000 if (unlikely(offset < 0)) {
5001 page = virt_to_page(nc->va);
5002
Joonsoo Kimfe896d12016-03-17 14:19:26 -07005003 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07005004 goto refill;
5005
5006#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5007 /* if size can vary use size else just use PAGE_SIZE */
5008 size = nc->size;
5009#endif
5010 /* OK, page count is 0, we can safely set it */
Alexander Duyck86447722019-02-15 14:44:12 -08005011 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07005012
5013 /* reset page count bias and offset to start of new frag */
Alexander Duyck86447722019-02-15 14:44:12 -08005014 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07005015 offset = size - fragsz;
5016 }
5017
5018 nc->pagecnt_bias--;
5019 nc->offset = offset;
5020
5021 return nc->va + offset;
5022}
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08005023EXPORT_SYMBOL(page_frag_alloc);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07005024
5025/*
5026 * Frees a page fragment allocated out of either a compound or order 0 page.
5027 */
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08005028void page_frag_free(void *addr)
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07005029{
5030 struct page *page = virt_to_head_page(addr);
5031
Aaron Lu742aa7f2018-12-28 00:35:22 -08005032 if (unlikely(put_page_testzero(page)))
5033 free_the_page(page, compound_order(page));
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07005034}
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08005035EXPORT_SYMBOL(page_frag_free);
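/*
 * Illustrative sketch, not part of the original file: a zero-initialised
 * page_frag_cache hands out small buffers carved from a larger page, and
 * each fragment is dropped individually with page_frag_free(). The cache
 * and helper names are made up for the example; real users typically keep
 * one cache per CPU or per device.
 */
static struct page_frag_cache example_frag_cache;

static void *example_frag_get(unsigned int len)
{
	return page_frag_alloc(&example_frag_cache, len, GFP_ATOMIC);
}

static void example_frag_put(void *buf)
{
	page_frag_free(buf);
}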
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07005036
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08005037static void *make_alloc_exact(unsigned long addr, unsigned int order,
5038 size_t size)
Andi Kleenee85c2e2011-05-11 15:13:34 -07005039{
5040 if (addr) {
5041 unsigned long alloc_end = addr + (PAGE_SIZE << order);
5042 unsigned long used = addr + PAGE_ALIGN(size);
5043
5044 split_page(virt_to_page((void *)addr), order);
5045 while (used < alloc_end) {
5046 free_page(used);
5047 used += PAGE_SIZE;
5048 }
5049 }
5050 return (void *)addr;
5051}
5052
Timur Tabi2be0ffe2008-07-23 21:28:11 -07005053/**
                                              5054 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5055 * @size: the number of bytes to allocate
Vlastimil Babka63931eb2019-05-13 17:16:47 -07005056 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
Timur Tabi2be0ffe2008-07-23 21:28:11 -07005057 *
5058 * This function is similar to alloc_pages(), except that it allocates the
5059 * minimum number of pages to satisfy the request. alloc_pages() can only
5060 * allocate memory in power-of-two pages.
5061 *
5062 * This function is also limited by MAX_ORDER.
5063 *
5064 * Memory allocated by this function must be released by free_pages_exact().
Mike Rapoporta862f682019-03-05 15:48:42 -08005065 *
5066 * Return: pointer to the allocated area or %NULL in case of error.
Timur Tabi2be0ffe2008-07-23 21:28:11 -07005067 */
5068void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5069{
5070 unsigned int order = get_order(size);
5071 unsigned long addr;
5072
Vlastimil Babka63931eb2019-05-13 17:16:47 -07005073 if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5074 gfp_mask &= ~__GFP_COMP;
5075
Timur Tabi2be0ffe2008-07-23 21:28:11 -07005076 addr = __get_free_pages(gfp_mask, order);
Andi Kleenee85c2e2011-05-11 15:13:34 -07005077 return make_alloc_exact(addr, order, size);
Timur Tabi2be0ffe2008-07-23 21:28:11 -07005078}
5079EXPORT_SYMBOL(alloc_pages_exact);
5080
5081/**
Andi Kleenee85c2e2011-05-11 15:13:34 -07005082 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5083 * pages on a node.
Randy Dunlapb5e6ab52011-05-16 13:16:54 -07005084 * @nid: the preferred node ID where memory should be allocated
Andi Kleenee85c2e2011-05-11 15:13:34 -07005085 * @size: the number of bytes to allocate
Vlastimil Babka63931eb2019-05-13 17:16:47 -07005086 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
Andi Kleenee85c2e2011-05-11 15:13:34 -07005087 *
5088 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5089 * back.
Mike Rapoporta862f682019-03-05 15:48:42 -08005090 *
5091 * Return: pointer to the allocated area or %NULL in case of error.
Andi Kleenee85c2e2011-05-11 15:13:34 -07005092 */
Fabian Fredericke1931812014-08-06 16:04:59 -07005093void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
Andi Kleenee85c2e2011-05-11 15:13:34 -07005094{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08005095 unsigned int order = get_order(size);
Vlastimil Babka63931eb2019-05-13 17:16:47 -07005096 struct page *p;
5097
5098 if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5099 gfp_mask &= ~__GFP_COMP;
5100
5101 p = alloc_pages_node(nid, gfp_mask, order);
Andi Kleenee85c2e2011-05-11 15:13:34 -07005102 if (!p)
5103 return NULL;
5104 return make_alloc_exact((unsigned long)page_address(p), order, size);
5105}
Andi Kleenee85c2e2011-05-11 15:13:34 -07005106
5107/**
Timur Tabi2be0ffe2008-07-23 21:28:11 -07005108 * free_pages_exact - release memory allocated via alloc_pages_exact()
5109 * @virt: the value returned by alloc_pages_exact.
5110 * @size: size of allocation, same value as passed to alloc_pages_exact().
5111 *
5112 * Release the memory allocated by a previous call to alloc_pages_exact.
5113 */
5114void free_pages_exact(void *virt, size_t size)
5115{
5116 unsigned long addr = (unsigned long)virt;
5117 unsigned long end = addr + PAGE_ALIGN(size);
5118
5119 while (addr < end) {
5120 free_page(addr);
5121 addr += PAGE_SIZE;
5122 }
5123}
5124EXPORT_SYMBOL(free_pages_exact);
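/*
 * Illustrative sketch, not part of the original file: alloc_pages_exact()
 * suits buffers that are larger than a page but not a power-of-two number
 * of pages, since the tail pages beyond PAGE_ALIGN(size) go straight back
 * to the buddy allocator. The helper names are made up for the example.
 */
static void *example_alloc_table(size_t nr_entries, size_t entry_size)
{
	return alloc_pages_exact(nr_entries * entry_size,
				 GFP_KERNEL | __GFP_ZERO);
}

static void example_free_table(void *table, size_t nr_entries, size_t entry_size)
{
	free_pages_exact(table, nr_entries * entry_size);
}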
5125
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08005126/**
5127 * nr_free_zone_pages - count number of pages beyond high watermark
5128 * @offset: The zone index of the highest zone
5129 *
Mike Rapoporta862f682019-03-05 15:48:42 -08005130 * nr_free_zone_pages() counts the number of pages which are beyond the
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08005131 * high watermark within all zones at or below a given zone index. For each
5132 * zone, the number of pages is calculated as:
mchehab@s-opensource.com0e056eb2017-03-30 17:11:36 -03005133 *
5134 * nr_free_zone_pages = managed_pages - high_pages
Mike Rapoporta862f682019-03-05 15:48:42 -08005135 *
5136 * Return: number of pages beyond high watermark.
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08005137 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08005138static unsigned long nr_free_zone_pages(int offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139{
Mel Gormandd1a2392008-04-28 02:12:17 -07005140 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07005141 struct zone *zone;
5142
Martin J. Blighe310fd42005-07-29 22:59:18 -07005143 /* Just pick one node, since fallback list is circular */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08005144 unsigned long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145
Mel Gorman0e884602008-04-28 02:12:14 -07005146 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147
Mel Gorman54a6eb52008-04-28 02:12:16 -07005148 for_each_zone_zonelist(zone, z, zonelist, offset) {
Arun KS9705bea2018-12-28 00:34:24 -08005149 unsigned long size = zone_managed_pages(zone);
Mel Gorman41858962009-06-16 15:32:12 -07005150 unsigned long high = high_wmark_pages(zone);
Martin J. Blighe310fd42005-07-29 22:59:18 -07005151 if (size > high)
5152 sum += size - high;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153 }
5154
5155 return sum;
5156}
5157
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08005158/**
5159 * nr_free_buffer_pages - count number of pages beyond high watermark
5160 *
5161 * nr_free_buffer_pages() counts the number of pages which are beyond the high
5162 * watermark within ZONE_DMA and ZONE_NORMAL.
Mike Rapoporta862f682019-03-05 15:48:42 -08005163 *
5164 * Return: number of pages beyond high watermark within ZONE_DMA and
5165 * ZONE_NORMAL.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005166 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08005167unsigned long nr_free_buffer_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168{
Al Viroaf4ca452005-10-21 02:55:38 -04005169 return nr_free_zone_pages(gfp_zone(GFP_USER));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005170}
Meelap Shahc2f1a552007-07-17 04:04:39 -07005171EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
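/*
 * Illustrative sketch, not part of the original file: nr_free_buffer_pages()
 * is commonly used to scale a subsystem's cache or queue limits to the
 * amount of lowmem above the high watermarks. The divisor and helper name
 * are made up for the example.
 */
static unsigned long example_default_cache_pages(void)
{
	/* cap the cache at roughly 1/32 of the freeable lowmem */
	return nr_free_buffer_pages() / 32;
}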
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08005173/**
5174 * nr_free_pagecache_pages - count number of pages beyond high watermark
5175 *
5176 * nr_free_pagecache_pages() counts the number of pages which are beyond the
5177 * high watermark within all zones.
Mike Rapoporta862f682019-03-05 15:48:42 -08005178 *
5179 * Return: number of pages beyond high watermark within all zones.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08005181unsigned long nr_free_pagecache_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182{
Mel Gorman2a1e2742007-07-17 04:03:12 -07005183 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184}
Christoph Lameter08e0f6a2006-09-27 01:50:06 -07005185
5186static inline void show_node(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005187{
Kirill A. Shutemove5adfff2012-12-11 16:00:29 -08005188 if (IS_ENABLED(CONFIG_NUMA))
Andy Whitcroft25ba77c2006-12-06 20:33:03 -08005189 printk("Node %d ", zone_to_nid(zone));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191
Igor Redkod02bd272016-03-17 14:19:05 -07005192long si_mem_available(void)
5193{
5194 long available;
5195 unsigned long pagecache;
5196 unsigned long wmark_low = 0;
5197 unsigned long pages[NR_LRU_LISTS];
Vlastimil Babkab29940c2018-10-26 15:05:46 -07005198 unsigned long reclaimable;
Igor Redkod02bd272016-03-17 14:19:05 -07005199 struct zone *zone;
5200 int lru;
5201
5202 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
Mel Gorman2f95ff92016-08-11 15:32:57 -07005203 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
Igor Redkod02bd272016-03-17 14:19:05 -07005204
5205 for_each_zone(zone)
Mel Gormana9214442018-12-28 00:35:44 -08005206 wmark_low += low_wmark_pages(zone);
Igor Redkod02bd272016-03-17 14:19:05 -07005207
5208 /*
5209 * Estimate the amount of memory available for userspace allocations,
5210 * without causing swapping.
5211 */
Michal Hockoc41f0122017-09-06 16:23:36 -07005212 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
Igor Redkod02bd272016-03-17 14:19:05 -07005213
5214 /*
5215 * Not all the page cache can be freed, otherwise the system will
5216 * start swapping. Assume at least half of the page cache, or the
5217 * low watermark worth of cache, needs to stay.
5218 */
5219 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5220 pagecache -= min(pagecache / 2, wmark_low);
5221 available += pagecache;
5222
5223 /*
Vlastimil Babkab29940c2018-10-26 15:05:46 -07005224 * Part of the reclaimable slab and other kernel memory consists of
5225 * items that are in use, and cannot be freed. Cap this estimate at the
5226 * low watermark.
Igor Redkod02bd272016-03-17 14:19:05 -07005227 */
Roman Gushchind42f3242020-08-06 23:20:39 -07005228 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5229 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
Vlastimil Babkab29940c2018-10-26 15:05:46 -07005230 available += reclaimable - min(reclaimable / 2, wmark_low);
Roman Gushchin034ebf62018-04-10 16:27:40 -07005231
Igor Redkod02bd272016-03-17 14:19:05 -07005232 if (available < 0)
5233 available = 0;
5234 return available;
5235}
5236EXPORT_SYMBOL_GPL(si_mem_available);
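/*
 * Illustrative sketch, not part of the original file: si_mem_available()
 * is the estimate behind MemAvailable in /proc/meminfo. The threshold and
 * helper name below are made up for the example.
 */
static bool example_memory_is_tight(void)
{
	/* treat less than ~64MB of available memory as "tight" */
	return si_mem_available() < (long)((64UL << 20) / PAGE_SIZE);
}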
5237
Linus Torvalds1da177e2005-04-16 15:20:36 -07005238void si_meminfo(struct sysinfo *val)
5239{
Arun KSca79b0c2018-12-28 00:34:29 -08005240 val->totalram = totalram_pages();
Mel Gorman11fb9982016-07-28 15:46:20 -07005241 val->sharedram = global_node_page_state(NR_SHMEM);
Michal Hockoc41f0122017-09-06 16:23:36 -07005242 val->freeram = global_zone_page_state(NR_FREE_PAGES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005243 val->bufferram = nr_blockdev_pages();
Arun KSca79b0c2018-12-28 00:34:29 -08005244 val->totalhigh = totalhigh_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245 val->freehigh = nr_free_highpages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005246 val->mem_unit = PAGE_SIZE;
5247}
5248
5249EXPORT_SYMBOL(si_meminfo);
5250
5251#ifdef CONFIG_NUMA
5252void si_meminfo_node(struct sysinfo *val, int nid)
5253{
Jiang Liucdd91a72013-07-03 15:03:27 -07005254 int zone_type; /* needs to be signed */
5255 unsigned long managed_pages = 0;
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07005256 unsigned long managed_highpages = 0;
5257 unsigned long free_highpages = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258 pg_data_t *pgdat = NODE_DATA(nid);
5259
Jiang Liucdd91a72013-07-03 15:03:27 -07005260 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
Arun KS9705bea2018-12-28 00:34:24 -08005261 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
Jiang Liucdd91a72013-07-03 15:03:27 -07005262 val->totalram = managed_pages;
Mel Gorman11fb9982016-07-28 15:46:20 -07005263 val->sharedram = node_page_state(pgdat, NR_SHMEM);
Mel Gorman75ef7182016-07-28 15:45:24 -07005264 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07005265#ifdef CONFIG_HIGHMEM
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07005266 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5267 struct zone *zone = &pgdat->node_zones[zone_type];
5268
5269 if (is_highmem(zone)) {
Arun KS9705bea2018-12-28 00:34:24 -08005270 managed_highpages += zone_managed_pages(zone);
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07005271 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5272 }
5273 }
5274 val->totalhigh = managed_highpages;
5275 val->freehigh = free_highpages;
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07005276#else
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07005277 val->totalhigh = managed_highpages;
5278 val->freehigh = free_highpages;
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07005279#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005280 val->mem_unit = PAGE_SIZE;
5281}
5282#endif
5283
David Rientjesddd588b2011-03-22 16:30:46 -07005284/*
David Rientjes7bf02ea2011-05-24 17:11:16 -07005285 * Determine whether the node should be displayed or not, depending on whether
5286 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
David Rientjesddd588b2011-03-22 16:30:46 -07005287 */
Michal Hocko9af744d2017-02-22 15:46:16 -08005288static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
David Rientjesddd588b2011-03-22 16:30:46 -07005289{
David Rientjesddd588b2011-03-22 16:30:46 -07005290 if (!(flags & SHOW_MEM_FILTER_NODES))
Michal Hocko9af744d2017-02-22 15:46:16 -08005291 return false;
David Rientjesddd588b2011-03-22 16:30:46 -07005292
Michal Hocko9af744d2017-02-22 15:46:16 -08005293 /*
5294 * no node mask - aka implicit memory numa policy. Do not bother with
5295 * the synchronization - read_mems_allowed_begin - because we do not
5296 * have to be precise here.
5297 */
5298 if (!nodemask)
5299 nodemask = &cpuset_current_mems_allowed;
5300
5301 return !node_isset(nid, *nodemask);
David Rientjesddd588b2011-03-22 16:30:46 -07005302}
5303
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304#define K(x) ((x) << (PAGE_SHIFT-10))
5305
Rabin Vincent377e4f12012-12-11 16:00:24 -08005306static void show_migration_types(unsigned char type)
5307{
5308 static const char types[MIGRATE_TYPES] = {
5309 [MIGRATE_UNMOVABLE] = 'U',
Rabin Vincent377e4f12012-12-11 16:00:24 -08005310 [MIGRATE_MOVABLE] = 'M',
Vlastimil Babka475a2f92015-12-11 13:40:29 -08005311 [MIGRATE_RECLAIMABLE] = 'E',
5312 [MIGRATE_HIGHATOMIC] = 'H',
Rabin Vincent377e4f12012-12-11 16:00:24 -08005313#ifdef CONFIG_CMA
5314 [MIGRATE_CMA] = 'C',
5315#endif
Minchan Kim194159f2013-02-22 16:33:58 -08005316#ifdef CONFIG_MEMORY_ISOLATION
Rabin Vincent377e4f12012-12-11 16:00:24 -08005317 [MIGRATE_ISOLATE] = 'I',
Minchan Kim194159f2013-02-22 16:33:58 -08005318#endif
Rabin Vincent377e4f12012-12-11 16:00:24 -08005319 };
5320 char tmp[MIGRATE_TYPES + 1];
5321 char *p = tmp;
5322 int i;
5323
5324 for (i = 0; i < MIGRATE_TYPES; i++) {
5325 if (type & (1 << i))
5326 *p++ = types[i];
5327 }
5328
5329 *p = '\0';
Joe Perches1f84a182016-10-27 17:46:29 -07005330 printk(KERN_CONT "(%s) ", tmp);
Rabin Vincent377e4f12012-12-11 16:00:24 -08005331}
5332
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333/*
5334 * Show free area list (used inside shift_scroll-lock stuff)
5335 * We also calculate the percentage fragmentation. We do this by counting the
5336 * memory on each free list with the exception of the first item on the list.
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005337 *
5338 * Bits in @filter:
5339 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5340 * cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341 */
Michal Hocko9af744d2017-02-22 15:46:16 -08005342void show_free_areas(unsigned int filter, nodemask_t *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005343{
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005344 unsigned long free_pcp = 0;
Jes Sorensenc7241912006-09-27 01:50:05 -07005345 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005346 struct zone *zone;
Mel Gorman599d0c92016-07-28 15:45:31 -07005347 pg_data_t *pgdat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005348
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07005349 for_each_populated_zone(zone) {
Michal Hocko9af744d2017-02-22 15:46:16 -08005350 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
David Rientjesddd588b2011-03-22 16:30:46 -07005351 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005352
Konstantin Khlebnikov761b0672015-04-14 15:45:32 -07005353 for_each_online_cpu(cpu)
5354 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005355 }
5356
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07005357 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5358 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
NeilBrown8d928902020-06-01 21:48:21 -07005359 " unevictable:%lu dirty:%lu writeback:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005360 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07005361 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005362 " free:%lu free_pcp:%lu free_cma:%lu\n",
Mel Gorman599d0c92016-07-28 15:45:31 -07005363 global_node_page_state(NR_ACTIVE_ANON),
5364 global_node_page_state(NR_INACTIVE_ANON),
5365 global_node_page_state(NR_ISOLATED_ANON),
5366 global_node_page_state(NR_ACTIVE_FILE),
5367 global_node_page_state(NR_INACTIVE_FILE),
5368 global_node_page_state(NR_ISOLATED_FILE),
5369 global_node_page_state(NR_UNEVICTABLE),
Mel Gorman11fb9982016-07-28 15:46:20 -07005370 global_node_page_state(NR_FILE_DIRTY),
5371 global_node_page_state(NR_WRITEBACK),
Roman Gushchind42f3242020-08-06 23:20:39 -07005372 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
5373 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
Mel Gorman50658e22016-07-28 15:46:14 -07005374 global_node_page_state(NR_FILE_MAPPED),
Mel Gorman11fb9982016-07-28 15:46:20 -07005375 global_node_page_state(NR_SHMEM),
Michal Hockoc41f0122017-09-06 16:23:36 -07005376 global_zone_page_state(NR_PAGETABLE),
5377 global_zone_page_state(NR_BOUNCE),
5378 global_zone_page_state(NR_FREE_PAGES),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005379 free_pcp,
Michal Hockoc41f0122017-09-06 16:23:36 -07005380 global_zone_page_state(NR_FREE_CMA_PAGES));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005381
Mel Gorman599d0c92016-07-28 15:45:31 -07005382 for_each_online_pgdat(pgdat) {
Michal Hocko9af744d2017-02-22 15:46:16 -08005383 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
Michal Hockoc02e50b2017-02-22 15:46:07 -08005384 continue;
5385
Mel Gorman599d0c92016-07-28 15:45:31 -07005386 printk("Node %d"
5387 " active_anon:%lukB"
5388 " inactive_anon:%lukB"
5389 " active_file:%lukB"
5390 " inactive_file:%lukB"
5391 " unevictable:%lukB"
5392 " isolated(anon):%lukB"
5393 " isolated(file):%lukB"
Mel Gorman50658e22016-07-28 15:46:14 -07005394 " mapped:%lukB"
Mel Gorman11fb9982016-07-28 15:46:20 -07005395 " dirty:%lukB"
5396 " writeback:%lukB"
5397 " shmem:%lukB"
5398#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5399 " shmem_thp: %lukB"
5400 " shmem_pmdmapped: %lukB"
5401 " anon_thp: %lukB"
5402#endif
5403 " writeback_tmp:%lukB"
Shakeel Butt991e7672020-08-06 23:21:37 -07005404 " kernel_stack:%lukB"
5405#ifdef CONFIG_SHADOW_CALL_STACK
5406 " shadow_call_stack:%lukB"
5407#endif
Mel Gorman599d0c92016-07-28 15:45:31 -07005408 " all_unreclaimable? %s"
5409 "\n",
5410 pgdat->node_id,
5411 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5412 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5413 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5414 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5415 K(node_page_state(pgdat, NR_UNEVICTABLE)),
5416 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5417 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
Mel Gorman50658e22016-07-28 15:46:14 -07005418 K(node_page_state(pgdat, NR_FILE_MAPPED)),
Mel Gorman11fb9982016-07-28 15:46:20 -07005419 K(node_page_state(pgdat, NR_FILE_DIRTY)),
5420 K(node_page_state(pgdat, NR_WRITEBACK)),
Alexander Polakov1f06b812017-04-07 16:04:45 -07005421 K(node_page_state(pgdat, NR_SHMEM)),
Mel Gorman11fb9982016-07-28 15:46:20 -07005422#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5423 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
5424 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
5425 * HPAGE_PMD_NR),
5426 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
5427#endif
Mel Gorman11fb9982016-07-28 15:46:20 -07005428 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
Shakeel Butt991e7672020-08-06 23:21:37 -07005429 node_page_state(pgdat, NR_KERNEL_STACK_KB),
5430#ifdef CONFIG_SHADOW_CALL_STACK
5431 node_page_state(pgdat, NR_KERNEL_SCS_KB),
5432#endif
Johannes Weinerc73322d2017-05-03 14:51:51 -07005433 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5434 "yes" : "no");
Mel Gorman599d0c92016-07-28 15:45:31 -07005435 }
5436
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07005437 for_each_populated_zone(zone) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438 int i;
5439
Michal Hocko9af744d2017-02-22 15:46:16 -08005440 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
David Rientjesddd588b2011-03-22 16:30:46 -07005441 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005442
5443 free_pcp = 0;
5444 for_each_online_cpu(cpu)
5445 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5446
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447 show_node(zone);
Joe Perches1f84a182016-10-27 17:46:29 -07005448 printk(KERN_CONT
5449 "%s"
Linus Torvalds1da177e2005-04-16 15:20:36 -07005450 " free:%lukB"
5451 " min:%lukB"
5452 " low:%lukB"
5453 " high:%lukB"
lijiazie47b3462019-11-30 17:55:21 -08005454 " reserved_highatomic:%luKB"
Minchan Kim71c799f2016-07-28 15:47:26 -07005455 " active_anon:%lukB"
5456 " inactive_anon:%lukB"
5457 " active_file:%lukB"
5458 " inactive_file:%lukB"
5459 " unevictable:%lukB"
Mel Gorman5a1c84b2016-07-28 15:47:31 -07005460 " writepending:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07005461 " present:%lukB"
Jiang Liu9feedc92012-12-12 13:52:12 -08005462 " managed:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005463 " mlocked:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005464 " pagetables:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005465 " bounce:%lukB"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005466 " free_pcp:%lukB"
5467 " local_pcp:%ukB"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07005468 " free_cma:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07005469 "\n",
5470 zone->name,
Mel Gorman88f5acf2011-01-13 15:45:41 -08005471 K(zone_page_state(zone, NR_FREE_PAGES)),
Mel Gorman41858962009-06-16 15:32:12 -07005472 K(min_wmark_pages(zone)),
5473 K(low_wmark_pages(zone)),
5474 K(high_wmark_pages(zone)),
lijiazie47b3462019-11-30 17:55:21 -08005475 K(zone->nr_reserved_highatomic),
Minchan Kim71c799f2016-07-28 15:47:26 -07005476 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
5477 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
5478 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
5479 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
5480 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
Mel Gorman5a1c84b2016-07-28 15:47:31 -07005481 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
Linus Torvalds1da177e2005-04-16 15:20:36 -07005482 K(zone->present_pages),
Arun KS9705bea2018-12-28 00:34:24 -08005483 K(zone_managed_pages(zone)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005484 K(zone_page_state(zone, NR_MLOCK)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005485 K(zone_page_state(zone, NR_PAGETABLE)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005486 K(zone_page_state(zone, NR_BOUNCE)),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005487 K(free_pcp),
5488 K(this_cpu_read(zone->pageset->pcp.count)),
Minchan Kim33e077b2016-07-28 15:47:14 -07005489 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005490 printk("lowmem_reserve[]:");
5491 for (i = 0; i < MAX_NR_ZONES; i++)
Joe Perches1f84a182016-10-27 17:46:29 -07005492 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
5493 printk(KERN_CONT "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005494 }
5495
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07005496 for_each_populated_zone(zone) {
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08005497 unsigned int order;
5498 unsigned long nr[MAX_ORDER], flags, total = 0;
Rabin Vincent377e4f12012-12-11 16:00:24 -08005499 unsigned char types[MAX_ORDER];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005500
Michal Hocko9af744d2017-02-22 15:46:16 -08005501 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
David Rientjesddd588b2011-03-22 16:30:46 -07005502 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005503 show_node(zone);
Joe Perches1f84a182016-10-27 17:46:29 -07005504 printk(KERN_CONT "%s: ", zone->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005505
5506 spin_lock_irqsave(&zone->lock, flags);
5507 for (order = 0; order < MAX_ORDER; order++) {
Rabin Vincent377e4f12012-12-11 16:00:24 -08005508 struct free_area *area = &zone->free_area[order];
5509 int type;
5510
5511 nr[order] = area->nr_free;
Kirill Korotaev8f9de512006-06-23 02:03:50 -07005512 total += nr[order] << order;
Rabin Vincent377e4f12012-12-11 16:00:24 -08005513
5514 types[order] = 0;
5515 for (type = 0; type < MIGRATE_TYPES; type++) {
Dan Williamsb03641a2019-05-14 15:41:32 -07005516 if (!free_area_empty(area, type))
Rabin Vincent377e4f12012-12-11 16:00:24 -08005517 types[order] |= 1 << type;
5518 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005519 }
5520 spin_unlock_irqrestore(&zone->lock, flags);
Rabin Vincent377e4f12012-12-11 16:00:24 -08005521 for (order = 0; order < MAX_ORDER; order++) {
Joe Perches1f84a182016-10-27 17:46:29 -07005522 printk(KERN_CONT "%lu*%lukB ",
5523 nr[order], K(1UL) << order);
Rabin Vincent377e4f12012-12-11 16:00:24 -08005524 if (nr[order])
5525 show_migration_types(types[order]);
5526 }
Joe Perches1f84a182016-10-27 17:46:29 -07005527 printk(KERN_CONT "= %lukB\n", K(total));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005528 }
5529
David Rientjes949f7ec2013-04-29 15:07:48 -07005530 hugetlb_show_meminfo();
5531
Mel Gorman11fb9982016-07-28 15:46:20 -07005532 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
Larry Woodmane6f36022008-02-04 22:29:30 -08005533
Linus Torvalds1da177e2005-04-16 15:20:36 -07005534 show_swap_cache_info();
5535}
5536
Mel Gorman19770b32008-04-28 02:12:18 -07005537static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5538{
5539 zoneref->zone = zone;
5540 zoneref->zone_idx = zone_idx(zone);
5541}
5542
Linus Torvalds1da177e2005-04-16 15:20:36 -07005543/*
5544 * Builds allocation fallback zone lists.
Christoph Lameter1a932052006-01-06 00:11:16 -08005545 *
5546 * Add all populated zones of a node to the zonelist.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005547 */
Michal Hocko9d3be212017-09-06 16:20:30 -07005548static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549{
Christoph Lameter1a932052006-01-06 00:11:16 -08005550 struct zone *zone;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07005551 enum zone_type zone_type = MAX_NR_ZONES;
Michal Hocko9d3be212017-09-06 16:20:30 -07005552 int nr_zones = 0;
Christoph Lameter02a68a52006-01-06 00:11:18 -08005553
5554 do {
Christoph Lameter2f6726e2006-09-25 23:31:18 -07005555 zone_type--;
Christoph Lameter070f8032006-01-06 00:11:19 -08005556 zone = pgdat->node_zones + zone_type;
Mel Gorman6aa303d2016-09-01 16:14:55 -07005557 if (managed_zone(zone)) {
Michal Hocko9d3be212017-09-06 16:20:30 -07005558 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
Christoph Lameter070f8032006-01-06 00:11:19 -08005559 check_highest_zone(zone_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005560 }
Christoph Lameter2f6726e2006-09-25 23:31:18 -07005561 } while (zone_type);
Zhang Yanfeibc732f12013-07-08 16:00:06 -07005562
Christoph Lameter070f8032006-01-06 00:11:19 -08005563 return nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005564}
5565
5566#ifdef CONFIG_NUMA
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005567
5568static int __parse_numa_zonelist_order(char *s)
5569{
Michal Hockoc9bff3e2017-09-06 16:20:13 -07005570 /*
                                              5571	 * We used to support different zonelist modes but they turned
                                              5572	 * out to be just not useful. Let's keep the warning in place
                                              5573	 * if somebody still uses the cmd line parameter so that we do
                                              5574	 * not fail it silently
5575 */
5576 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5577 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005578 return -EINVAL;
5579 }
5580 return 0;
5581}
5582
Michal Hockoc9bff3e2017-09-06 16:20:13 -07005583char numa_zonelist_order[] = "Node";
5584
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005585/*
5586 * sysctl handler for numa_zonelist_order
5587 */
Joe Perchescccad5b2014-06-06 14:38:09 -07005588int numa_zonelist_order_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02005589 void *buffer, size_t *length, loff_t *ppos)
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005590{
Christoph Hellwig32927392020-04-24 08:43:38 +02005591 if (write)
5592 return __parse_numa_zonelist_order(buffer);
5593 return proc_dostring(table, write, buffer, length, ppos);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005594}
5595
5596
Christoph Lameter62bc62a2009-06-16 15:32:15 -07005597#define MAX_NODE_LOAD (nr_online_nodes)
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005598static int node_load[MAX_NUMNODES];
5599
Linus Torvalds1da177e2005-04-16 15:20:36 -07005600/**
Pavel Pisa4dc3b162005-05-01 08:59:25 -07005601 * find_next_best_node - find the next node that should appear in a given node's fallback list
Linus Torvalds1da177e2005-04-16 15:20:36 -07005602 * @node: node whose fallback list we're appending
5603 * @used_node_mask: nodemask_t of already used nodes
5604 *
5605 * We use a number of factors to determine which is the next node that should
5606 * appear on a given node's fallback list. The node should not have appeared
5607 * already in @node's fallback list, and it should be the next closest node
5608 * according to the distance array (which contains arbitrary distance values
5609 * from each node to each node in the system), and should also prefer nodes
5610 * with no CPUs, since presumably they'll have very little allocation pressure
5611 * on them otherwise.
Mike Rapoporta862f682019-03-05 15:48:42 -08005612 *
5613 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005614 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005615static int find_next_best_node(int node, nodemask_t *used_node_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005616{
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01005617 int n, val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005618 int min_val = INT_MAX;
David Rientjes00ef2d22013-02-22 16:35:36 -08005619 int best_node = NUMA_NO_NODE;
Rusty Russella70f7302009-03-13 14:49:46 +10305620 const struct cpumask *tmp = cpumask_of_node(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005621
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01005622 /* Use the local node if we haven't already */
5623 if (!node_isset(node, *used_node_mask)) {
5624 node_set(node, *used_node_mask);
5625 return node;
5626 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005627
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005628 for_each_node_state(n, N_MEMORY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629
5630 /* Don't want a node to appear more than once */
5631 if (node_isset(n, *used_node_mask))
5632 continue;
5633
Linus Torvalds1da177e2005-04-16 15:20:36 -07005634 /* Use the distance array to find the distance */
5635 val = node_distance(node, n);
5636
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01005637 /* Penalize nodes under us ("prefer the next node") */
5638 val += (n < node);
5639
Linus Torvalds1da177e2005-04-16 15:20:36 -07005640 /* Give preference to headless and unused nodes */
Rusty Russella70f7302009-03-13 14:49:46 +10305641 tmp = cpumask_of_node(n);
5642 if (!cpumask_empty(tmp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005643 val += PENALTY_FOR_NODE_WITH_CPUS;
5644
5645 /* Slight preference for less loaded node */
5646 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
5647 val += node_load[n];
5648
5649 if (val < min_val) {
5650 min_val = val;
5651 best_node = n;
5652 }
5653 }
5654
5655 if (best_node >= 0)
5656 node_set(best_node, *used_node_mask);
5657
5658 return best_node;
5659}
5660
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005661
5662/*
5663 * Build zonelists ordered by node and zones within node.
5664 * This results in maximum locality--normal zone overflows into local
5665 * DMA zone, if any--but risks exhausting DMA zone.
5666 */
Michal Hocko9d3be212017-09-06 16:20:30 -07005667static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5668 unsigned nr_nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005669{
Michal Hocko9d3be212017-09-06 16:20:30 -07005670 struct zoneref *zonerefs;
5671 int i;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005672
Michal Hocko9d3be212017-09-06 16:20:30 -07005673 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5674
5675 for (i = 0; i < nr_nodes; i++) {
5676 int nr_zones;
5677
5678 pg_data_t *node = NODE_DATA(node_order[i]);
5679
5680 nr_zones = build_zonerefs_node(node, zonerefs);
5681 zonerefs += nr_zones;
5682 }
5683 zonerefs->zone = NULL;
5684 zonerefs->zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005685}
5686
5687/*
Christoph Lameter523b9452007-10-16 01:25:37 -07005688 * Build gfp_thisnode zonelists
5689 */
5690static void build_thisnode_zonelists(pg_data_t *pgdat)
5691{
Michal Hocko9d3be212017-09-06 16:20:30 -07005692 struct zoneref *zonerefs;
5693 int nr_zones;
Christoph Lameter523b9452007-10-16 01:25:37 -07005694
Michal Hocko9d3be212017-09-06 16:20:30 -07005695 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5696 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5697 zonerefs += nr_zones;
5698 zonerefs->zone = NULL;
5699 zonerefs->zone_idx = 0;
Christoph Lameter523b9452007-10-16 01:25:37 -07005700}
5701
5702/*
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005703 * Build zonelists ordered by zone and nodes within zones.
5704 * This results in conserving DMA zone[s] until all Normal memory is
5705 * exhausted, but results in overflowing to remote node while memory
5706 * may still exist in local DMA zone.
5707 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005708
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005709static void build_zonelists(pg_data_t *pgdat)
5710{
Michal Hocko9d3be212017-09-06 16:20:30 -07005711 static int node_order[MAX_NUMNODES];
5712 int node, load, nr_nodes = 0;
Wei Yangd0ddf492020-06-03 15:59:05 -07005713 nodemask_t used_mask = NODE_MASK_NONE;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005714 int local_node, prev_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005715
5716 /* NUMA-aware ordering of nodes */
5717 local_node = pgdat->node_id;
Christoph Lameter62bc62a2009-06-16 15:32:15 -07005718 load = nr_online_nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005719 prev_node = local_node;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005720
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005721 memset(node_order, 0, sizeof(node_order));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005722 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5723 /*
5724 * We don't want to pressure a particular node.
5725 * So adding penalty to the first node in same
5726 * distance group to make it round-robin.
5727 */
David Rientjes957f8222012-10-08 16:33:24 -07005728 if (node_distance(local_node, node) !=
5729 node_distance(local_node, prev_node))
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005730 node_load[node] = load;
5731
Michal Hocko9d3be212017-09-06 16:20:30 -07005732 node_order[nr_nodes++] = node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005733 prev_node = node;
5734 load--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005735 }
Christoph Lameter523b9452007-10-16 01:25:37 -07005736
Michal Hocko9d3be212017-09-06 16:20:30 -07005737 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
Christoph Lameter523b9452007-10-16 01:25:37 -07005738 build_thisnode_zonelists(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005739}
5740
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005741#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5742/*
5743 * Return node id of node used for "local" allocations.
5744 * I.e., first node id of first zone in arg node's generic zonelist.
5745 * Used for initializing percpu 'numa_mem', which is used primarily
5746 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5747 */
5748int local_memory_node(int node)
5749{
Mel Gormanc33d6c02016-05-19 17:14:10 -07005750 struct zoneref *z;
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005751
Mel Gormanc33d6c02016-05-19 17:14:10 -07005752 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005753 gfp_zone(GFP_KERNEL),
Mel Gormanc33d6c02016-05-19 17:14:10 -07005754 NULL);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07005755 return zone_to_nid(z->zone);
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005756}
5757#endif
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005758
Joonsoo Kim6423aa82016-08-10 16:27:49 -07005759static void setup_min_unmapped_ratio(void);
5760static void setup_min_slab_ratio(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005761#else /* CONFIG_NUMA */
5762
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005763static void build_zonelists(pg_data_t *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005764{
Christoph Lameter19655d32006-09-25 23:31:19 -07005765 int node, local_node;
Michal Hocko9d3be212017-09-06 16:20:30 -07005766 struct zoneref *zonerefs;
5767 int nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005768
5769 local_node = pgdat->node_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005770
Michal Hocko9d3be212017-09-06 16:20:30 -07005771 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5772 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5773 zonerefs += nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005774
Mel Gorman54a6eb52008-04-28 02:12:16 -07005775 /*
5776 * Now we build the zonelist so that it contains the zones
5777 * of all the other nodes.
5778 * We don't want to pressure a particular node, so when
5779 * building the zones for node N, we make sure that the
5780 * zones coming right after the local ones are those from
5781 * node N+1 (modulo the number of nodes).
5782 */
5783 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5784 if (!node_online(node))
5785 continue;
Michal Hocko9d3be212017-09-06 16:20:30 -07005786 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5787 zonerefs += nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005788 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07005789 for (node = 0; node < local_node; node++) {
5790 if (!node_online(node))
5791 continue;
Michal Hocko9d3be212017-09-06 16:20:30 -07005792 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5793 zonerefs += nr_zones;
Mel Gorman54a6eb52008-04-28 02:12:16 -07005794 }
5795
Michal Hocko9d3be212017-09-06 16:20:30 -07005796 zonerefs->zone = NULL;
5797 zonerefs->zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005798}
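
/*
 * Illustrative sketch, not part of the kernel source (kept under #if 0):
 * on !NUMA the two loops above amount to a plain rotation of the node ids.
 * With four online nodes and local_node == 2 the fallback order is
 * 2, 3, 0, 1.  The helper below is hypothetical and only spells out that
 * arithmetic.
 */
#if 0
static int example_fallback_node(int local_node, int i, int nr_nodes)
{
	/* i-th node in the fallback list, 0 <= i < nr_nodes */
	return (local_node + i) % nr_nodes;
}
#endif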
5799
5800#endif /* CONFIG_NUMA */
5801
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005802/*
5803 * Boot pageset table. One per cpu which is going to be used for all
5804 * zones and all nodes. The parameters will be set in such a way
5805 * that an item put on a list will immediately be handed over to
5806 * the buddy list. This is safe since pageset manipulation is done
5807 * with interrupts disabled.
5808 *
5809 * The boot_pagesets must be kept even after bootup is complete for
5810 * unused processors and/or zones. They do play a role for bootstrapping
5811 * hotplugged processors.
5812 *
5813 * zoneinfo_show() and maybe other functions do
5814 * not check if the processor is online before following the pageset pointer.
5815 * Other parts of the kernel may not check if the zone is available.
5816 */
5817static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5818static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
Johannes Weiner385386c2017-07-06 15:40:43 -07005819static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005820
Michal Hocko11cd8632017-09-06 16:20:34 -07005821static void __build_all_zonelists(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005822{
Yasunori Goto68113782006-06-23 02:03:11 -07005823 int nid;
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005824 int __maybe_unused cpu;
Jiang Liu9adb62a2012-07-31 16:43:28 -07005825 pg_data_t *self = data;
Michal Hockob93e0f32017-09-06 16:20:37 -07005826 static DEFINE_SPINLOCK(lock);
5827
5828 spin_lock(&lock);
Paul Jackson9276b1bc2006-12-06 20:31:48 -08005829
Bo Liu7f9cfb32009-08-18 14:11:19 -07005830#ifdef CONFIG_NUMA
5831 memset(node_load, 0, sizeof(node_load));
5832#endif
Jiang Liu9adb62a2012-07-31 16:43:28 -07005833
Wei Yangc1152582017-09-06 16:19:33 -07005834 /*
5835 * This node has been hot-added and no memory is present yet. So just
5836 * building zonelists is fine - no need to touch other nodes.
5837 */
Jiang Liu9adb62a2012-07-31 16:43:28 -07005838 if (self && !node_online(self->node_id)) {
5839 build_zonelists(self);
Wei Yangc1152582017-09-06 16:19:33 -07005840 } else {
5841 for_each_online_node(nid) {
5842 pg_data_t *pgdat = NODE_DATA(nid);
Jiang Liu9adb62a2012-07-31 16:43:28 -07005843
Wei Yangc1152582017-09-06 16:19:33 -07005844 build_zonelists(pgdat);
5845 }
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005846
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005847#ifdef CONFIG_HAVE_MEMORYLESS_NODES
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005848 /*
5849 * We now know the "local memory node" for each node--
5850 * i.e., the node of the first zone in the generic zonelist.
5851 * Set up numa_mem percpu variable for on-line cpus. During
5852 * boot, only the boot cpu should be on-line; we'll init the
5853 * secondary cpus' numa_mem as they come on-line. During
5854 * node/memory hotplug, we'll fixup all on-line cpus.
5855 */
Michal Hockod9c9a0b2017-09-06 16:20:20 -07005856 for_each_online_cpu(cpu)
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005857 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005858#endif
Michal Hockod9c9a0b2017-09-06 16:20:20 -07005859 }
Michal Hockob93e0f32017-09-06 16:20:37 -07005860
5861 spin_unlock(&lock);
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005862}
5863
5864static noinline void __init
5865build_all_zonelists_init(void)
5866{
5867 int cpu;
5868
5869 __build_all_zonelists(NULL);
5870
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005871 /*
5872 * Initialize the boot_pagesets that are going to be used
5873 * for bootstrapping processors. The real pagesets for
5874 * each zone will be allocated later when the per cpu
5875 * allocator is available.
5876 *
5877 * boot_pagesets are used also for bootstrapping offline
5878 * cpus if the system is already booted because the pagesets
5879 * are needed to initialize allocators on a specific cpu too.
5880 * E.g. the percpu allocator needs the page allocator which
5881 * needs the percpu allocator in order to allocate its pagesets
5882 * (a chicken-egg dilemma).
5883 */
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005884 for_each_possible_cpu(cpu)
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005885 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5886
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005887 mminit_verify_zonelist();
5888 cpuset_init_current_mems_allowed();
5889}
5890
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005891/*
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005892 * (Re)build the zonelists; the __init path is taken only while system_state == SYSTEM_BOOTING.
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005893 *
Michal Hocko72675e12017-09-06 16:20:24 -07005894 * __ref due to call of __init annotated helper build_all_zonelists_init
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005895 * [protected by SYSTEM_BOOTING].
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005896 */
Michal Hocko72675e12017-09-06 16:20:24 -07005897void __ref build_all_zonelists(pg_data_t *pgdat)
Yasunori Goto68113782006-06-23 02:03:11 -07005898{
5899 if (system_state == SYSTEM_BOOTING) {
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005900 build_all_zonelists_init();
Yasunori Goto68113782006-06-23 02:03:11 -07005901 } else {
Michal Hocko11cd8632017-09-06 16:20:34 -07005902 __build_all_zonelists(pgdat);
Yasunori Goto68113782006-06-23 02:03:11 -07005903 /* cpuset refresh routine should be here */
5904 }
Andrew Mortonbd1e22b2006-06-23 02:03:47 -07005905 vm_total_pages = nr_free_pagecache_pages();
Mel Gorman9ef9acb2007-10-16 01:25:54 -07005906 /*
5907 * Disable grouping by mobility if the number of pages in the
5908 * system is too low to allow the mechanism to work. It would be
5909 * more accurate, but expensive to check per-zone. This check is
5910 * made on memory-hotadd so a system can start with mobility
5911 * disabled and enable it later
5912 */
Mel Gormand9c23402007-10-16 01:26:01 -07005913 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
Mel Gorman9ef9acb2007-10-16 01:25:54 -07005914 page_group_by_mobility_disabled = 1;
5915 else
5916 page_group_by_mobility_disabled = 0;
5917
Alexey Dobriyance0725f2019-03-05 15:48:29 -08005918 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
Joe Perches756a0252016-03-17 14:19:47 -07005919 nr_online_nodes,
Joe Perches756a0252016-03-17 14:19:47 -07005920 page_group_by_mobility_disabled ? "off" : "on",
5921 vm_total_pages);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005922#ifdef CONFIG_NUMA
Anton Blanchardf88dfff2014-12-10 15:42:53 -08005923 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005924#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005925}
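
/*
 * Illustrative walk-through, not part of the kernel source: assuming 4KiB
 * pages, 2MiB pageblocks (pageblock_nr_pages == 512) and MIGRATE_TYPES == 6
 * (CMA and page isolation enabled), the check above disables mobility
 * grouping on systems with fewer than 512 * 6 = 3072 pages, i.e. roughly
 * 12MiB of memory.  The exact threshold depends on the architecture and
 * configuration.
 */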
5926
Pavel Tatashina9a9e772018-10-26 15:09:40 -07005927/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
5928static bool __meminit
5929overlap_memmap_init(unsigned long zone, unsigned long *pfn)
5930{
Pavel Tatashina9a9e772018-10-26 15:09:40 -07005931 static struct memblock_region *r;
5932
5933 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5934 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
5935 for_each_memblock(memory, r) {
5936 if (*pfn < memblock_region_memory_end_pfn(r))
5937 break;
5938 }
5939 }
5940 if (*pfn >= memblock_region_memory_base_pfn(r) &&
5941 memblock_is_mirror(r)) {
5942 *pfn = memblock_region_memory_end_pfn(r);
5943 return true;
5944 }
5945 }
Pavel Tatashina9a9e772018-10-26 15:09:40 -07005946 return false;
5947}
5948
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07005950 * Initially all pages are reserved - free ones are freed
Mike Rapoportc6ffc5c2018-10-30 15:09:30 -07005951 * up by memblock_free_all() once the early boot process is
Linus Torvalds1da177e2005-04-16 15:20:36 -07005952 * done. Non-atomic initialization, single-pass.
5953 */
Matt Tolentinoc09b4242006-01-17 07:03:44 +01005954void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
Christoph Hellwiga99583e2017-12-29 08:53:57 +01005955 unsigned long start_pfn, enum memmap_context context,
5956 struct vmem_altmap *altmap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005957{
Pavel Tatashina9a9e772018-10-26 15:09:40 -07005958 unsigned long pfn, end_pfn = start_pfn + size;
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07005959 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005960
Hugh Dickins22b31ee2009-01-06 14:40:09 -08005961 if (highest_memmap_pfn < end_pfn - 1)
5962 highest_memmap_pfn = end_pfn - 1;
5963
Alexander Duyck966cf442018-10-26 15:07:52 -07005964#ifdef CONFIG_ZONE_DEVICE
Dan Williams4b94ffd2016-01-15 16:56:22 -08005965 /*
5966 * Honor reservation requested by the driver for this ZONE_DEVICE
Alexander Duyck966cf442018-10-26 15:07:52 -07005967 * memory. We limit the total number of pages to initialize to just
5968 * those that might contain the memory mapping. We will defer the
5969 * ZONE_DEVICE page initialization until after we have released
5970 * the hotplug lock.
Dan Williams4b94ffd2016-01-15 16:56:22 -08005971 */
Alexander Duyck966cf442018-10-26 15:07:52 -07005972 if (zone == ZONE_DEVICE) {
5973 if (!altmap)
5974 return;
5975
5976 if (start_pfn == altmap->base_pfn)
5977 start_pfn += altmap->reserve;
5978 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
5979 }
5980#endif
Dan Williams4b94ffd2016-01-15 16:56:22 -08005981
David Hildenbrand948c4362020-02-03 17:33:59 -08005982 for (pfn = start_pfn; pfn < end_pfn; ) {
Dave Hansena2f3aa022007-01-10 23:15:30 -08005983 /*
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005984 * There can be holes in boot-time mem_map[]s handed to this
5985 * function. They do not exist on hotplugged memory.
Dave Hansena2f3aa022007-01-10 23:15:30 -08005986 */
Pavel Tatashina9a9e772018-10-26 15:09:40 -07005987 if (context == MEMMAP_EARLY) {
Pavel Tatashina9a9e772018-10-26 15:09:40 -07005988 if (overlap_memmap_init(zone, &pfn))
5989 continue;
5990 if (defer_init(nid, pfn, end_pfn))
5991 break;
Dave Hansena2f3aa022007-01-10 23:15:30 -08005992 }
Mel Gormanac5d2532015-06-30 14:57:20 -07005993
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07005994 page = pfn_to_page(pfn);
5995 __init_single_page(page, pfn, zone, nid);
5996 if (context == MEMMAP_HOTPLUG)
Alexander Duyckd483da52018-10-26 15:07:48 -07005997 __SetPageReserved(page);
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07005998
Mel Gormanac5d2532015-06-30 14:57:20 -07005999 /*
6000 * Mark the block movable so that blocks are reserved for
6001 * movable at startup. This will force kernel allocations
6002 * to reserve their blocks rather than leaking throughout
6003 * the address space during boot when many long-lived
Mel Gorman974a7862015-11-06 16:28:34 -08006004 * kernel allocations are made.
Mel Gormanac5d2532015-06-30 14:57:20 -07006005 *
6006 * The pageblock bitmap is created for the zone's valid pfn range,
6007 * but the memmap can be created for invalid pages (for alignment).
6008 * Check here so that set_pageblock_migratetype() is not called
6009 * against a pfn outside the zone.
6010 */
6011 if (!(pfn & (pageblock_nr_pages - 1))) {
Mel Gormanac5d2532015-06-30 14:57:20 -07006012 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Michal Hocko9b6e63c2017-10-03 16:16:19 -07006013 cond_resched();
Mel Gormanac5d2532015-06-30 14:57:20 -07006014 }
David Hildenbrand948c4362020-02-03 17:33:59 -08006015 pfn++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006016 }
6017}
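
/*
 * Illustrative sketch, not part of the kernel source (kept under #if 0):
 * pageblock_nr_pages is a power of two, so the
 * "!(pfn & (pageblock_nr_pages - 1))" test above is simply a cheap
 * "first pfn of a pageblock" check.  The hypothetical helper below says
 * the same thing with a modulo.
 */
#if 0
static bool example_is_pageblock_start(unsigned long pfn)
{
	return (pfn % pageblock_nr_pages) == 0;
}
#endif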
6018
Alexander Duyck966cf442018-10-26 15:07:52 -07006019#ifdef CONFIG_ZONE_DEVICE
6020void __ref memmap_init_zone_device(struct zone *zone,
6021 unsigned long start_pfn,
Aneesh Kumar K.V1f8d75c2020-02-03 17:34:06 -08006022 unsigned long nr_pages,
Alexander Duyck966cf442018-10-26 15:07:52 -07006023 struct dev_pagemap *pgmap)
6024{
Aneesh Kumar K.V1f8d75c2020-02-03 17:34:06 -08006025 unsigned long pfn, end_pfn = start_pfn + nr_pages;
Alexander Duyck966cf442018-10-26 15:07:52 -07006026 struct pglist_data *pgdat = zone->zone_pgdat;
Christoph Hellwig514caf22019-06-26 14:27:13 +02006027 struct vmem_altmap *altmap = pgmap_altmap(pgmap);
Alexander Duyck966cf442018-10-26 15:07:52 -07006028 unsigned long zone_idx = zone_idx(zone);
6029 unsigned long start = jiffies;
6030 int nid = pgdat->node_id;
6031
Dan Williams46d945a2019-07-18 15:58:18 -07006032 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
Alexander Duyck966cf442018-10-26 15:07:52 -07006033 return;
6034
6035 /*
6036 * The call to memmap_init_zone should have already taken care
6037 * of the pages reserved for the memmap, so we can just jump to
6038 * the end of that region and start processing the device pages.
6039 */
Christoph Hellwig514caf22019-06-26 14:27:13 +02006040 if (altmap) {
Alexander Duyck966cf442018-10-26 15:07:52 -07006041 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
Aneesh Kumar K.V1f8d75c2020-02-03 17:34:06 -08006042 nr_pages = end_pfn - start_pfn;
Alexander Duyck966cf442018-10-26 15:07:52 -07006043 }
6044
6045 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
6046 struct page *page = pfn_to_page(pfn);
6047
6048 __init_single_page(page, pfn, zone_idx, nid);
6049
6050 /*
6051 * Mark the page reserved as it will need to wait for the onlining
6052 * phase before it is fully associated with a zone.
6053 *
6054 * We can use the non-atomic __set_bit operation for setting
6055 * the flag as we are still initializing the pages.
6056 */
6057 __SetPageReserved(page);
6058
6059 /*
Christoph Hellwig8a164fe2019-06-26 14:27:21 +02006060 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6061 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
6062 * ever freed or placed on a driver-private list.
Alexander Duyck966cf442018-10-26 15:07:52 -07006063 */
6064 page->pgmap = pgmap;
Christoph Hellwig8a164fe2019-06-26 14:27:21 +02006065 page->zone_device_data = NULL;
Alexander Duyck966cf442018-10-26 15:07:52 -07006066
6067 /*
6068 * Mark the block movable so that blocks are reserved for
6069 * movable at startup. This will force kernel allocations
6070 * to reserve their blocks rather than leaking throughout
6071 * the address space during boot when many long-lived
6072 * kernel allocations are made.
6073 *
6074 * The pageblock bitmap is created for the zone's valid pfn range,
6075 * but the memmap can be created for invalid pages (for alignment).
6076 * Check here so that set_pageblock_migratetype() is not called
6077 * against a pfn outside the zone.
6078 *
6079 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
Dan Williamsba72b4c2019-07-18 15:58:26 -07006080 * because this is done early in section_activate()
Alexander Duyck966cf442018-10-26 15:07:52 -07006081 */
6082 if (!(pfn & (pageblock_nr_pages - 1))) {
6083 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6084 cond_resched();
6085 }
6086 }
6087
Christoph Hellwigfdc029b2019-08-18 11:05:55 +02006088 pr_info("%s initialised %lu pages in %ums\n", __func__,
Aneesh Kumar K.V1f8d75c2020-02-03 17:34:06 -08006089 nr_pages, jiffies_to_msecs(jiffies - start));
Alexander Duyck966cf442018-10-26 15:07:52 -07006090}
6091
6092#endif
Andi Kleen1e548de2008-02-04 22:29:26 -08006093static void __meminit zone_init_free_lists(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006094{
Mel Gorman7aeb09f2014-06-04 16:10:21 -07006095 unsigned int order, t;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07006096 for_each_migratetype_order(order, t) {
6097 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006098 zone->free_area[order].nr_free = 0;
6099 }
6100}
6101
Pavel Tatashindfb3ccd2018-10-26 15:09:32 -07006102void __meminit __weak memmap_init(unsigned long size, int nid,
Baoquan He73a6e472020-06-03 15:57:55 -07006103 unsigned long zone,
6104 unsigned long range_start_pfn)
Pavel Tatashindfb3ccd2018-10-26 15:09:32 -07006105{
Baoquan He73a6e472020-06-03 15:57:55 -07006106 unsigned long start_pfn, end_pfn;
6107 unsigned long range_end_pfn = range_start_pfn + size;
6108 int i;
6109
6110 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6111 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6112 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6113
6114 if (end_pfn > start_pfn) {
6115 size = end_pfn - start_pfn;
6116 memmap_init_zone(size, nid, zone, start_pfn,
6117 MEMMAP_EARLY, NULL);
6118 }
6119 }
Pavel Tatashindfb3ccd2018-10-26 15:09:32 -07006120}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006121
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006122static int zone_batchsize(struct zone *zone)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006123{
David Howells3a6be872009-05-06 16:03:03 -07006124#ifdef CONFIG_MMU
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006125 int batch;
6126
6127 /*
6128 * The per-cpu-pages pools are set to around 1/1000th of the
Aaron Lud8a759b2018-08-17 15:49:14 -07006129 * size of the zone.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006130 */
Arun KS9705bea2018-12-28 00:34:24 -08006131 batch = zone_managed_pages(zone) / 1024;
Aaron Lud8a759b2018-08-17 15:49:14 -07006132 /* But no more than a meg. */
6133 if (batch * PAGE_SIZE > 1024 * 1024)
6134 batch = (1024 * 1024) / PAGE_SIZE;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006135 batch /= 4; /* We effectively *= 4 below */
6136 if (batch < 1)
6137 batch = 1;
6138
6139 /*
Nick Piggin0ceaacc2005-12-04 13:55:25 +11006140 * Clamp the batch to a 2^n - 1 value. Having a power
6141 * of 2 value was found to be more likely to have
6142 * suboptimal cache aliasing properties in some cases.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006143 *
Nick Piggin0ceaacc2005-12-04 13:55:25 +11006144 * For example if 2 tasks are alternately allocating
6145 * batches of pages, one task can end up with a lot
6146 * of pages of one half of the possible page colors
6147 * and the other with pages of the other colors.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006148 */
David Howells91552032009-05-06 16:03:02 -07006149 batch = rounddown_pow_of_two(batch + batch/2) - 1;
Seth, Rohitba56e912005-10-29 18:15:47 -07006150
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006151 return batch;
David Howells3a6be872009-05-06 16:03:03 -07006152
6153#else
6154 /* The deferral and batching of frees should be suppressed under NOMMU
6155 * conditions.
6156 *
6157 * The problem is that NOMMU needs to be able to allocate large chunks
6158 * of contiguous memory as there's no hardware page translation to
6159 * assemble apparent contiguous memory from discontiguous pages.
6160 *
6161 * Queueing large contiguous runs of pages for batching, however,
6162 * causes the pages to actually be freed in smaller chunks. As there
6163 * can be a significant delay between the individual batches being
6164 * recycled, this leads to the once large chunks of space being
6165 * fragmented and becoming unavailable for high-order allocations.
6166 */
6167 return 0;
6168#endif
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006169}
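
/*
 * Illustrative walk-through, not part of the kernel source: for a zone
 * with roughly 1GiB managed (262144 pages of 4KiB), the MMU path above
 * computes 262144 / 1024 = 256, which does not exceed the 1MiB cap, then
 * 256 / 4 = 64, and rounddown_pow_of_two(64 + 32) - 1 = 63, so such a zone
 * ends up with a per-cpu batch of 63 pages.  The numbers differ for other
 * page or zone sizes.
 */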
6170
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07006171/*
6172 * pcp->high and pcp->batch values are related and dependent on one another:
6173 * ->batch must never be higher than ->high.
6174 * The following function updates them in a safe manner without read side
6175 * locking.
6176 *
6177 * Any new users of pcp->batch and pcp->high should ensure they can cope with
6178 * those fields changing asynchronously (according to the above rule).
6179 *
6180 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6181 * outside of boot time (or some other assurance that no concurrent updaters
6182 * exist).
6183 */
6184static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6185 unsigned long batch)
6186{
6187 /* start with a fail safe value for batch */
6188 pcp->batch = 1;
6189 smp_wmb();
6190
6191 /* Update high, then batch, in order */
6192 pcp->high = high;
6193 smp_wmb();
6194
6195 pcp->batch = batch;
6196}
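
/*
 * Illustrative walk-through, not part of the kernel source, with made-up
 * numbers: updating from (high = 96, batch = 16) to (high = 384, batch = 63)
 * stores batch = 1 first, then high = 384, then batch = 63, with smp_wmb()
 * between the stores.  At every intermediate point the stored pair still
 * satisfies batch <= high, which is the rule lockless readers rely on.
 */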
6197
Cody P Schafer36640332013-07-03 15:01:40 -07006198/* a companion to pageset_set_high() */
Cody P Schafer4008bab2013-07-03 15:01:28 -07006199static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
6200{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07006201 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
Cody P Schafer4008bab2013-07-03 15:01:28 -07006202}
6203
Cody P Schafer88c90db2013-07-03 15:01:35 -07006204static void pageset_init(struct per_cpu_pageset *p)
Christoph Lameter2caaad42005-06-21 17:15:00 -07006205{
6206 struct per_cpu_pages *pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07006207 int migratetype;
Christoph Lameter2caaad42005-06-21 17:15:00 -07006208
Magnus Damm1c6fe942005-10-26 01:58:59 -07006209 memset(p, 0, sizeof(*p));
6210
Christoph Lameter3dfa5722008-02-04 22:29:19 -08006211 pcp = &p->pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07006212 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
6213 INIT_LIST_HEAD(&pcp->lists[migratetype]);
Christoph Lameter2caaad42005-06-21 17:15:00 -07006214}
6215
Cody P Schafer88c90db2013-07-03 15:01:35 -07006216static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
6217{
6218 pageset_init(p);
6219 pageset_set_batch(p, batch);
6220}
6221
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006222/*
Cody P Schafer36640332013-07-03 15:01:40 -07006223 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006224 * to the value high for the pageset p.
6225 */
Cody P Schafer36640332013-07-03 15:01:40 -07006226static void pageset_set_high(struct per_cpu_pageset *p,
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006227 unsigned long high)
6228{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07006229 unsigned long batch = max(1UL, high / 4);
6230 if ((high / 4) > (PAGE_SHIFT * 8))
6231 batch = PAGE_SHIFT * 8;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006232
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07006233 pageset_update(&p->pcp, high, batch);
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006234}
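
/*
 * Illustrative walk-through, not part of the kernel source: if
 * percpu_pagelist_fraction makes high work out to 1024 pages on a
 * 4KiB-page kernel, then high / 4 = 256 exceeds the PAGE_SHIFT * 8 = 96
 * cap, so the pageset is updated to high = 1024 with a batch of 96.
 */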
6235
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006236static void pageset_set_high_and_batch(struct zone *zone,
6237 struct per_cpu_pageset *pcp)
Cody P Schafer56cef2b2013-07-03 15:01:38 -07006238{
Cody P Schafer56cef2b2013-07-03 15:01:38 -07006239 if (percpu_pagelist_fraction)
Cody P Schafer36640332013-07-03 15:01:40 -07006240 pageset_set_high(pcp,
Arun KS9705bea2018-12-28 00:34:24 -08006241 (zone_managed_pages(zone) /
Cody P Schafer56cef2b2013-07-03 15:01:38 -07006242 percpu_pagelist_fraction));
6243 else
6244 pageset_set_batch(pcp, zone_batchsize(zone));
6245}
6246
Cody P Schafer169f6c12013-07-03 15:01:41 -07006247static void __meminit zone_pageset_init(struct zone *zone, int cpu)
6248{
6249 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
6250
6251 pageset_init(pcp);
6252 pageset_set_high_and_batch(zone, pcp);
6253}
6254
Michal Hocko72675e12017-09-06 16:20:24 -07006255void __meminit setup_zone_pageset(struct zone *zone)
Wu Fengguang319774e2010-05-24 14:32:49 -07006256{
6257 int cpu;
Wu Fengguang319774e2010-05-24 14:32:49 -07006258 zone->pageset = alloc_percpu(struct per_cpu_pageset);
Cody P Schafer56cef2b2013-07-03 15:01:38 -07006259 for_each_possible_cpu(cpu)
6260 zone_pageset_init(zone, cpu);
Wu Fengguang319774e2010-05-24 14:32:49 -07006261}
6262
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006263/*
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09006264 * Allocate per cpu pagesets and initialize them.
6265 * Before this call only boot pagesets were available.
Christoph Lameter2caaad42005-06-21 17:15:00 -07006266 */
Al Viro78d99552005-12-15 09:18:25 +00006267void __init setup_per_cpu_pageset(void)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006268{
Mel Gormanb4911ea2016-08-04 15:31:49 -07006269 struct pglist_data *pgdat;
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09006270 struct zone *zone;
Sandipan Dasb418a0f2020-06-03 15:59:11 -07006271 int __maybe_unused cpu;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006272
Wu Fengguang319774e2010-05-24 14:32:49 -07006273 for_each_populated_zone(zone)
6274 setup_zone_pageset(zone);
Mel Gormanb4911ea2016-08-04 15:31:49 -07006275
Sandipan Dasb418a0f2020-06-03 15:59:11 -07006276#ifdef CONFIG_NUMA
6277 /*
6278 * Unpopulated zones continue using the boot pagesets.
6279 * The numa stats for these pagesets need to be reset.
6280 * Otherwise, they will end up skewing the stats of
6281 * the nodes these zones are associated with.
6282 */
6283 for_each_possible_cpu(cpu) {
6284 struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu);
6285 memset(pcp->vm_numa_stat_diff, 0,
6286 sizeof(pcp->vm_numa_stat_diff));
6287 }
6288#endif
6289
Mel Gormanb4911ea2016-08-04 15:31:49 -07006290 for_each_online_pgdat(pgdat)
6291 pgdat->per_cpu_nodestats =
6292 alloc_percpu(struct per_cpu_nodestat);
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006293}
6294
Matt Tolentinoc09b4242006-01-17 07:03:44 +01006295static __meminit void zone_pcp_init(struct zone *zone)
Dave Hansened8ece22005-10-29 18:16:50 -07006296{
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09006297 /*
6298 * per cpu subsystem is not up at this point. The following code
6299 * relies on the ability of the linker to provide the
6300 * offset of a (static) per cpu variable into the per cpu area.
6301 */
6302 zone->pageset = &boot_pageset;
Dave Hansened8ece22005-10-29 18:16:50 -07006303
Xishi Qiub38a8722013-11-12 15:07:20 -08006304 if (populated_zone(zone))
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09006305 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
6306 zone->name, zone->present_pages,
6307 zone_batchsize(zone));
Dave Hansened8ece22005-10-29 18:16:50 -07006308}
6309
Michal Hockodc0bbf32017-07-06 15:37:35 -07006310void __meminit init_currently_empty_zone(struct zone *zone,
Yasunori Goto718127c2006-06-23 02:03:10 -07006311 unsigned long zone_start_pfn,
Yaowei Baib171e402015-11-05 18:47:06 -08006312 unsigned long size)
Dave Hansened8ece22005-10-29 18:16:50 -07006313{
6314 struct pglist_data *pgdat = zone->zone_pgdat;
Wei Yang8f416832018-11-30 14:09:07 -08006315 int zone_idx = zone_idx(zone) + 1;
Linus Torvalds9dcb8b62016-10-26 10:15:30 -07006316
Wei Yang8f416832018-11-30 14:09:07 -08006317 if (zone_idx > pgdat->nr_zones)
6318 pgdat->nr_zones = zone_idx;
Dave Hansened8ece22005-10-29 18:16:50 -07006319
Dave Hansened8ece22005-10-29 18:16:50 -07006320 zone->zone_start_pfn = zone_start_pfn;
6321
Mel Gorman708614e2008-07-23 21:26:51 -07006322 mminit_dprintk(MMINIT_TRACE, "memmap_init",
6323 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
6324 pgdat->node_id,
6325 (unsigned long)zone_idx(zone),
6326 zone_start_pfn, (zone_start_pfn + size));
6327
Andi Kleen1e548de2008-02-04 22:29:26 -08006328 zone_init_free_lists(zone);
Linus Torvalds9dcb8b62016-10-26 10:15:30 -07006329 zone->initialized = 1;
Dave Hansened8ece22005-10-29 18:16:50 -07006330}
6331
Mel Gormanc7132162006-09-27 01:49:43 -07006332/**
Mel Gormanc7132162006-09-27 01:49:43 -07006333 * get_pfn_range_for_nid - Return the start and end page frames for a node
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006334 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
6335 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6336 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
Mel Gormanc7132162006-09-27 01:49:43 -07006337 *
6338 * It returns the start and end page frame of a node based on information
Zhang Zhen7d018172014-06-04 16:10:53 -07006339 * provided by memblock_set_node(). If called for a node
Mel Gormanc7132162006-09-27 01:49:43 -07006340 * with no available memory, a warning is printed and the start and end
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006341 * PFNs will be 0.
Mel Gormanc7132162006-09-27 01:49:43 -07006342 */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006343void __init get_pfn_range_for_nid(unsigned int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07006344 unsigned long *start_pfn, unsigned long *end_pfn)
6345{
Tejun Heoc13291a2011-07-12 10:46:30 +02006346 unsigned long this_start_pfn, this_end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006347 int i;
Tejun Heoc13291a2011-07-12 10:46:30 +02006348
Mel Gormanc7132162006-09-27 01:49:43 -07006349 *start_pfn = -1UL;
6350 *end_pfn = 0;
6351
Tejun Heoc13291a2011-07-12 10:46:30 +02006352 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
6353 *start_pfn = min(*start_pfn, this_start_pfn);
6354 *end_pfn = max(*end_pfn, this_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006355 }
6356
Christoph Lameter633c0662007-10-16 01:25:37 -07006357 if (*start_pfn == -1UL)
Mel Gormanc7132162006-09-27 01:49:43 -07006358 *start_pfn = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07006359}
6360
6361/*
Mel Gorman2a1e2742007-07-17 04:03:12 -07006362 * This finds a zone that can be used for ZONE_MOVABLE pages. The
6363 * assumption is made that zones within a node are ordered in
6364 * monotonically increasing memory addresses so that the "highest" populated zone is used.
6365 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07006366static void __init find_usable_zone_for_movable(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07006367{
6368 int zone_index;
6369 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
6370 if (zone_index == ZONE_MOVABLE)
6371 continue;
6372
6373 if (arch_zone_highest_possible_pfn[zone_index] >
6374 arch_zone_lowest_possible_pfn[zone_index])
6375 break;
6376 }
6377
6378 VM_BUG_ON(zone_index == -1);
6379 movable_zone = zone_index;
6380}
6381
6382/*
6383 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006384 * because it is sized independently of the architecture. Unlike the other zones,
Mel Gorman2a1e2742007-07-17 04:03:12 -07006385 * the starting point for ZONE_MOVABLE is not fixed. It may be different
6386 * in each node depending on the size of each node and how evenly kernelcore
6387 * is distributed. This helper function adjusts the zone ranges
6388 * provided by the architecture for a given node by using the end of the
6389 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6390 * zones within a node are in order of monotonically increasing memory addresses.
6391 */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006392static void __init adjust_zone_range_for_zone_movable(int nid,
Mel Gorman2a1e2742007-07-17 04:03:12 -07006393 unsigned long zone_type,
6394 unsigned long node_start_pfn,
6395 unsigned long node_end_pfn,
6396 unsigned long *zone_start_pfn,
6397 unsigned long *zone_end_pfn)
6398{
6399 /* Only adjust if ZONE_MOVABLE is on this node */
6400 if (zone_movable_pfn[nid]) {
6401 /* Size ZONE_MOVABLE */
6402 if (zone_type == ZONE_MOVABLE) {
6403 *zone_start_pfn = zone_movable_pfn[nid];
6404 *zone_end_pfn = min(node_end_pfn,
6405 arch_zone_highest_possible_pfn[movable_zone]);
6406
Xishi Qiue506b992016-10-07 16:58:06 -07006407 /* Adjust for ZONE_MOVABLE starting within this range */
6408 } else if (!mirrored_kernelcore &&
6409 *zone_start_pfn < zone_movable_pfn[nid] &&
6410 *zone_end_pfn > zone_movable_pfn[nid]) {
6411 *zone_end_pfn = zone_movable_pfn[nid];
6412
Mel Gorman2a1e2742007-07-17 04:03:12 -07006413 /* Check if this whole range is within ZONE_MOVABLE */
6414 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
6415 *zone_start_pfn = *zone_end_pfn;
6416 }
6417}
6418
6419/*
Mel Gormanc7132162006-09-27 01:49:43 -07006420 * Return the number of pages a zone spans in a node, including holes
6421 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
6422 */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006423static unsigned long __init zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07006424 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006425 unsigned long node_start_pfn,
6426 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07006427 unsigned long *zone_start_pfn,
Mike Rapoport854e8842020-06-03 15:58:13 -07006428 unsigned long *zone_end_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07006429{
Linxu Fang299c83d2019-05-13 17:19:17 -07006430 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6431 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
Xishi Qiub5685e92015-09-08 15:04:16 -07006432 /* When hot-adding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07006433 if (!node_start_pfn && !node_end_pfn)
6434 return 0;
6435
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006436 /* Get the start and end of the zone */
Linxu Fang299c83d2019-05-13 17:19:17 -07006437 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6438 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006439 adjust_zone_range_for_zone_movable(nid, zone_type,
6440 node_start_pfn, node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07006441 zone_start_pfn, zone_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006442
6443 /* Check that this node has pages within the zone's required range */
Taku Izumid91749c2016-03-15 14:55:18 -07006444 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07006445 return 0;
6446
6447 /* Move the zone boundaries inside the node if necessary */
Taku Izumid91749c2016-03-15 14:55:18 -07006448 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
6449 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006450
6451 /* Return the spanned pages */
Taku Izumid91749c2016-03-15 14:55:18 -07006452 return *zone_end_pfn - *zone_start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006453}
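
/*
 * Illustrative sketch, not part of the kernel source (kept under #if 0),
 * with made-up pfns: the clamps above simply intersect the node's pfn
 * range with the zone's architecture-wide pfn range.
 */
#if 0
static unsigned long example_zone_span_on_node(void)
{
	/* zone covers [0x100000, 0x800000), node spans [0x300000, 0xa00000) */
	unsigned long zone_low = 0x100000, zone_high = 0x800000;
	unsigned long node_start = 0x300000, node_end = 0xa00000;
	unsigned long start = clamp(node_start, zone_low, zone_high); /* 0x300000 */
	unsigned long end = clamp(node_end, zone_low, zone_high);     /* 0x800000 */

	return end - start;	/* 0x500000 pfns spanned on this node */
}
#endif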
6454
6455/*
6456 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006457 * then all holes in the requested range will be accounted for.
Mel Gormanc7132162006-09-27 01:49:43 -07006458 */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006459unsigned long __init __absent_pages_in_range(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07006460 unsigned long range_start_pfn,
6461 unsigned long range_end_pfn)
6462{
Tejun Heo96e907d2011-07-12 10:46:29 +02006463 unsigned long nr_absent = range_end_pfn - range_start_pfn;
6464 unsigned long start_pfn, end_pfn;
6465 int i;
Mel Gormanc7132162006-09-27 01:49:43 -07006466
Tejun Heo96e907d2011-07-12 10:46:29 +02006467 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6468 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6469 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6470 nr_absent -= end_pfn - start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006471 }
Tejun Heo96e907d2011-07-12 10:46:29 +02006472 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07006473}
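
/*
 * Illustrative walk-through, not part of the kernel source, with made-up
 * pfns: for a request of [1000, 5000) on a node whose memory lies at
 * [0, 2000) and [3000, 6000), the loop above computes
 * 4000 - (2000 - 1000) - (5000 - 3000) = 1000 absent pfns.
 */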
6474
6475/**
6476 * absent_pages_in_range - Return number of page frames in holes within a range
6477 * @start_pfn: The start PFN to start searching for holes
6478 * @end_pfn: The end PFN to stop searching for holes
6479 *
Mike Rapoporta862f682019-03-05 15:48:42 -08006480 * Return: the number of page frames in memory holes within a range.
Mel Gormanc7132162006-09-27 01:49:43 -07006481 */
6482unsigned long __init absent_pages_in_range(unsigned long start_pfn,
6483 unsigned long end_pfn)
6484{
6485 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
6486}
6487
6488/* Return the number of page frames in holes in a zone on a node */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006489static unsigned long __init zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07006490 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006491 unsigned long node_start_pfn,
Mike Rapoport854e8842020-06-03 15:58:13 -07006492 unsigned long node_end_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07006493{
Tejun Heo96e907d2011-07-12 10:46:29 +02006494 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6495 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman9c7cd682006-09-27 01:49:58 -07006496 unsigned long zone_start_pfn, zone_end_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07006497 unsigned long nr_absent;
Mel Gorman9c7cd682006-09-27 01:49:58 -07006498
Xishi Qiub5685e92015-09-08 15:04:16 -07006499 /* When hot-adding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07006500 if (!node_start_pfn && !node_end_pfn)
6501 return 0;
6502
Tejun Heo96e907d2011-07-12 10:46:29 +02006503 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6504 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
Mel Gorman9c7cd682006-09-27 01:49:58 -07006505
Mel Gorman2a1e2742007-07-17 04:03:12 -07006506 adjust_zone_range_for_zone_movable(nid, zone_type,
6507 node_start_pfn, node_end_pfn,
6508 &zone_start_pfn, &zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07006509 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
6510
6511 /*
6512 * ZONE_MOVABLE handling.
6513 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
6514 * and vice versa.
6515 */
Xishi Qiue506b992016-10-07 16:58:06 -07006516 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
6517 unsigned long start_pfn, end_pfn;
6518 struct memblock_region *r;
Taku Izumi342332e2016-03-15 14:55:22 -07006519
Xishi Qiue506b992016-10-07 16:58:06 -07006520 for_each_memblock(memory, r) {
6521 start_pfn = clamp(memblock_region_memory_base_pfn(r),
6522 zone_start_pfn, zone_end_pfn);
6523 end_pfn = clamp(memblock_region_memory_end_pfn(r),
6524 zone_start_pfn, zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07006525
Xishi Qiue506b992016-10-07 16:58:06 -07006526 if (zone_type == ZONE_MOVABLE &&
6527 memblock_is_mirror(r))
6528 nr_absent += end_pfn - start_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07006529
Xishi Qiue506b992016-10-07 16:58:06 -07006530 if (zone_type == ZONE_NORMAL &&
6531 !memblock_is_mirror(r))
6532 nr_absent += end_pfn - start_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07006533 }
6534 }
6535
6536 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07006537}
Mel Gorman0e0b8642006-09-27 01:49:56 -07006538
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006539static void __init calculate_node_totalpages(struct pglist_data *pgdat,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006540 unsigned long node_start_pfn,
Mike Rapoport854e8842020-06-03 15:58:13 -07006541 unsigned long node_end_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07006542{
Gu Zhengfebd5942015-06-24 16:57:02 -07006543 unsigned long realtotalpages = 0, totalpages = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07006544 enum zone_type i;
6545
Gu Zhengfebd5942015-06-24 16:57:02 -07006546 for (i = 0; i < MAX_NR_ZONES; i++) {
6547 struct zone *zone = pgdat->node_zones + i;
Taku Izumid91749c2016-03-15 14:55:18 -07006548 unsigned long zone_start_pfn, zone_end_pfn;
Mike Rapoport3f08a302020-06-03 15:57:02 -07006549 unsigned long spanned, absent;
Gu Zhengfebd5942015-06-24 16:57:02 -07006550 unsigned long size, real_size;
Mel Gormanc7132162006-09-27 01:49:43 -07006551
Mike Rapoport854e8842020-06-03 15:58:13 -07006552 spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
6553 node_start_pfn,
6554 node_end_pfn,
6555 &zone_start_pfn,
6556 &zone_end_pfn);
6557 absent = zone_absent_pages_in_node(pgdat->node_id, i,
6558 node_start_pfn,
6559 node_end_pfn);
Mike Rapoport3f08a302020-06-03 15:57:02 -07006560
6561 size = spanned;
6562 real_size = size - absent;
6563
Taku Izumid91749c2016-03-15 14:55:18 -07006564 if (size)
6565 zone->zone_start_pfn = zone_start_pfn;
6566 else
6567 zone->zone_start_pfn = 0;
Gu Zhengfebd5942015-06-24 16:57:02 -07006568 zone->spanned_pages = size;
6569 zone->present_pages = real_size;
6570
6571 totalpages += size;
6572 realtotalpages += real_size;
6573 }
6574
6575 pgdat->node_spanned_pages = totalpages;
Mel Gormanc7132162006-09-27 01:49:43 -07006576 pgdat->node_present_pages = realtotalpages;
6577 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
6578 realtotalpages);
6579}
6580
Mel Gorman835c1342007-10-16 01:25:47 -07006581#ifndef CONFIG_SPARSEMEM
6582/*
6583 * Calculate the size of the zone->blockflags rounded to an unsigned long.
Mel Gormand9c23402007-10-16 01:26:01 -07006584 * Start by making sure zonesize is a multiple of pageblock_order by rounding
6585 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
Mel Gorman835c1342007-10-16 01:25:47 -07006586 * round what is now in bits up to the nearest long in bits, then return it in
6587 * bytes.
6588 */
Linus Torvalds7c455122013-02-18 09:58:02 -08006589static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07006590{
6591 unsigned long usemapsize;
6592
Linus Torvalds7c455122013-02-18 09:58:02 -08006593 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
Mel Gormand9c23402007-10-16 01:26:01 -07006594 usemapsize = roundup(zonesize, pageblock_nr_pages);
6595 usemapsize = usemapsize >> pageblock_order;
Mel Gorman835c1342007-10-16 01:25:47 -07006596 usemapsize *= NR_PAGEBLOCK_BITS;
6597 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6598
6599 return usemapsize / 8;
6600}
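
/*
 * Illustrative walk-through, not part of the kernel source: assuming 4KiB
 * pages, pageblock_order == 9 (512-page pageblocks), NR_PAGEBLOCK_BITS == 4
 * and a pageblock-aligned 1GiB zone (262144 pages), that is 512 pageblocks
 * of 4 bits each = 2048 bits, already a multiple of the long size, so
 * usemap_size() returns 2048 / 8 = 256 bytes.
 */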
6601
Pavel Tatashin7cc2a952018-08-21 21:53:36 -07006602static void __ref setup_usemap(struct pglist_data *pgdat,
Linus Torvalds7c455122013-02-18 09:58:02 -08006603 struct zone *zone,
6604 unsigned long zone_start_pfn,
6605 unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07006606{
Linus Torvalds7c455122013-02-18 09:58:02 -08006607 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
Mel Gorman835c1342007-10-16 01:25:47 -07006608 zone->pageblock_flags = NULL;
Mike Rapoport23a70522019-03-05 15:46:43 -08006609 if (usemapsize) {
Santosh Shilimkar67828322014-01-21 15:50:25 -08006610 zone->pageblock_flags =
Mike Rapoport26fb3da2019-03-11 23:30:42 -07006611 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
6612 pgdat->node_id);
Mike Rapoport23a70522019-03-05 15:46:43 -08006613 if (!zone->pageblock_flags)
6614 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
6615 usemapsize, zone->name, pgdat->node_id);
6616 }
Mel Gorman835c1342007-10-16 01:25:47 -07006617}
6618#else
Linus Torvalds7c455122013-02-18 09:58:02 -08006619static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6620 unsigned long zone_start_pfn, unsigned long zonesize) {}
Mel Gorman835c1342007-10-16 01:25:47 -07006621#endif /* CONFIG_SPARSEMEM */
6622
Mel Gormand9c23402007-10-16 01:26:01 -07006623#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
Mel Gormanba72cb82007-11-28 16:21:13 -08006624
Mel Gormand9c23402007-10-16 01:26:01 -07006625/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
Oscar Salvador03e85f92018-08-21 21:53:43 -07006626void __init set_pageblock_order(void)
Mel Gormand9c23402007-10-16 01:26:01 -07006627{
Andrew Morton955c1cd2012-05-29 15:06:31 -07006628 unsigned int order;
6629
Mel Gormand9c23402007-10-16 01:26:01 -07006630 /* Check that pageblock_nr_pages has not already been setup */
6631 if (pageblock_order)
6632 return;
6633
Andrew Morton955c1cd2012-05-29 15:06:31 -07006634 if (HPAGE_SHIFT > PAGE_SHIFT)
6635 order = HUGETLB_PAGE_ORDER;
6636 else
6637 order = MAX_ORDER - 1;
6638
Mel Gormand9c23402007-10-16 01:26:01 -07006639 /*
6640 * Assume the largest contiguous order of interest is a huge page.
Andrew Morton955c1cd2012-05-29 15:06:31 -07006641 * This value may be variable depending on boot parameters on IA64 and
6642 * powerpc.
Mel Gormand9c23402007-10-16 01:26:01 -07006643 */
6644 pageblock_order = order;
6645}
6646#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6647
Mel Gormanba72cb82007-11-28 16:21:13 -08006648/*
6649 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
Andrew Morton955c1cd2012-05-29 15:06:31 -07006650 * is unused as pageblock_order is set at compile-time. See
6651 * include/linux/pageblock-flags.h for the values of pageblock_order based on
6652 * the kernel config
Mel Gormanba72cb82007-11-28 16:21:13 -08006653 */
Oscar Salvador03e85f92018-08-21 21:53:43 -07006654void __init set_pageblock_order(void)
Mel Gormanba72cb82007-11-28 16:21:13 -08006655{
Mel Gormanba72cb82007-11-28 16:21:13 -08006656}
Mel Gormand9c23402007-10-16 01:26:01 -07006657
6658#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6659
Oscar Salvador03e85f92018-08-21 21:53:43 -07006660static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
Pavel Tatashin7cc2a952018-08-21 21:53:36 -07006661 unsigned long present_pages)
Jiang Liu01cefae2012-12-12 13:52:19 -08006662{
6663 unsigned long pages = spanned_pages;
6664
6665 /*
6666 * Provide a more accurate estimation if there are holes within
6667 * the zone and SPARSEMEM is in use. If there are holes within the
6668 * zone, each populated memory region may cost us one or two extra
6669 * memmap pages due to alignment because memmap pages for each
Masahiro Yamada89d790a2017-02-27 14:29:01 -08006670 * populated regions may not be naturally aligned on a page boundary.
Jiang Liu01cefae2012-12-12 13:52:19 -08006671 * So the (present_pages >> 4) heuristic is a tradeoff for that.
6672 */
6673 if (spanned_pages > present_pages + (present_pages >> 4) &&
6674 IS_ENABLED(CONFIG_SPARSEMEM))
6675 pages = present_pages;
6676
6677 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6678}
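
/*
 * Illustrative walk-through, not part of the kernel source: assuming a
 * 64-byte struct page and 4KiB pages, a fully populated 1GiB zone
 * (262144 pages) needs 262144 * 64 bytes = 16MiB of memmap, i.e. 4096
 * pages (about 1.6% overhead).  The heuristic above only switches to
 * present_pages when the holes exceed 1/16th of the present pages and
 * SPARSEMEM is enabled.
 */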
6679
Oscar Salvadorace1db32018-08-21 21:53:29 -07006680#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6681static void pgdat_init_split_queue(struct pglist_data *pgdat)
6682{
Yang Shi364c1ee2019-09-23 15:38:06 -07006683 struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
6684
6685 spin_lock_init(&ds_queue->split_queue_lock);
6686 INIT_LIST_HEAD(&ds_queue->split_queue);
6687 ds_queue->split_queue_len = 0;
Oscar Salvadorace1db32018-08-21 21:53:29 -07006688}
6689#else
6690static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
6691#endif
6692
6693#ifdef CONFIG_COMPACTION
6694static void pgdat_init_kcompactd(struct pglist_data *pgdat)
6695{
6696 init_waitqueue_head(&pgdat->kcompactd_wait);
6697}
6698#else
6699static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
6700#endif
6701
Oscar Salvador03e85f92018-08-21 21:53:43 -07006702static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006703{
Dave Hansen208d54e2005-10-29 18:16:52 -07006704 pgdat_resize_init(pgdat);
Oscar Salvadorace1db32018-08-21 21:53:29 -07006705
Oscar Salvadorace1db32018-08-21 21:53:29 -07006706 pgdat_init_split_queue(pgdat);
6707 pgdat_init_kcompactd(pgdat);
6708
Linus Torvalds1da177e2005-04-16 15:20:36 -07006709 init_waitqueue_head(&pgdat->kswapd_wait);
Mel Gorman55150612012-07-31 16:44:35 -07006710 init_waitqueue_head(&pgdat->pfmemalloc_wait);
Oscar Salvadorace1db32018-08-21 21:53:29 -07006711
Joonsoo Kimeefa864b2014-12-12 16:55:46 -08006712 pgdat_page_ext_init(pgdat);
Mel Gormana52633d2016-07-28 15:45:28 -07006713 spin_lock_init(&pgdat->lru_lock);
Johannes Weiner867e5e12019-11-30 17:55:34 -08006714 lruvec_init(&pgdat->__lruvec);
Oscar Salvador03e85f92018-08-21 21:53:43 -07006715}
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01006716
Oscar Salvador03e85f92018-08-21 21:53:43 -07006717static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
6718 unsigned long remaining_pages)
6719{
Arun KS9705bea2018-12-28 00:34:24 -08006720 atomic_long_set(&zone->managed_pages, remaining_pages);
Oscar Salvador03e85f92018-08-21 21:53:43 -07006721 zone_set_nid(zone, nid);
6722 zone->name = zone_names[idx];
6723 zone->zone_pgdat = NODE_DATA(nid);
6724 spin_lock_init(&zone->lock);
6725 zone_seqlock_init(zone);
6726 zone_pcp_init(zone);
6727}
6728
6729/*
6730 * Set up the zone data structures
6731 * - init pgdat internals
6732 * - init all zones belonging to this node
6733 *
6734 * NOTE: this function is only called during memory hotplug
6735 */
6736#ifdef CONFIG_MEMORY_HOTPLUG
6737void __ref free_area_init_core_hotplug(int nid)
6738{
6739 enum zone_type z;
6740 pg_data_t *pgdat = NODE_DATA(nid);
6741
6742 pgdat_init_internals(pgdat);
6743 for (z = 0; z < MAX_NR_ZONES; z++)
6744 zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
6745}
6746#endif
6747
6748/*
6749 * Set up the zone data structures:
6750 * - mark all pages reserved
6751 * - mark all memory queues empty
6752 * - clear the memory bitmaps
6753 *
6754 * NOTE: pgdat should get zeroed by caller.
6755 * NOTE: this function is only called during early init.
6756 */
6757static void __init free_area_init_core(struct pglist_data *pgdat)
6758{
6759 enum zone_type j;
6760 int nid = pgdat->node_id;
6761
6762 pgdat_init_internals(pgdat);
Johannes Weiner385386c2017-07-06 15:40:43 -07006763 pgdat->per_cpu_nodestats = &boot_nodestats;
6764
Linus Torvalds1da177e2005-04-16 15:20:36 -07006765 for (j = 0; j < MAX_NR_ZONES; j++) {
6766 struct zone *zone = pgdat->node_zones + j;
Wei Yange6943852018-06-07 17:06:04 -07006767 unsigned long size, freesize, memmap_pages;
Taku Izumid91749c2016-03-15 14:55:18 -07006768 unsigned long zone_start_pfn = zone->zone_start_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006769
Gu Zhengfebd5942015-06-24 16:57:02 -07006770 size = zone->spanned_pages;
Wei Yange6943852018-06-07 17:06:04 -07006771 freesize = zone->present_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006772
Mel Gorman0e0b8642006-09-27 01:49:56 -07006773 /*
Jiang Liu9feedc92012-12-12 13:52:12 -08006774 * Adjust freesize so that it accounts for how much memory
Mel Gorman0e0b8642006-09-27 01:49:56 -07006775 * is used by this zone for memmap. This affects the watermark
6776 * and per-cpu initialisations
6777 */
Wei Yange6943852018-06-07 17:06:04 -07006778 memmap_pages = calc_memmap_size(size, freesize);
Zhong Hongboba914f42014-12-12 16:56:21 -08006779 if (!is_highmem_idx(j)) {
6780 if (freesize >= memmap_pages) {
6781 freesize -= memmap_pages;
6782 if (memmap_pages)
6783 printk(KERN_DEBUG
6784 " %s zone: %lu pages used for memmap\n",
6785 zone_names[j], memmap_pages);
6786 } else
Joe Perches11705322016-03-17 14:19:50 -07006787 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
Zhong Hongboba914f42014-12-12 16:56:21 -08006788 zone_names[j], memmap_pages, freesize);
6789 }
Mel Gorman0e0b8642006-09-27 01:49:56 -07006790
Christoph Lameter62672762007-02-10 01:43:07 -08006791 /* Account for reserved pages */
Jiang Liu9feedc92012-12-12 13:52:12 -08006792 if (j == 0 && freesize > dma_reserve) {
6793 freesize -= dma_reserve;
Yinghai Lud903ef92008-10-18 20:27:06 -07006794 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
Christoph Lameter62672762007-02-10 01:43:07 -08006795 zone_names[0], dma_reserve);
Mel Gorman0e0b8642006-09-27 01:49:56 -07006796 }
6797
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07006798 if (!is_highmem_idx(j))
Jiang Liu9feedc92012-12-12 13:52:12 -08006799 nr_kernel_pages += freesize;
Jiang Liu01cefae2012-12-12 13:52:19 -08006800 /* Charge for highmem memmap if there are enough kernel pages */
6801 else if (nr_kernel_pages > memmap_pages * 2)
6802 nr_kernel_pages -= memmap_pages;
Jiang Liu9feedc92012-12-12 13:52:12 -08006803 nr_all_pages += freesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006804
Jiang Liu9feedc92012-12-12 13:52:12 -08006805 /*
6806 * Set an approximate value for lowmem here, it will be adjusted
6807 * when the bootmem allocator frees pages into the buddy system.
6808 * And all highmem pages will be managed by the buddy system.
6809 */
Oscar Salvador03e85f92018-08-21 21:53:43 -07006810 zone_init_internals(zone, j, nid, freesize);
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07006811
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09006812 if (!size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006813 continue;
6814
Andrew Morton955c1cd2012-05-29 15:06:31 -07006815 set_pageblock_order();
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09006816 setup_usemap(pgdat, zone, zone_start_pfn, size);
6817 init_currently_empty_zone(zone, zone_start_pfn, size);
Heiko Carstens76cdd582008-05-14 16:05:52 -07006818 memmap_init(size, nid, j, zone_start_pfn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006819 }
6820}
6821
Oscar Salvador0cd842f2017-11-15 17:39:18 -08006822#ifdef CONFIG_FLAT_NODE_MEM_MAP
Fabian Frederickbd721ea2016-08-02 14:03:33 -07006823static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006824{
Tony Luckb0aeba72015-11-10 10:09:47 -08006825 unsigned long __maybe_unused start = 0;
Laura Abbotta1c34a32015-11-05 18:48:46 -08006826 unsigned long __maybe_unused offset = 0;
6827
Linus Torvalds1da177e2005-04-16 15:20:36 -07006828 /* Skip empty nodes */
6829 if (!pgdat->node_spanned_pages)
6830 return;
6831
Tony Luckb0aeba72015-11-10 10:09:47 -08006832 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6833 offset = pgdat->node_start_pfn - start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006834 /* ia64 gets its own node_mem_map, before this, without bootmem */
6835 if (!pgdat->node_mem_map) {
Tony Luckb0aeba72015-11-10 10:09:47 -08006836 unsigned long size, end;
Andy Whitcroftd41dee32005-06-23 00:07:54 -07006837 struct page *map;
6838
Bob Piccoe984bb42006-05-20 15:00:31 -07006839 /*
6840 * The zone's endpoints aren't required to be MAX_ORDER
6841 * aligned, but the node_mem_map endpoints must be, in order
6842 * for the buddy allocator to function correctly.
6843 */
Cody P Schafer108bcc92013-02-22 16:35:23 -08006844 end = pgdat_end_pfn(pgdat);
Bob Piccoe984bb42006-05-20 15:00:31 -07006845 end = ALIGN(end, MAX_ORDER_NR_PAGES);
6846 size = (end - start) * sizeof(struct page);
Mike Rapoport26fb3da2019-03-11 23:30:42 -07006847 map = memblock_alloc_node(size, SMP_CACHE_BYTES,
6848 pgdat->node_id);
Mike Rapoport23a70522019-03-05 15:46:43 -08006849 if (!map)
6850 panic("Failed to allocate %ld bytes for node %d memory map\n",
6851 size, pgdat->node_id);
Laura Abbotta1c34a32015-11-05 18:48:46 -08006852 pgdat->node_mem_map = map + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006853 }
Oscar Salvador0cd842f2017-11-15 17:39:18 -08006854 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
6855 __func__, pgdat->node_id, (unsigned long)pgdat,
6856 (unsigned long)pgdat->node_mem_map);
Roman Zippel12d810c2007-05-31 00:40:54 -07006857#ifndef CONFIG_NEED_MULTIPLE_NODES
Linus Torvalds1da177e2005-04-16 15:20:36 -07006858 /*
6859 * With no DISCONTIG, the global mem_map is just set as node 0's
6860 */
Mel Gormanc7132162006-09-27 01:49:43 -07006861 if (pgdat == NODE_DATA(0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006862 mem_map = NODE_DATA(0)->node_mem_map;
Mel Gormanc7132162006-09-27 01:49:43 -07006863 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
Laura Abbotta1c34a32015-11-05 18:48:46 -08006864 mem_map -= offset;
Mel Gormanc7132162006-09-27 01:49:43 -07006865 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006866#endif
6867}
Oscar Salvador0cd842f2017-11-15 17:39:18 -08006868#else
6869static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
6870#endif /* CONFIG_FLAT_NODE_MEM_MAP */
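
/*
 * Illustrative walk-through, not part of the kernel source: assuming
 * MAX_ORDER == 11 and 4KiB pages, MAX_ORDER_NR_PAGES is 1024 (0x400), so a
 * node starting at pfn 0x10203 has its node_mem_map based at pfn 0x10000
 * and its own pages begin 0x203 struct pages into that map; the end pfn is
 * rounded up the same way so buddy merging never indexes outside the array.
 */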
Linus Torvalds1da177e2005-04-16 15:20:36 -07006871
Oscar Salvador0188dc92018-08-21 21:53:39 -07006872#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
6873static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
6874{
Oscar Salvador0188dc92018-08-21 21:53:39 -07006875 pgdat->first_deferred_pfn = ULONG_MAX;
6876}
6877#else
6878static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
6879#endif
6880
Mike Rapoport854e8842020-06-03 15:58:13 -07006881static void __init free_area_init_node(int nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006882{
Johannes Weiner9109fb72008-07-23 21:27:20 -07006883 pg_data_t *pgdat = NODE_DATA(nid);
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006884 unsigned long start_pfn = 0;
6885 unsigned long end_pfn = 0;
Johannes Weiner9109fb72008-07-23 21:27:20 -07006886
Minchan Kim88fdf752012-07-31 16:46:14 -07006887 /* pg_data_t should be reset to zero when it's allocated */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07006888 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
Minchan Kim88fdf752012-07-31 16:46:14 -07006889
Mike Rapoport854e8842020-06-03 15:58:13 -07006890 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006891
Linus Torvalds1da177e2005-04-16 15:20:36 -07006892 pgdat->node_id = nid;
Mike Rapoport854e8842020-06-03 15:58:13 -07006893 pgdat->node_start_pfn = start_pfn;
Mel Gorman75ef7182016-07-28 15:45:24 -07006894 pgdat->per_cpu_nodestats = NULL;
Mike Rapoport854e8842020-06-03 15:58:13 -07006895
Juergen Gross8d29e182015-02-11 15:26:01 -08006896 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
Zhen Lei4ada0c52015-09-08 15:04:19 -07006897 (u64)start_pfn << PAGE_SHIFT,
6898 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
Mike Rapoport854e8842020-06-03 15:58:13 -07006899 calculate_node_totalpages(pgdat, start_pfn, end_pfn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006900
6901 alloc_node_mem_map(pgdat);
Oscar Salvador0188dc92018-08-21 21:53:39 -07006902 pgdat_set_deferred_range(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006903
Wei Yang7f3eb552015-09-08 14:59:50 -07006904 free_area_init_core(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006905}
6906
Mike Rapoportbc9331a2020-06-03 15:58:09 -07006907void __init free_area_init_memoryless_node(int nid)
Mike Rapoport3f08a302020-06-03 15:57:02 -07006908{
Mike Rapoport854e8842020-06-03 15:58:13 -07006909 free_area_init_node(nid);
Mike Rapoport3f08a302020-06-03 15:57:02 -07006910}
6911
Mike Rapoportaca52c32018-10-30 15:07:44 -07006912#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
Pavel Tatashinec393a0f2018-10-26 15:10:21 -07006913/*
David Hildenbrand4b094b72020-02-03 17:33:55 -08006914 * Initialize all valid struct pages in the range [spfn, epfn) and mark them
6915 * PageReserved(). Return the number of struct pages that were initialized.
Pavel Tatashinec393a0f2018-10-26 15:10:21 -07006916 */
David Hildenbrand4b094b72020-02-03 17:33:55 -08006917static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
Pavel Tatashinec393a0f2018-10-26 15:10:21 -07006918{
6919 unsigned long pfn;
6920 u64 pgcnt = 0;
6921
6922 for (pfn = spfn; pfn < epfn; pfn++) {
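		/*
		 * Whole pageblock without a valid memmap: skip ahead to the
		 * pageblock's last pfn so the loop increment moves straight on
		 * to the next pageblock.
		 */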
6923 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6924 pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6925 + pageblock_nr_pages - 1;
6926 continue;
6927 }
David Hildenbrand4b094b72020-02-03 17:33:55 -08006928 /*
6929 * Use a fake node/zone (0) for now. Some of these pages
6930 * (in memblock.reserved but not in memblock.memory) will
6931 * get re-initialized via reserve_bootmem_region() later.
6932 */
6933 __init_single_page(pfn_to_page(pfn), pfn, 0, 0);
6934 __SetPageReserved(pfn_to_page(pfn));
Pavel Tatashinec393a0f2018-10-26 15:10:21 -07006935 pgcnt++;
6936 }
6937
6938 return pgcnt;
6939}
6940
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006941/*
6942 * Only struct pages that are backed by physical memory are zeroed and
6943 * initialized by going through __init_single_page(). But, there are some
6944 * struct pages which are reserved in memblock allocator and their fields
6945 * may be accessed (for example, page_to_pfn() accesses the flags field on
David Hildenbrand4b094b72020-02-03 17:33:55 -08006946 * some configurations). We must explicitly initialize those struct pages.
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006947 *
6948 * This function also addresses a similar issue where struct pages are left
6949 * uninitialized because the physical address range is not covered by
6950 * memblock.memory or memblock.reserved. That could happen when memblock
David Hildenbrande8229692020-02-03 17:33:48 -08006951 * layout is manually configured via memmap=, or when the highest physical
6952 * address (max_pfn) does not end on a section boundary.
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006953 */
David Hildenbrand4b094b72020-02-03 17:33:55 -08006954static void __init init_unavailable_mem(void)
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006955{
6956 phys_addr_t start, end;
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006957 u64 i, pgcnt;
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006958 phys_addr_t next = 0;
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006959
6960 /*
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006961 * Loop through unavailable ranges not covered by memblock.memory.
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006962 */
6963 pgcnt = 0;
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006964 for_each_mem_range(i, &memblock.memory, NULL,
6965 NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
Pavel Tatashinec393a0f2018-10-26 15:10:21 -07006966 if (next < start)
David Hildenbrand4b094b72020-02-03 17:33:55 -08006967 pgcnt += init_unavailable_range(PFN_DOWN(next),
6968 PFN_UP(start));
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006969 next = end;
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006970 }
David Hildenbrande8229692020-02-03 17:33:48 -08006971
6972 /*
6973 * Early sections always have a fully populated memmap for the whole
6974 * section - see pfn_valid(). If the last section has holes at the
6975 * end and that section is marked "online", the memmap will be
6976 * considered initialized. Make sure that memmap has a well defined
6977 * state.
6978 */
David Hildenbrand4b094b72020-02-03 17:33:55 -08006979 pgcnt += init_unavailable_range(PFN_DOWN(next),
6980 round_up(max_pfn, PAGES_PER_SECTION));
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006981
6982 /*
6983 * Struct pages that do not have backing memory. This could be because
6984 * firmware is using some of this memory, or for some other reasons.
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006985 */
6986 if (pgcnt)
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006987 pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006988}
David Hildenbrand4b094b72020-02-03 17:33:55 -08006989#else
6990static inline void __init init_unavailable_mem(void)
6991{
6992}
Mike Rapoportaca52c32018-10-30 15:07:44 -07006993#endif /* !CONFIG_FLAT_NODE_MEM_MAP */
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006994
Miklos Szeredi418508c2007-05-23 13:57:55 -07006995#if MAX_NUMNODES > 1
6996/*
6997 * Figure out the number of possible node ids.
6998 */
Cody P Schaferf9872ca2013-04-29 15:08:01 -07006999void __init setup_nr_node_ids(void)
Miklos Szeredi418508c2007-05-23 13:57:55 -07007000{
Wei Yang904a9552015-09-08 14:59:48 -07007001 unsigned int highest;
Miklos Szeredi418508c2007-05-23 13:57:55 -07007002
Wei Yang904a9552015-09-08 14:59:48 -07007003 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
Miklos Szeredi418508c2007-05-23 13:57:55 -07007004 nr_node_ids = highest + 1;
7005}
Miklos Szeredi418508c2007-05-23 13:57:55 -07007006#endif
7007
Mel Gormanc7132162006-09-27 01:49:43 -07007008/**
Tejun Heo1e019792011-07-12 09:45:34 +02007009 * node_map_pfn_alignment - determine the maximum internode alignment
7010 *
7011 * This function should be called after node map is populated and sorted.
7012 * It calculates the maximum power of two alignment which can distinguish
7013 * all the nodes.
7014 *
7015 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7016 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
7017 * nodes are shifted by 256MiB, 256MiB is returned instead. Note that if
7018 * only the last node is shifted, 1GiB is enough and this function will say so.
7019 *
7020 * This is used to test whether pfn -> nid mapping of the chosen memory
7021 * model has fine enough granularity to avoid incorrect mapping for the
7022 * populated node map.
7023 *
Mike Rapoporta862f682019-03-05 15:48:42 -08007024 * Return: the determined alignment in pfn's. 0 if there is no alignment
Tejun Heo1e019792011-07-12 09:45:34 +02007025 * requirement (single node).
7026 */
7027unsigned long __init node_map_pfn_alignment(void)
7028{
7029 unsigned long accl_mask = 0, last_end = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02007030 unsigned long start, end, mask;
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08007031 int last_nid = NUMA_NO_NODE;
Tejun Heoc13291a2011-07-12 10:46:30 +02007032 int i, nid;
Tejun Heo1e019792011-07-12 09:45:34 +02007033
Tejun Heoc13291a2011-07-12 10:46:30 +02007034 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
Tejun Heo1e019792011-07-12 09:45:34 +02007035 if (!start || last_nid < 0 || last_nid == nid) {
7036 last_nid = nid;
7037 last_end = end;
7038 continue;
7039 }
7040
7041 /*
7042 * Start with a mask granular enough to pinpoint the
7043 * start pfn and tick off bits one-by-one until it becomes
7044 * too coarse to separate the current node from the last.
7045 */
7046 mask = ~((1 << __ffs(start)) - 1);
7047 while (mask && last_end <= (start & (mask << 1)))
7048 mask <<= 1;
7049
7050 /* accumulate all internode masks */
7051 accl_mask |= mask;
7052 }
7053
7054 /* convert mask to number of pages */
7055 return ~accl_mask + 1;
7056}
7057
Mel Gormanc7132162006-09-27 01:49:43 -07007058/**
7059 * find_min_pfn_with_active_regions - Find the minimum PFN registered
7060 *
Mike Rapoporta862f682019-03-05 15:48:42 -08007061 * Return: the minimum PFN based on information provided via
Zhang Zhen7d018172014-06-04 16:10:53 -07007062 * memblock_set_node().
Mel Gormanc7132162006-09-27 01:49:43 -07007063 */
7064unsigned long __init find_min_pfn_with_active_regions(void)
7065{
Mike Rapoport8a1b25f2020-06-03 15:58:18 -07007066 return PHYS_PFN(memblock_start_of_DRAM());
Mel Gormanc7132162006-09-27 01:49:43 -07007067}
7068
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007069/*
7070 * early_calculate_totalpages()
7071 * Sum pages in active regions for movable zone.
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007072 * Populate N_MEMORY for calculating usable_nodes.
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007073 */
Adrian Bunk484f51f2007-10-16 01:26:03 -07007074static unsigned long __init early_calculate_totalpages(void)
Mel Gorman7e63efe2007-07-17 04:03:15 -07007075{
Mel Gorman7e63efe2007-07-17 04:03:15 -07007076 unsigned long totalpages = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02007077 unsigned long start_pfn, end_pfn;
7078 int i, nid;
Mel Gorman7e63efe2007-07-17 04:03:15 -07007079
Tejun Heoc13291a2011-07-12 10:46:30 +02007080 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7081 unsigned long pages = end_pfn - start_pfn;
7082
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007083 totalpages += pages;
7084 if (pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007085 node_set_state(nid, N_MEMORY);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007086 }
Pintu Kumarb8af2942013-09-11 14:20:34 -07007087 return totalpages;
Mel Gorman7e63efe2007-07-17 04:03:15 -07007088}
7089
Mel Gorman2a1e2742007-07-17 04:03:12 -07007090/*
7091 * Find the PFN the Movable zone begins in each node. Kernel memory
7092 * is spread evenly between nodes as long as the nodes have enough
7093 * memory. When they don't, some nodes will have more kernelcore than
7094 * others
7095 */
Kautuk Consulb224ef82012-03-21 16:34:15 -07007096static void __init find_zone_movable_pfns_for_nodes(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07007097{
7098 int i, nid;
7099 unsigned long usable_startpfn;
7100 unsigned long kernelcore_node, kernelcore_remaining;
Yinghai Lu66918dc2009-06-30 11:41:37 -07007101 /* save the state before borrowing the nodemask */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007102 nodemask_t saved_node_state = node_states[N_MEMORY];
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007103 unsigned long totalpages = early_calculate_totalpages();
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007104 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
Emil Medve136199f2014-04-07 15:37:52 -07007105 struct memblock_region *r;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08007106
7107 /* Need to find movable_zone earlier when movable_node is specified. */
7108 find_usable_zone_for_movable();
Mel Gorman2a1e2742007-07-17 04:03:12 -07007109
Mel Gorman7e63efe2007-07-17 04:03:15 -07007110 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08007111 * If movable_node is specified, ignore kernelcore and movablecore
7112 * options.
7113 */
7114 if (movable_node_is_enabled()) {
Emil Medve136199f2014-04-07 15:37:52 -07007115 for_each_memblock(memory, r) {
7116 if (!memblock_is_hotpluggable(r))
Tang Chenb2f3eeb2014-01-21 15:49:38 -08007117 continue;
7118
Mike Rapoportd622abf2020-06-03 15:56:53 -07007119 nid = memblock_get_region_node(r);
Tang Chenb2f3eeb2014-01-21 15:49:38 -08007120
Emil Medve136199f2014-04-07 15:37:52 -07007121 usable_startpfn = PFN_DOWN(r->base);
Tang Chenb2f3eeb2014-01-21 15:49:38 -08007122 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7123 min(usable_startpfn, zone_movable_pfn[nid]) :
7124 usable_startpfn;
7125 }
7126
7127 goto out2;
7128 }
7129
7130 /*
Taku Izumi342332e2016-03-15 14:55:22 -07007131 * If kernelcore=mirror is specified, ignore movablecore option
7132 */
7133 if (mirrored_kernelcore) {
7134 bool mem_below_4gb_not_mirrored = false;
7135
7136 for_each_memblock(memory, r) {
7137 if (memblock_is_mirror(r))
7138 continue;
7139
Mike Rapoportd622abf2020-06-03 15:56:53 -07007140 nid = memblock_get_region_node(r);
Taku Izumi342332e2016-03-15 14:55:22 -07007141
7142 usable_startpfn = memblock_region_memory_base_pfn(r);
7143
7144 if (usable_startpfn < 0x100000) {
7145 mem_below_4gb_not_mirrored = true;
7146 continue;
7147 }
7148
7149 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7150 min(usable_startpfn, zone_movable_pfn[nid]) :
7151 usable_startpfn;
7152 }
7153
7154 if (mem_below_4gb_not_mirrored)
Chen Tao633bf2f2020-06-03 16:00:02 -07007155 pr_warn("This configuration results in unmirrored kernel memory.\n");
Taku Izumi342332e2016-03-15 14:55:22 -07007156
7157 goto out2;
7158 }
7159
7160 /*
David Rientjesa5c6d652018-04-05 16:23:09 -07007161 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
7162 * amount of necessary memory.
7163 */
7164 if (required_kernelcore_percent)
7165 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
7166 10000UL;
7167 if (required_movablecore_percent)
7168 required_movablecore = (totalpages * 100 * required_movablecore_percent) /
7169 10000UL;
7170
7171 /*
7172 * If movablecore= was specified, calculate the corresponding size of
Mel Gorman7e63efe2007-07-17 04:03:15 -07007173 * kernelcore so that memory usable for
7174 * any allocation type is evenly spread. If both kernelcore
7175 * and movablecore are specified, then the value of kernelcore
7176 * will be used for required_kernelcore if it's greater than
7177 * what movablecore would have allowed.
7178 */
7179 if (required_movablecore) {
Mel Gorman7e63efe2007-07-17 04:03:15 -07007180 unsigned long corepages;
7181
7182 /*
7183 * Round-up so that ZONE_MOVABLE is at least as large as what
7184 * was requested by the user
7185 */
7186 required_movablecore =
7187 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
Xishi Qiu9fd745d2015-11-05 18:48:11 -08007188 required_movablecore = min(totalpages, required_movablecore);
Mel Gorman7e63efe2007-07-17 04:03:15 -07007189 corepages = totalpages - required_movablecore;
7190
7191 required_kernelcore = max(required_kernelcore, corepages);
7192 }
7193
Xishi Qiubde304b2015-11-05 18:48:56 -08007194 /*
7195 * If kernelcore was not specified or kernelcore size is larger
7196 * than totalpages, there is no ZONE_MOVABLE.
7197 */
7198 if (!required_kernelcore || required_kernelcore >= totalpages)
Yinghai Lu66918dc2009-06-30 11:41:37 -07007199 goto out;
Mel Gorman2a1e2742007-07-17 04:03:12 -07007200
7201 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
Mel Gorman2a1e2742007-07-17 04:03:12 -07007202 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7203
7204restart:
7205 /* Spread kernelcore memory as evenly as possible throughout nodes */
7206 kernelcore_node = required_kernelcore / usable_nodes;
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007207 for_each_node_state(nid, N_MEMORY) {
Tejun Heoc13291a2011-07-12 10:46:30 +02007208 unsigned long start_pfn, end_pfn;
7209
Mel Gorman2a1e2742007-07-17 04:03:12 -07007210 /*
7211 * Recalculate kernelcore_node if the division per node
7212 * now exceeds what is necessary to satisfy the requested
7213 * amount of memory for the kernel
7214 */
7215 if (required_kernelcore < kernelcore_node)
7216 kernelcore_node = required_kernelcore / usable_nodes;
7217
7218 /*
7219 * As the map is walked, we track how much memory is usable
7220 * by the kernel using kernelcore_remaining. When it is
7221 * 0, the rest of the node is usable by ZONE_MOVABLE
7222 */
7223 kernelcore_remaining = kernelcore_node;
7224
7225 /* Go through each range of PFNs within this node */
Tejun Heoc13291a2011-07-12 10:46:30 +02007226 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07007227 unsigned long size_pages;
7228
Tejun Heoc13291a2011-07-12 10:46:30 +02007229 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
Mel Gorman2a1e2742007-07-17 04:03:12 -07007230 if (start_pfn >= end_pfn)
7231 continue;
7232
7233 /* Account for what is only usable for kernelcore */
7234 if (start_pfn < usable_startpfn) {
7235 unsigned long kernel_pages;
7236 kernel_pages = min(end_pfn, usable_startpfn)
7237 - start_pfn;
7238
7239 kernelcore_remaining -= min(kernel_pages,
7240 kernelcore_remaining);
7241 required_kernelcore -= min(kernel_pages,
7242 required_kernelcore);
7243
7244 /* Continue if range is now fully accounted */
7245 if (end_pfn <= usable_startpfn) {
7246
7247 /*
7248 * Push zone_movable_pfn to the end so
7249 * that if we have to rebalance
7250 * kernelcore across nodes, we will
7251 * not double account here
7252 */
7253 zone_movable_pfn[nid] = end_pfn;
7254 continue;
7255 }
7256 start_pfn = usable_startpfn;
7257 }
7258
7259 /*
7260 * The usable PFN range for ZONE_MOVABLE is from
7261 * start_pfn->end_pfn. Calculate size_pages as the
7262 * number of pages used as kernelcore
7263 */
7264 size_pages = end_pfn - start_pfn;
7265 if (size_pages > kernelcore_remaining)
7266 size_pages = kernelcore_remaining;
7267 zone_movable_pfn[nid] = start_pfn + size_pages;
7268
7269 /*
7270 * Some kernelcore has been met, update counts and
7271 * break if the kernelcore for this node has been
Pintu Kumarb8af2942013-09-11 14:20:34 -07007272 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07007273 */
7274 required_kernelcore -= min(required_kernelcore,
7275 size_pages);
7276 kernelcore_remaining -= size_pages;
7277 if (!kernelcore_remaining)
7278 break;
7279 }
7280 }
7281
7282 /*
7283 * If there is still required_kernelcore, we do another pass with one
7284 * less node in the count. This will push zone_movable_pfn[nid] further
7285 * along on the nodes that still have memory until kernelcore is
Pintu Kumarb8af2942013-09-11 14:20:34 -07007286 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07007287 */
7288 usable_nodes--;
7289 if (usable_nodes && required_kernelcore > usable_nodes)
7290 goto restart;
7291
Tang Chenb2f3eeb2014-01-21 15:49:38 -08007292out2:
Mel Gorman2a1e2742007-07-17 04:03:12 -07007293 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7294 for (nid = 0; nid < MAX_NUMNODES; nid++)
7295 zone_movable_pfn[nid] =
7296 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
Yinghai Lu66918dc2009-06-30 11:41:37 -07007297
Yinghai Lu20e69262013-03-01 14:51:27 -08007298out:
Yinghai Lu66918dc2009-06-30 11:41:37 -07007299 /* restore the node_state */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007300 node_states[N_MEMORY] = saved_node_state;
Mel Gorman2a1e2742007-07-17 04:03:12 -07007301}
7302
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007303/* Any regular or high memory on that node? */
7304static void check_for_memory(pg_data_t *pgdat, int nid)
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007305{
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007306 enum zone_type zone_type;
7307
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007308 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007309 struct zone *zone = &pgdat->node_zones[zone_type];
Xishi Qiub38a8722013-11-12 15:07:20 -08007310 if (populated_zone(zone)) {
Oscar Salvador7b0e0c02018-10-26 15:03:58 -07007311 if (IS_ENABLED(CONFIG_HIGHMEM))
7312 node_set_state(nid, N_HIGH_MEMORY);
7313 if (zone_type <= ZONE_NORMAL)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007314 node_set_state(nid, N_NORMAL_MEMORY);
Bob Liud0048b02012-01-12 17:19:07 -08007315 break;
7316 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007317 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007318}
7319
Mike Rapoport51930df2020-06-03 15:58:03 -07007320/*
7321 * Some architecturs, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
7322 * such cases we allow max_zone_pfn sorted in the descending order
7323 */
7324bool __weak arch_has_descending_max_zone_pfns(void)
7325{
7326 return false;
7327}
7328
Mel Gormanc7132162006-09-27 01:49:43 -07007329/**
Mike Rapoport9691a072020-06-03 15:57:10 -07007330 * free_area_init - Initialise all pg_data_t and zone data
Randy Dunlap88ca3b92006-10-04 02:15:25 -07007331 * @max_zone_pfn: an array of max PFNs for each zone
Mel Gormanc7132162006-09-27 01:49:43 -07007332 *
7333 * This will call free_area_init_node() for each active node in the system.
Zhang Zhen7d018172014-06-04 16:10:53 -07007334 * Using the page ranges provided by memblock_set_node(), the size of each
Mel Gormanc7132162006-09-27 01:49:43 -07007335 * zone in each node and their holes is calculated. If the maximum PFN
7336 * between two adjacent zones match, it is assumed that the zone is empty.
7337 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7338 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7339 * starts where the previous one ended. For example, ZONE_DMA32 starts
7340 * at arch_max_dma_pfn.
7341 */
Mike Rapoport9691a072020-06-03 15:57:10 -07007342void __init free_area_init(unsigned long *max_zone_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07007343{
Tejun Heoc13291a2011-07-12 10:46:30 +02007344 unsigned long start_pfn, end_pfn;
Mike Rapoport51930df2020-06-03 15:58:03 -07007345 int i, nid, zone;
7346 bool descending;
Mel Gormana6af2bc2007-02-10 01:42:57 -08007347
Mel Gormanc7132162006-09-27 01:49:43 -07007348 /* Record where the zone boundaries are */
7349 memset(arch_zone_lowest_possible_pfn, 0,
7350 sizeof(arch_zone_lowest_possible_pfn));
7351 memset(arch_zone_highest_possible_pfn, 0,
7352 sizeof(arch_zone_highest_possible_pfn));
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07007353
7354 start_pfn = find_min_pfn_with_active_regions();
Mike Rapoport51930df2020-06-03 15:58:03 -07007355 descending = arch_has_descending_max_zone_pfns();
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07007356
7357 for (i = 0; i < MAX_NR_ZONES; i++) {
Mike Rapoport51930df2020-06-03 15:58:03 -07007358 if (descending)
7359 zone = MAX_NR_ZONES - i - 1;
7360 else
7361 zone = i;
7362
7363 if (zone == ZONE_MOVABLE)
Mel Gorman2a1e2742007-07-17 04:03:12 -07007364 continue;
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07007365
Mike Rapoport51930df2020-06-03 15:58:03 -07007366 end_pfn = max(max_zone_pfn[zone], start_pfn);
7367 arch_zone_lowest_possible_pfn[zone] = start_pfn;
7368 arch_zone_highest_possible_pfn[zone] = end_pfn;
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07007369
7370 start_pfn = end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07007371 }
Mel Gorman2a1e2742007-07-17 04:03:12 -07007372
7373 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
7374 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
Kautuk Consulb224ef82012-03-21 16:34:15 -07007375 find_zone_movable_pfns_for_nodes();
Mel Gormanc7132162006-09-27 01:49:43 -07007376
Mel Gormanc7132162006-09-27 01:49:43 -07007377 /* Print out the zone ranges */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08007378 pr_info("Zone ranges:\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07007379 for (i = 0; i < MAX_NR_ZONES; i++) {
7380 if (i == ZONE_MOVABLE)
7381 continue;
Anton Blanchardf88dfff2014-12-10 15:42:53 -08007382 pr_info(" %-8s ", zone_names[i]);
David Rientjes72f0ba02010-03-05 13:42:14 -08007383 if (arch_zone_lowest_possible_pfn[i] ==
7384 arch_zone_highest_possible_pfn[i])
Anton Blanchardf88dfff2014-12-10 15:42:53 -08007385 pr_cont("empty\n");
David Rientjes72f0ba02010-03-05 13:42:14 -08007386 else
Juergen Gross8d29e182015-02-11 15:26:01 -08007387 pr_cont("[mem %#018Lx-%#018Lx]\n",
7388 (u64)arch_zone_lowest_possible_pfn[i]
7389 << PAGE_SHIFT,
7390 ((u64)arch_zone_highest_possible_pfn[i]
Bjorn Helgaasa62e2f42012-05-29 15:06:30 -07007391 << PAGE_SHIFT) - 1);
Mel Gorman2a1e2742007-07-17 04:03:12 -07007392 }
7393
7394 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08007395 pr_info("Movable zone start for each node\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07007396 for (i = 0; i < MAX_NUMNODES; i++) {
7397 if (zone_movable_pfn[i])
Juergen Gross8d29e182015-02-11 15:26:01 -08007398 pr_info(" Node %d: %#018Lx\n", i,
7399 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
Mel Gorman2a1e2742007-07-17 04:03:12 -07007400 }
Mel Gormanc7132162006-09-27 01:49:43 -07007401
Dan Williamsf46edbd2019-07-18 15:58:04 -07007402 /*
7403 * Print out the early node map, and initialize the
7404 * subsection-map relative to active online memory ranges to
7405 * enable future "sub-section" extensions of the memory map.
7406 */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08007407 pr_info("Early memory node ranges\n");
Dan Williamsf46edbd2019-07-18 15:58:04 -07007408 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
Juergen Gross8d29e182015-02-11 15:26:01 -08007409 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7410 (u64)start_pfn << PAGE_SHIFT,
7411 ((u64)end_pfn << PAGE_SHIFT) - 1);
Dan Williamsf46edbd2019-07-18 15:58:04 -07007412 subsection_map_init(start_pfn, end_pfn - start_pfn);
7413 }
Mel Gormanc7132162006-09-27 01:49:43 -07007414
7415 /* Initialise every node */
Mel Gorman708614e2008-07-23 21:26:51 -07007416 mminit_verify_pageflags_layout();
Christoph Lameter8ef82862007-02-20 13:57:52 -08007417 setup_nr_node_ids();
David Hildenbrand4b094b72020-02-03 17:33:55 -08007418 init_unavailable_mem();
Mel Gormanc7132162006-09-27 01:49:43 -07007419 for_each_online_node(nid) {
7420 pg_data_t *pgdat = NODE_DATA(nid);
Mike Rapoport854e8842020-06-03 15:58:13 -07007421 free_area_init_node(nid);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007422
7423 /* Any memory on that node */
7424 if (pgdat->node_present_pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007425 node_set_state(nid, N_MEMORY);
7426 check_for_memory(pgdat, nid);
Mel Gormanc7132162006-09-27 01:49:43 -07007427 }
7428}
Mel Gorman2a1e2742007-07-17 04:03:12 -07007429
David Rientjesa5c6d652018-04-05 16:23:09 -07007430static int __init cmdline_parse_core(char *p, unsigned long *core,
7431 unsigned long *percent)
Mel Gorman2a1e2742007-07-17 04:03:12 -07007432{
7433 unsigned long long coremem;
David Rientjesa5c6d652018-04-05 16:23:09 -07007434 char *endptr;
7435
Mel Gorman2a1e2742007-07-17 04:03:12 -07007436 if (!p)
7437 return -EINVAL;
7438
David Rientjesa5c6d652018-04-05 16:23:09 -07007439 /* Value may be a percentage of total memory, otherwise bytes */
7440 coremem = simple_strtoull(p, &endptr, 0);
7441 if (*endptr == '%') {
7442 /* Paranoid check for percent values greater than 100 */
7443 WARN_ON(coremem > 100);
Mel Gorman2a1e2742007-07-17 04:03:12 -07007444
David Rientjesa5c6d652018-04-05 16:23:09 -07007445 *percent = coremem;
7446 } else {
7447 coremem = memparse(p, &p);
7448 /* Paranoid check that UL is enough for the coremem value */
7449 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
Mel Gorman2a1e2742007-07-17 04:03:12 -07007450
David Rientjesa5c6d652018-04-05 16:23:09 -07007451 *core = coremem >> PAGE_SHIFT;
7452 *percent = 0UL;
7453 }
Mel Gorman2a1e2742007-07-17 04:03:12 -07007454 return 0;
7455}
Mel Gormaned7ed362007-07-17 04:03:14 -07007456
Mel Gorman7e63efe2007-07-17 04:03:15 -07007457/*
7458 * kernelcore=size sets the amount of memory for use for allocations that
7459 * cannot be reclaimed or migrated.
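 *
 * For example: kernelcore=512M, kernelcore=20%, or kernelcore=mirror.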
7460 */
7461static int __init cmdline_parse_kernelcore(char *p)
7462{
Taku Izumi342332e2016-03-15 14:55:22 -07007463 /* parse kernelcore=mirror */
7464 if (parse_option_str(p, "mirror")) {
7465 mirrored_kernelcore = true;
7466 return 0;
7467 }
7468
David Rientjesa5c6d652018-04-05 16:23:09 -07007469 return cmdline_parse_core(p, &required_kernelcore,
7470 &required_kernelcore_percent);
Mel Gorman7e63efe2007-07-17 04:03:15 -07007471}
7472
7473/*
7474 * movablecore=size sets the amount of memory for use for allocations that
7475 * can be reclaimed or migrated.
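 *
 * For example: movablecore=1G or movablecore=25%.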
7476 */
7477static int __init cmdline_parse_movablecore(char *p)
7478{
David Rientjesa5c6d652018-04-05 16:23:09 -07007479 return cmdline_parse_core(p, &required_movablecore,
7480 &required_movablecore_percent);
Mel Gorman7e63efe2007-07-17 04:03:15 -07007481}
7482
Mel Gormaned7ed362007-07-17 04:03:14 -07007483early_param("kernelcore", cmdline_parse_kernelcore);
Mel Gorman7e63efe2007-07-17 04:03:15 -07007484early_param("movablecore", cmdline_parse_movablecore);
Mel Gormaned7ed362007-07-17 04:03:14 -07007485
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07007486void adjust_managed_page_count(struct page *page, long count)
7487{
Arun KS9705bea2018-12-28 00:34:24 -08007488 atomic_long_add(count, &page_zone(page)->managed_pages);
Arun KSca79b0c2018-12-28 00:34:29 -08007489 totalram_pages_add(count);
Jiang Liu3dcc0572013-07-03 15:03:21 -07007490#ifdef CONFIG_HIGHMEM
7491 if (PageHighMem(page))
Arun KSca79b0c2018-12-28 00:34:29 -08007492 totalhigh_pages_add(count);
Jiang Liu3dcc0572013-07-03 15:03:21 -07007493#endif
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07007494}
Jiang Liu3dcc0572013-07-03 15:03:21 -07007495EXPORT_SYMBOL(adjust_managed_page_count);
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07007496
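/*
 * Free a reserved range [start, end) back to the page allocator, optionally
 * filling it with 'poison' first (a poison value that does not fit in a byte,
 * e.g. -1, skips the poisoning). Returns the number of pages released; 's'
 * only names the range in the log message.
 */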
Alexey Dobriyane5cb1132018-12-28 00:36:03 -08007497unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
Jiang Liu69afade2013-04-29 15:06:21 -07007498{
Jiang Liu11199692013-07-03 15:02:48 -07007499 void *pos;
7500 unsigned long pages = 0;
Jiang Liu69afade2013-04-29 15:06:21 -07007501
Jiang Liu11199692013-07-03 15:02:48 -07007502 start = (void *)PAGE_ALIGN((unsigned long)start);
7503 end = (void *)((unsigned long)end & PAGE_MASK);
7504 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
Dave Hansen0d834322018-08-02 15:58:26 -07007505 struct page *page = virt_to_page(pos);
7506 void *direct_map_addr;
7507
7508 /*
7509 * 'direct_map_addr' might be different from 'pos'
7510 * because some architectures' virt_to_page()
7511 * work with aliases. Getting the direct map
7512 * address ensures that we get a _writeable_
7513 * alias for the memset().
7514 */
7515 direct_map_addr = page_address(page);
Jiang Liudbe67df2013-07-03 15:02:51 -07007516 if ((unsigned int)poison <= 0xFF)
Dave Hansen0d834322018-08-02 15:58:26 -07007517 memset(direct_map_addr, poison, PAGE_SIZE);
7518
7519 free_reserved_page(page);
Jiang Liu69afade2013-04-29 15:06:21 -07007520 }
7521
7522 if (pages && s)
Josh Poimboeufadb1fe92016-10-25 09:51:14 -05007523 pr_info("Freeing %s memory: %ldK\n",
7524 s, pages << (PAGE_SHIFT - 10));
Jiang Liu69afade2013-04-29 15:06:21 -07007525
7526 return pages;
7527}
7528
Jiang Liucfa11e02013-04-29 15:07:00 -07007529#ifdef CONFIG_HIGHMEM
7530void free_highmem_page(struct page *page)
7531{
7532 __free_reserved_page(page);
Arun KSca79b0c2018-12-28 00:34:29 -08007533 totalram_pages_inc();
Arun KS9705bea2018-12-28 00:34:24 -08007534 atomic_long_inc(&page_zone(page)->managed_pages);
Arun KSca79b0c2018-12-28 00:34:29 -08007535 totalhigh_pages_inc();
Jiang Liucfa11e02013-04-29 15:07:00 -07007536}
7537#endif
7538
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007539
7540void __init mem_init_print_info(const char *str)
7541{
7542 unsigned long physpages, codesize, datasize, rosize, bss_size;
7543 unsigned long init_code_size, init_data_size;
7544
7545 physpages = get_num_physpages();
7546 codesize = _etext - _stext;
7547 datasize = _edata - _sdata;
7548 rosize = __end_rodata - __start_rodata;
7549 bss_size = __bss_stop - __bss_start;
7550 init_data_size = __init_end - __init_begin;
7551 init_code_size = _einittext - _sinittext;
7552
7553 /*
7554 * Detect special cases and adjust section sizes accordingly:
7555 * 1) .init.* may be embedded into .data sections
7556 * 2) .init.text.* may be out of [__init_begin, __init_end],
7557 * please refer to arch/tile/kernel/vmlinux.lds.S.
7558 * 3) .rodata.* may be embedded into .text or .data sections.
7559 */
7560#define adj_init_size(start, end, size, pos, adj) \
Pintu Kumarb8af2942013-09-11 14:20:34 -07007561 do { \
7562 if (start <= pos && pos < end && size > adj) \
7563 size -= adj; \
7564 } while (0)
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007565
7566 adj_init_size(__init_begin, __init_end, init_data_size,
7567 _sinittext, init_code_size);
7568 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
7569 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
7570 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
7571 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
7572
7573#undef adj_init_size
7574
Joe Perches756a0252016-03-17 14:19:47 -07007575 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007576#ifdef CONFIG_HIGHMEM
Joe Perches756a0252016-03-17 14:19:47 -07007577 ", %luK highmem"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007578#endif
Joe Perches756a0252016-03-17 14:19:47 -07007579 "%s%s)\n",
7580 nr_free_pages() << (PAGE_SHIFT - 10),
7581 physpages << (PAGE_SHIFT - 10),
7582 codesize >> 10, datasize >> 10, rosize >> 10,
7583 (init_data_size + init_code_size) >> 10, bss_size >> 10,
Arun KSca79b0c2018-12-28 00:34:29 -08007584 (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
Joe Perches756a0252016-03-17 14:19:47 -07007585 totalcma_pages << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007586#ifdef CONFIG_HIGHMEM
Arun KSca79b0c2018-12-28 00:34:29 -08007587 totalhigh_pages() << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007588#endif
Joe Perches756a0252016-03-17 14:19:47 -07007589 str ? ", " : "", str ? str : "");
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007590}
7591
Mel Gorman0e0b8642006-09-27 01:49:56 -07007592/**
Randy Dunlap88ca3b92006-10-04 02:15:25 -07007593 * set_dma_reserve - set the specified number of pages reserved in the first zone
7594 * @new_dma_reserve: The number of pages to mark reserved
Mel Gorman0e0b8642006-09-27 01:49:56 -07007595 *
Yaowei Bai013110a2015-09-08 15:04:10 -07007596 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
Mel Gorman0e0b8642006-09-27 01:49:56 -07007597 * In the DMA zone, a significant percentage may be consumed by kernel image
7598 * and other unfreeable allocations which can skew the watermarks badly. This
Randy Dunlap88ca3b92006-10-04 02:15:25 -07007599 * function may optionally be used to account for unfreeable pages in the
7600 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7601 * smaller per-cpu batchsize.
Mel Gorman0e0b8642006-09-27 01:49:56 -07007602 */
7603void __init set_dma_reserve(unsigned long new_dma_reserve)
7604{
7605 dma_reserve = new_dma_reserve;
7606}
7607
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007608static int page_alloc_cpu_dead(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007609{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007610
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007611 lru_add_drain_cpu(cpu);
7612 drain_pages(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08007613
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007614 /*
7615 * Spill the event counters of the dead processor
7616 * into the current processors event counters.
7617 * This artificially elevates the count of the current
7618 * processor.
7619 */
7620 vm_events_fold_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08007621
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007622 /*
7623 * Zero the differential counters of the dead processor
7624 * so that the vm statistics are consistent.
7625 *
7626 * This is only okay since the processor is dead and cannot
7627 * race with what we are doing.
7628 */
7629 cpu_vm_stats_fold(cpu);
7630 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007631}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007632
Nicholas Piggine03a5122019-07-11 20:59:12 -07007633#ifdef CONFIG_NUMA
7634int hashdist = HASHDIST_DEFAULT;
7635
7636static int __init set_hashdist(char *str)
7637{
7638 if (!str)
7639 return 0;
7640 hashdist = simple_strtoul(str, &str, 0);
7641 return 1;
7642}
7643__setup("hashdist=", set_hashdist);
7644#endif
7645
Linus Torvalds1da177e2005-04-16 15:20:36 -07007646void __init page_alloc_init(void)
7647{
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007648 int ret;
7649
Nicholas Piggine03a5122019-07-11 20:59:12 -07007650#ifdef CONFIG_NUMA
7651 if (num_node_state(N_MEMORY) == 1)
7652 hashdist = 0;
7653#endif
7654
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007655 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
7656 "mm/page_alloc:dead", NULL,
7657 page_alloc_cpu_dead);
7658 WARN_ON(ret < 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007659}
7660
7661/*
Yaowei Bai34b10062015-09-08 15:04:13 -07007662 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007663 * or min_free_kbytes changes.
7664 */
7665static void calculate_totalreserve_pages(void)
7666{
7667 struct pglist_data *pgdat;
7668 unsigned long reserve_pages = 0;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07007669 enum zone_type i, j;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007670
7671 for_each_online_pgdat(pgdat) {
Mel Gorman281e3722016-07-28 15:46:11 -07007672
7673 pgdat->totalreserve_pages = 0;
7674
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007675 for (i = 0; i < MAX_NR_ZONES; i++) {
7676 struct zone *zone = pgdat->node_zones + i;
Mel Gorman3484b2d2014-08-06 16:07:14 -07007677 long max = 0;
Arun KS9705bea2018-12-28 00:34:24 -08007678 unsigned long managed_pages = zone_managed_pages(zone);
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007679
7680 /* Find valid and maximum lowmem_reserve in the zone */
7681 for (j = i; j < MAX_NR_ZONES; j++) {
7682 if (zone->lowmem_reserve[j] > max)
7683 max = zone->lowmem_reserve[j];
7684 }
7685
Mel Gorman41858962009-06-16 15:32:12 -07007686 /* we treat the high watermark as reserved pages. */
7687 max += high_wmark_pages(zone);
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007688
Arun KS3d6357d2018-12-28 00:34:20 -08007689 if (max > managed_pages)
7690 max = managed_pages;
Johannes Weinera8d01432016-01-14 15:20:15 -08007691
Mel Gorman281e3722016-07-28 15:46:11 -07007692 pgdat->totalreserve_pages += max;
Johannes Weinera8d01432016-01-14 15:20:15 -08007693
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007694 reserve_pages += max;
7695 }
7696 }
7697 totalreserve_pages = reserve_pages;
7698}
7699
7700/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07007701 * setup_per_zone_lowmem_reserve - called whenever
Yaowei Bai34b10062015-09-08 15:04:13 -07007702 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
Linus Torvalds1da177e2005-04-16 15:20:36 -07007703 * has a correct pages reserved value, so an adequate number of
7704 * pages are left in the zone after a successful __alloc_pages().
7705 */
7706static void setup_per_zone_lowmem_reserve(void)
7707{
7708 struct pglist_data *pgdat;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07007709 enum zone_type j, idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007710
KAMEZAWA Hiroyukiec936fc2006-03-27 01:15:59 -08007711 for_each_online_pgdat(pgdat) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007712 for (j = 0; j < MAX_NR_ZONES; j++) {
7713 struct zone *zone = pgdat->node_zones + j;
Arun KS9705bea2018-12-28 00:34:24 -08007714 unsigned long managed_pages = zone_managed_pages(zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007715
7716 zone->lowmem_reserve[j] = 0;
7717
Christoph Lameter2f6726e2006-09-25 23:31:18 -07007718 idx = j;
7719 while (idx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007720 struct zone *lower_zone;
7721
Christoph Lameter2f6726e2006-09-25 23:31:18 -07007722 idx--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007723 lower_zone = pgdat->node_zones + idx;
Joonsoo Kimd3cda232018-04-10 16:30:11 -07007724
Baoquan Hef6366152020-06-03 15:58:52 -07007725 if (!sysctl_lowmem_reserve_ratio[idx] ||
7726 !zone_managed_pages(lower_zone)) {
Joonsoo Kimd3cda232018-04-10 16:30:11 -07007727 lower_zone->lowmem_reserve[j] = 0;
Baoquan Hef6366152020-06-03 15:58:52 -07007728 continue;
Joonsoo Kimd3cda232018-04-10 16:30:11 -07007729 } else {
7730 lower_zone->lowmem_reserve[j] =
7731 managed_pages / sysctl_lowmem_reserve_ratio[idx];
7732 }
Arun KS9705bea2018-12-28 00:34:24 -08007733 managed_pages += zone_managed_pages(lower_zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007734 }
7735 }
7736 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007737
7738 /* update totalreserve_pages */
7739 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007740}
7741
Mel Gormancfd3da12011-04-25 21:36:42 +00007742static void __setup_per_zone_wmarks(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007743{
7744 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
7745 unsigned long lowmem_pages = 0;
7746 struct zone *zone;
7747 unsigned long flags;
7748
7749 /* Calculate total number of !ZONE_HIGHMEM pages */
7750 for_each_zone(zone) {
7751 if (!is_highmem(zone))
Arun KS9705bea2018-12-28 00:34:24 -08007752 lowmem_pages += zone_managed_pages(zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007753 }
7754
7755 for_each_zone(zone) {
Andrew Mortonac924c62006-05-15 09:43:59 -07007756 u64 tmp;
7757
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07007758 spin_lock_irqsave(&zone->lock, flags);
Arun KS9705bea2018-12-28 00:34:24 -08007759 tmp = (u64)pages_min * zone_managed_pages(zone);
Andrew Mortonac924c62006-05-15 09:43:59 -07007760 do_div(tmp, lowmem_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007761 if (is_highmem(zone)) {
7762 /*
Nick Piggin669ed172005-11-13 16:06:45 -08007763 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
7764 * need highmem pages, so cap pages_min to a small
7765 * value here.
7766 *
Mel Gorman41858962009-06-16 15:32:12 -07007767 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
Wei Yang8bb4e7a2019-03-05 15:46:22 -08007768 * deltas control async page reclaim, and so should
Nick Piggin669ed172005-11-13 16:06:45 -08007769 * not be capped for highmem.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007770 */
Andrew Morton90ae8d62013-02-22 16:32:22 -08007771 unsigned long min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007772
Arun KS9705bea2018-12-28 00:34:24 -08007773 min_pages = zone_managed_pages(zone) / 1024;
Andrew Morton90ae8d62013-02-22 16:32:22 -08007774 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
Mel Gormana9214442018-12-28 00:35:44 -08007775 zone->_watermark[WMARK_MIN] = min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007776 } else {
Nick Piggin669ed172005-11-13 16:06:45 -08007777 /*
7778 * If it's a lowmem zone, reserve a number of pages
Linus Torvalds1da177e2005-04-16 15:20:36 -07007779 * proportionate to the zone's size.
7780 */
Mel Gormana9214442018-12-28 00:35:44 -08007781 zone->_watermark[WMARK_MIN] = tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007782 }
7783
Johannes Weiner795ae7a2016-03-17 14:19:14 -07007784 /*
7785 * Set the kswapd watermarks distance according to the
7786 * scale factor in proportion to available memory, but
7787 * ensure a minimum size on small systems.
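		 *
		 * With the default watermark_scale_factor of 10 this works out to
		 * roughly 0.1% of the zone's managed pages between consecutive
		 * watermarks on reasonably sized zones.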
7788 */
7789 tmp = max_t(u64, tmp >> 2,
Arun KS9705bea2018-12-28 00:34:24 -08007790 mult_frac(zone_managed_pages(zone),
Johannes Weiner795ae7a2016-03-17 14:19:14 -07007791 watermark_scale_factor, 10000));
7792
Charan Teja Reddyaa092592020-06-03 15:59:14 -07007793 zone->watermark_boost = 0;
Mel Gormana9214442018-12-28 00:35:44 -08007794 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
7795 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
Marek Szyprowski49f223a2012-01-25 12:49:24 +01007796
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07007797 spin_unlock_irqrestore(&zone->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007798 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007799
7800 /* update totalreserve_pages */
7801 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007802}
7803
Mel Gormancfd3da12011-04-25 21:36:42 +00007804/**
7805 * setup_per_zone_wmarks - called when min_free_kbytes changes
7806 * or when memory is hot-{added|removed}
7807 *
7808 * Ensures that the watermark[min,low,high] values for each zone are set
7809 * correctly with respect to min_free_kbytes.
7810 */
7811void setup_per_zone_wmarks(void)
7812{
Michal Hockob93e0f32017-09-06 16:20:37 -07007813 static DEFINE_SPINLOCK(lock);
7814
7815 spin_lock(&lock);
Mel Gormancfd3da12011-04-25 21:36:42 +00007816 __setup_per_zone_wmarks();
Michal Hockob93e0f32017-09-06 16:20:37 -07007817 spin_unlock(&lock);
Mel Gormancfd3da12011-04-25 21:36:42 +00007818}
7819
Randy Dunlap55a44622009-09-21 17:01:20 -07007820/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07007821 * Initialise min_free_kbytes.
7822 *
7823 * For small machines we want it small (128k min). For large machines
Joel Savitz8beeae82020-07-03 15:15:30 -07007824 * we want it large (256MB max). But it is not linear, because network
Linus Torvalds1da177e2005-04-16 15:20:36 -07007825 * bandwidth does not increase linearly with machine size. We use
7826 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07007827 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007828 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
7829 *
7830 * which yields
7831 *
7832 * 16MB: 512k
7833 * 32MB: 724k
7834 * 64MB: 1024k
7835 * 128MB: 1448k
7836 * 256MB: 2048k
7837 * 512MB: 2896k
7838 * 1024MB: 4096k
7839 * 2048MB: 5792k
7840 * 4096MB: 8192k
7841 * 8192MB: 11584k
7842 * 16384MB: 16384k
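 *
 * For example, 128MB of lowmem gives sqrt(131072 * 16) = sqrt(2097152) ~= 1448k,
 * matching the 128MB entry above.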
7843 */
KOSAKI Motohiro1b79acc2011-05-24 17:11:32 -07007844int __meminit init_per_zone_wmark_min(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007845{
7846 unsigned long lowmem_kbytes;
Michal Hocko5f127332013-07-08 16:00:40 -07007847 int new_min_free_kbytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007848
7849 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
Michal Hocko5f127332013-07-08 16:00:40 -07007850 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007851
Michal Hocko5f127332013-07-08 16:00:40 -07007852 if (new_min_free_kbytes > user_min_free_kbytes) {
7853 min_free_kbytes = new_min_free_kbytes;
7854 if (min_free_kbytes < 128)
7855 min_free_kbytes = 128;
Joel Savitzee8eb9a2020-04-01 21:09:44 -07007856 if (min_free_kbytes > 262144)
7857 min_free_kbytes = 262144;
Michal Hocko5f127332013-07-08 16:00:40 -07007858 } else {
7859 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7860 new_min_free_kbytes, user_min_free_kbytes);
7861 }
Minchan Kimbc75d332009-06-16 15:32:48 -07007862 setup_per_zone_wmarks();
KOSAKI Motohiroa6cccdc2011-05-24 17:11:33 -07007863 refresh_zone_stat_thresholds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007864 setup_per_zone_lowmem_reserve();
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007865
7866#ifdef CONFIG_NUMA
7867 setup_min_unmapped_ratio();
7868 setup_min_slab_ratio();
7869#endif
7870
Linus Torvalds1da177e2005-04-16 15:20:36 -07007871 return 0;
7872}
Jason Baronbc22af742016-05-05 16:22:12 -07007873core_initcall(init_per_zone_wmark_min)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007874
7875/*
Pintu Kumarb8af2942013-09-11 14:20:34 -07007876 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
Linus Torvalds1da177e2005-04-16 15:20:36 -07007877 * that we can call two helper functions whenever min_free_kbytes
7878 * changes.
7879 */
Joe Perchescccad5b2014-06-06 14:38:09 -07007880int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02007881 void *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007882{
Han Pingtianda8c7572014-01-23 15:53:17 -08007883 int rc;
7884
7885 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7886 if (rc)
7887 return rc;
7888
Michal Hocko5f127332013-07-08 16:00:40 -07007889 if (write) {
7890 user_min_free_kbytes = min_free_kbytes;
Minchan Kimbc75d332009-06-16 15:32:48 -07007891 setup_per_zone_wmarks();
Michal Hocko5f127332013-07-08 16:00:40 -07007892 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007893 return 0;
7894}
7895
Johannes Weiner795ae7a2016-03-17 14:19:14 -07007896int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02007897 void *buffer, size_t *length, loff_t *ppos)
Johannes Weiner795ae7a2016-03-17 14:19:14 -07007898{
7899 int rc;
7900
7901 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7902 if (rc)
7903 return rc;
7904
7905 if (write)
7906 setup_per_zone_wmarks();
7907
7908 return 0;
7909}
7910
Christoph Lameter96146342006-07-03 00:24:13 -07007911#ifdef CONFIG_NUMA
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007912static void setup_min_unmapped_ratio(void)
Christoph Lameter96146342006-07-03 00:24:13 -07007913{
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007914 pg_data_t *pgdat;
Christoph Lameter96146342006-07-03 00:24:13 -07007915 struct zone *zone;
Christoph Lameter96146342006-07-03 00:24:13 -07007916
Mel Gormana5f5f912016-07-28 15:46:32 -07007917 for_each_online_pgdat(pgdat)
Joonsoo Kim81cbcbc2016-08-10 16:27:46 -07007918 pgdat->min_unmapped_pages = 0;
Mel Gormana5f5f912016-07-28 15:46:32 -07007919
Christoph Lameter96146342006-07-03 00:24:13 -07007920 for_each_zone(zone)
Arun KS9705bea2018-12-28 00:34:24 -08007921 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
7922 sysctl_min_unmapped_ratio) / 100;
Christoph Lameter96146342006-07-03 00:24:13 -07007923}
Christoph Lameter0ff38492006-09-25 23:31:52 -07007924
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007925
7926int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02007927 void *buffer, size_t *length, loff_t *ppos)
Christoph Lameter0ff38492006-09-25 23:31:52 -07007928{
Christoph Lameter0ff38492006-09-25 23:31:52 -07007929 int rc;
7930
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007931 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
Christoph Lameter0ff38492006-09-25 23:31:52 -07007932 if (rc)
7933 return rc;
7934
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007935 setup_min_unmapped_ratio();
7936
7937 return 0;
7938}
7939
7940static void setup_min_slab_ratio(void)
7941{
7942 pg_data_t *pgdat;
7943 struct zone *zone;
7944
Mel Gormana5f5f912016-07-28 15:46:32 -07007945 for_each_online_pgdat(pgdat)
7946 pgdat->min_slab_pages = 0;
7947
Christoph Lameter0ff38492006-09-25 23:31:52 -07007948 for_each_zone(zone)
Arun KS9705bea2018-12-28 00:34:24 -08007949 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
7950 sysctl_min_slab_ratio) / 100;
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007951}
7952
7953int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02007954 void *buffer, size_t *length, loff_t *ppos)
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007955{
7956 int rc;
7957
7958 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7959 if (rc)
7960 return rc;
7961
7962 setup_min_slab_ratio();
7963
Christoph Lameter0ff38492006-09-25 23:31:52 -07007964 return 0;
7965}
Christoph Lameter96146342006-07-03 00:24:13 -07007966#endif
7967
Linus Torvalds1da177e2005-04-16 15:20:36 -07007968/*
7969 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
7970 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
7971 * whenever sysctl_lowmem_reserve_ratio changes.
7972 *
7973 * The reserve ratio obviously has absolutely no relation with the
Mel Gorman41858962009-06-16 15:32:12 -07007974 * minimum watermarks. The lowmem reserve ratio can only make sense
Linus Torvalds1da177e2005-04-16 15:20:36 -07007975 * as a function of the boot-time zone sizes.
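 *
 * For example, the default ratio of 256 for ZONE_DMA means that allocations
 * which could have been satisfied from a higher zone must leave roughly
 * 1/256th of the higher zones' combined pages untouched in ZONE_DMA.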
7976 */
Joe Perchescccad5b2014-06-06 14:38:09 -07007977int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02007978 void *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007979{
Baoquan He86aaf252020-06-03 15:58:48 -07007980 int i;
7981
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007982 proc_dointvec_minmax(table, write, buffer, length, ppos);
Baoquan He86aaf252020-06-03 15:58:48 -07007983
7984 for (i = 0; i < MAX_NR_ZONES; i++) {
7985 if (sysctl_lowmem_reserve_ratio[i] < 1)
7986 sysctl_lowmem_reserve_ratio[i] = 0;
7987 }
7988
Linus Torvalds1da177e2005-04-16 15:20:36 -07007989 setup_per_zone_lowmem_reserve();
7990 return 0;
7991}
7992
Mel Gormancb1ef532019-11-30 17:55:11 -08007993static void __zone_pcp_update(struct zone *zone)
7994{
7995 unsigned int cpu;
7996
7997 for_each_possible_cpu(cpu)
7998 pageset_set_high_and_batch(zone,
7999 per_cpu_ptr(zone->pageset, cpu));
8000}
8001
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08008002/*
8003 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
Pintu Kumarb8af2942013-09-11 14:20:34 -07008004 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
8005 * pagelist can have before it gets flushed back to the buddy allocator.
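 *
 * For example, a value of 8 caps each per-cpu pagelist at roughly 1/8th of the
 * zone's managed pages, while 0 (the default) keeps the zone-size based
 * high/batch values.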
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08008006 */
Joe Perchescccad5b2014-06-06 14:38:09 -07008007int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02008008 void *buffer, size_t *length, loff_t *ppos)
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08008009{
8010 struct zone *zone;
David Rientjes7cd2b0a2014-06-23 13:22:04 -07008011 int old_percpu_pagelist_fraction;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08008012 int ret;
8013
Cody P Schaferc8e251f2013-07-03 15:01:29 -07008014 mutex_lock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07008015 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
8016
8017 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8018 if (!write || ret < 0)
8019 goto out;
8020
8021 /* Sanity checking to avoid pcp imbalance */
8022 if (percpu_pagelist_fraction &&
8023 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
8024 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
8025 ret = -EINVAL;
8026 goto out;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08008027 }
David Rientjes7cd2b0a2014-06-23 13:22:04 -07008028
8029 /* No change? */
8030 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
8031 goto out;
8032
Mel Gormancb1ef532019-11-30 17:55:11 -08008033 for_each_populated_zone(zone)
8034 __zone_pcp_update(zone);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07008035out:
Cody P Schaferc8e251f2013-07-03 15:01:29 -07008036 mutex_unlock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07008037 return ret;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08008038}
8039
Srikar Dronamrajuf6f34b42016-10-07 16:59:15 -07008040#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8041/*
8042 * Returns the number of pages that arch has reserved but
8043 * is not known to alloc_large_system_hash().
8044 */
8045static unsigned long __init arch_reserved_kernel_pages(void)
8046{
8047 return 0;
8048}
8049#endif
8050
Linus Torvalds1da177e2005-04-16 15:20:36 -07008051/*
Pavel Tatashin90172172017-07-06 15:39:14 -07008052 * Adaptive scale is meant to reduce the sizes of hash tables on large-memory
8053 * machines. As memory size is increased the scale is also increased, but at a
8054 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
8055 * quadruples the scale is increased by one, which means the size of the hash
8056 * table only doubles instead of quadrupling as well.
8057 * Because 32-bit systems cannot have large physical memory, where this scaling
8058 * makes sense, it is disabled on such platforms.
8059 */
8060#if __BITS_PER_LONG > 32
8061#define ADAPT_SCALE_BASE (64ul << 30)
8062#define ADAPT_SCALE_SHIFT 2
8063#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
8064#endif
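/*
 * Worked example (illustrative): with numentries defaulting to
 * nr_kernel_pages, the adapt loop in alloc_large_system_hash() below runs
 * once on a 256G machine (64G -> 256G) and twice on a 1T machine, so every
 * quadrupling of memory beyond 64G adds one to 'scale' and thereby halves
 * the bucket density -- the hash table only doubles in size.
 */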
8065
8066/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07008067 * allocate a large system hash table from bootmem
8068 * - it is assumed that the hash table must contain an exact power-of-2
8069 * quantity of entries
8070 * - limit is the number of hash buckets, not the total allocation size
8071 */
8072void *__init alloc_large_system_hash(const char *tablename,
8073 unsigned long bucketsize,
8074 unsigned long numentries,
8075 int scale,
8076 int flags,
8077 unsigned int *_hash_shift,
8078 unsigned int *_hash_mask,
Tim Bird31fe62b2012-05-23 13:33:35 +00008079 unsigned long low_limit,
8080 unsigned long high_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008081{
Tim Bird31fe62b2012-05-23 13:33:35 +00008082 unsigned long long max = high_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008083 unsigned long log2qty, size;
8084 void *table = NULL;
Pavel Tatashin3749a8f2017-07-06 15:39:08 -07008085 gfp_t gfp_flags;
Nicholas Pigginec114082019-07-11 20:59:09 -07008086 bool virt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008087
8088 /* allow the kernel cmdline to have a say */
8089 if (!numentries) {
8090 /* round applicable memory size up to nearest megabyte */
Andrew Morton04903662006-12-06 20:37:33 -08008091 numentries = nr_kernel_pages;
Srikar Dronamrajuf6f34b42016-10-07 16:59:15 -07008092 numentries -= arch_reserved_kernel_pages();
Jerry Zhoua7e83312013-09-11 14:20:26 -07008093
8094 /* It isn't necessary when PAGE_SIZE >= 1MB */
8095 if (PAGE_SHIFT < 20)
8096 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008097
Pavel Tatashin90172172017-07-06 15:39:14 -07008098#if __BITS_PER_LONG > 32
8099 if (!high_limit) {
8100 unsigned long adapt;
8101
8102 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
8103 adapt <<= ADAPT_SCALE_SHIFT)
8104 scale++;
8105 }
8106#endif
8107
Linus Torvalds1da177e2005-04-16 15:20:36 -07008108 /* limit to 1 bucket per 2^scale bytes of low memory */
8109 if (scale > PAGE_SHIFT)
8110 numentries >>= (scale - PAGE_SHIFT);
8111 else
8112 numentries <<= (PAGE_SHIFT - scale);
Paul Mundt9ab37b82007-01-05 16:36:30 -08008113
8114 /* Make sure we've got at least a 0-order allocation.. */
Jan Beulich2c85f512009-09-21 17:03:07 -07008115 if (unlikely(flags & HASH_SMALL)) {
8116 /* Makes no sense without HASH_EARLY */
8117 WARN_ON(!(flags & HASH_EARLY));
8118 if (!(numentries >> *_hash_shift)) {
8119 numentries = 1UL << *_hash_shift;
8120 BUG_ON(!numentries);
8121 }
8122 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
Paul Mundt9ab37b82007-01-05 16:36:30 -08008123 numentries = PAGE_SIZE / bucketsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008124 }
John Hawkes6e692ed2006-03-25 03:08:02 -08008125 numentries = roundup_pow_of_two(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008126
8127 /* limit allocation size to 1/16 total memory by default */
8128 if (max == 0) {
8129 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
8130 do_div(max, bucketsize);
8131 }
Dimitri Sivanich074b8512012-02-08 12:39:07 -08008132 max = min(max, 0x80000000ULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008133
Tim Bird31fe62b2012-05-23 13:33:35 +00008134 if (numentries < low_limit)
8135 numentries = low_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008136 if (numentries > max)
8137 numentries = max;
8138
David Howellsf0d1b0b2006-12-08 02:37:49 -08008139 log2qty = ilog2(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008140
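	/*
	 * Allocation strategy (descriptive note): try the requested size first
	 * and, on failure, halve the number of entries each time around the
	 * loop below until either the allocation succeeds or the table would
	 * fit in a single page.
	 */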
Pavel Tatashin3749a8f2017-07-06 15:39:08 -07008141 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008142 do {
Nicholas Pigginec114082019-07-11 20:59:09 -07008143 virt = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008144 size = bucketsize << log2qty;
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08008145 if (flags & HASH_EARLY) {
8146 if (flags & HASH_ZERO)
Mike Rapoport26fb3da2019-03-11 23:30:42 -07008147 table = memblock_alloc(size, SMP_CACHE_BYTES);
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08008148 else
Mike Rapoport7e1c4e22018-10-30 15:09:57 -07008149 table = memblock_alloc_raw(size,
8150 SMP_CACHE_BYTES);
Nicholas Pigginec114082019-07-11 20:59:09 -07008151 } else if (get_order(size) >= MAX_ORDER || hashdist) {
Christoph Hellwig88dca4c2020-06-01 21:51:40 -07008152 table = __vmalloc(size, gfp_flags);
Nicholas Pigginec114082019-07-11 20:59:09 -07008153 virt = true;
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08008154 } else {
Eric Dumazet1037b832007-07-15 23:38:05 -07008155 /*
8156 * If bucketsize is not a power-of-two, we may free
Mel Gormana1dd2682009-06-16 15:32:19 -07008157 * some pages at the end of the hash table, which
8158 * alloc_pages_exact() does automatically.
Eric Dumazet1037b832007-07-15 23:38:05 -07008159 */
Nicholas Pigginec114082019-07-11 20:59:09 -07008160 table = alloc_pages_exact(size, gfp_flags);
8161 kmemleak_alloc(table, size, 1, gfp_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008162 }
8163 } while (!table && size > PAGE_SIZE && --log2qty);
8164
8165 if (!table)
8166 panic("Failed to allocate %s hash table\n", tablename);
8167
Nicholas Pigginec114082019-07-11 20:59:09 -07008168 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
8169 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
8170 virt ? "vmalloc" : "linear");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008171
8172 if (_hash_shift)
8173 *_hash_shift = log2qty;
8174 if (_hash_mask)
8175 *_hash_mask = (1 << log2qty) - 1;
8176
8177 return table;
8178}
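/*
 * Illustrative call (hypothetical parameters, loosely modelled on the early
 * boot users such as the inode and dentry hash setup): passing 0 for
 * numentries sizes the table from memory, scale 14 asks for roughly one
 * bucket per 16KB of low memory, and HASH_EARLY | HASH_ZERO requests a
 * zeroed memblock allocation:
 *
 *	table = alloc_large_system_hash("Example cache",
 *					sizeof(struct hlist_head), 0, 14,
 *					HASH_EARLY | HASH_ZERO,
 *					&example_shift, &example_mask, 0, 0);
 */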
KAMEZAWA Hiroyukia117e662006-03-27 01:15:25 -08008179
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07008180/*
Minchan Kim80934512012-07-31 16:43:01 -07008181 * This function checks whether the pageblock includes unmovable pages or not.
Minchan Kim80934512012-07-31 16:43:01 -07008182 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07008183 * A PageLRU check without isolation or lru_lock could race, so a
Yisheng Xie0efadf42017-02-24 14:57:39 -08008184 * MIGRATE_MOVABLE block might include unmovable pages. Likewise, a __PageMovable
8185 * check without lock_page may miss some movable non-LRU pages in a
8186 * race condition. So this function is not expected to be exact.
Qian Cai4a55c042020-01-30 22:14:57 -08008187 *
8188 * Returns a page without holding a reference. If the caller wants to
8189 * dereference that page (e.g., dumping), it has to make sure that it
8190 * cannot get removed (e.g., via memory unplug) concurrently.
8191 *
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07008192 */
Qian Cai4a55c042020-01-30 22:14:57 -08008193struct page *has_unmovable_pages(struct zone *zone, struct page *page,
8194 int migratetype, int flags)
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008195{
Qian Cai1a9f2192019-04-18 17:50:30 -07008196 unsigned long iter = 0;
8197 unsigned long pfn = page_to_pfn(page);
Michal Nazarewicz47118af2011-12-29 13:09:50 +01008198
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008199 /*
Michal Hocko15c30bc2018-05-25 14:47:42 -07008200 * TODO we could make this much more efficient by not checking every
8201 * page in the range if we know all of them are in MOVABLE_ZONE and
8202 * that the movable zone guarantees that pages are migratable but
8203 * the latter is not the case right now, unfortunately. E.g. movablecore
8204 * can still lead to having bootmem allocations in zone_movable.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008205 */
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008206
Qian Cai1a9f2192019-04-18 17:50:30 -07008207 if (is_migrate_cma_page(page)) {
8208 /*
8209 * CMA allocations (alloc_contig_range) really need to mark CMA
8210 * pageblocks as isolated even when they are not in fact movable,
8211 * so consider them movable here.
8212 */
8213 if (is_migrate_cma(migratetype))
Qian Cai4a55c042020-01-30 22:14:57 -08008214 return NULL;
Michal Hocko4da2ce22017-11-15 17:33:26 -08008215
Qian Cai3d680bd2020-01-30 22:15:01 -08008216 return page;
Qian Cai1a9f2192019-04-18 17:50:30 -07008217 }
8218
David Hildenbrandfe4c86c2020-01-30 22:14:04 -08008219 for (; iter < pageblock_nr_pages; iter++) {
8220 if (!pfn_valid_within(pfn + iter))
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008221 continue;
Namhyung Kim29723fc2011-02-25 14:44:25 -08008222
David Hildenbrandfe4c86c2020-01-30 22:14:04 -08008223 page = pfn_to_page(pfn + iter);
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07008224
Michal Hockod7ab3672017-11-15 17:33:30 -08008225 if (PageReserved(page))
Qian Cai3d680bd2020-01-30 22:15:01 -08008226 return page;
Michal Hockod7ab3672017-11-15 17:33:30 -08008227
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07008228 /*
Michal Hocko9d789992018-11-16 15:08:15 -08008229 * If the zone is movable and we have ruled out all reserved
8230 * pages then it should be reasonably safe to assume the rest
8231 * is movable.
8232 */
8233 if (zone_idx(zone) == ZONE_MOVABLE)
8234 continue;
8235
8236 /*
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07008237 * Hugepages are not in LRU lists, but they're movable.
Rik van Riel1da2f322020-04-01 21:10:31 -07008238 * THPs are on the LRU, but need to be counted as #small pages.
Wei Yang8bb4e7a2019-03-05 15:46:22 -08008239 * We need not scan over tail pages because we don't
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07008240 * handle each tail page individually in migration.
8241 */
Rik van Riel1da2f322020-04-01 21:10:31 -07008242 if (PageHuge(page) || PageTransCompound(page)) {
Oscar Salvador17e2e7d2018-12-21 14:31:00 -08008243 struct page *head = compound_head(page);
8244 unsigned int skip_pages;
Aneesh Kumar K.V464c7ff2018-09-04 15:45:59 -07008245
Rik van Riel1da2f322020-04-01 21:10:31 -07008246 if (PageHuge(page)) {
8247 if (!hugepage_migration_supported(page_hstate(head)))
8248 return page;
8249 } else if (!PageLRU(head) && !__PageMovable(head)) {
Qian Cai3d680bd2020-01-30 22:15:01 -08008250 return page;
Rik van Riel1da2f322020-04-01 21:10:31 -07008251 }
Aneesh Kumar K.V464c7ff2018-09-04 15:45:59 -07008252
Matthew Wilcox (Oracle)d8c65462019-09-23 15:34:30 -07008253 skip_pages = compound_nr(head) - (page - head);
Oscar Salvador17e2e7d2018-12-21 14:31:00 -08008254 iter += skip_pages - 1;
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07008255 continue;
8256 }
8257
Minchan Kim97d255c2012-07-31 16:42:59 -07008258 /*
8259 * We can't use page_count without pin a page
8260 * because another CPU can free compound page.
8261 * This check already skips compound tails of THP
Joonsoo Kim0139aa72016-05-19 17:10:49 -07008262 * because their page->_refcount is zero at all time.
Minchan Kim97d255c2012-07-31 16:42:59 -07008263 */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07008264 if (!page_ref_count(page)) {
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008265 if (PageBuddy(page))
8266 iter += (1 << page_order(page)) - 1;
8267 continue;
8268 }
Minchan Kim97d255c2012-07-31 16:42:59 -07008269
Wen Congyangb023f462012-12-11 16:00:45 -08008270 /*
8271 * The HWPoisoned page may not be in the buddy system, and
8272 * page_count() is not 0.
8273 */
David Hildenbrand756d25be2019-11-30 17:54:07 -08008274 if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
Wen Congyangb023f462012-12-11 16:00:45 -08008275 continue;
8276
David Hildenbrandaa218792020-05-07 16:01:30 +02008277 /*
8278 * We treat all PageOffline() pages as movable when offlining
8279 * to give drivers a chance to decrement their reference count
8280 * in MEM_GOING_OFFLINE in order to indicate that these pages
8281 * can be offlined as there are no direct references anymore.
8282 * For actually unmovable PageOffline() where the driver does
8283 * not support this, we will fail later when trying to actually
8284 * move these pages that still have a reference count > 0.
8285 * (false negatives in this function only)
8286 */
8287 if ((flags & MEMORY_OFFLINE) && PageOffline(page))
8288 continue;
8289
David Hildenbrandfe4c86c2020-01-30 22:14:04 -08008290 if (__PageMovable(page) || PageLRU(page))
Yisheng Xie0efadf42017-02-24 14:57:39 -08008291 continue;
8292
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008293 /*
Johannes Weiner6b4f7792014-12-12 16:56:13 -08008294 * If there are RECLAIMABLE pages, we need to check
8295 * them. But for now, memory offlining itself doesn't call
8296 * shrink_node_slabs(), and that still needs to be fixed.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008297 */
8298 /*
8299 * If the page is not RAM, page_count() should be 0.
8300 * We don't need any further check. This is a _used_, not-movable page.
8301 *
8302 * The problematic thing here is PG_reserved pages. PG_reserved
8303 * is set on both memory hole pages and _used_ kernel
8304 * pages at boot.
8305 */
Qian Cai3d680bd2020-01-30 22:15:01 -08008306 return page;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008307 }
Qian Cai4a55c042020-01-30 22:14:57 -08008308 return NULL;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008309}
8310
Alexandre Ghiti8df995f2019-05-13 17:19:00 -07008311#ifdef CONFIG_CONTIG_ALLOC
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008312static unsigned long pfn_max_align_down(unsigned long pfn)
8313{
8314 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8315 pageblock_nr_pages) - 1);
8316}
8317
8318static unsigned long pfn_max_align_up(unsigned long pfn)
8319{
8320 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8321 pageblock_nr_pages));
8322}
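/*
 * Example (illustrative, assuming common x86-64 defaults where
 * MAX_ORDER_NR_PAGES is 1024 and pageblock_nr_pages is 512): the alignment
 * unit is 1024 pfns, so pfn_max_align_down(5000) == 4096 and
 * pfn_max_align_up(5000) == 5120.
 */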
8323
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008324/* [start, end) must belong to a single zone. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008325static int __alloc_contig_migrate_range(struct compact_control *cc,
8326 unsigned long start, unsigned long end)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008327{
8328 /* This function is based on compact_zone() from compaction.c. */
Maninder Singh730ec8c2020-06-03 16:01:18 -07008329 unsigned int nr_reclaimed;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008330 unsigned long pfn = start;
8331 unsigned int tries = 0;
8332 int ret = 0;
8333
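	/*
	 * Loop sketch (descriptive note): isolate a batch of pages starting
	 * at 'pfn', reclaim the clean ones directly and migrate the rest;
	 * give up with -EBUSY if the same batch still has not drained after
	 * five attempts.
	 */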
Marek Szyprowskibe49a6e2012-12-12 13:51:19 -08008334 migrate_prep();
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008335
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008336 while (pfn < end || !list_empty(&cc->migratepages)) {
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008337 if (fatal_signal_pending(current)) {
8338 ret = -EINTR;
8339 break;
8340 }
8341
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008342 if (list_empty(&cc->migratepages)) {
8343 cc->nr_migratepages = 0;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07008344 pfn = isolate_migratepages_range(cc, pfn, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008345 if (!pfn) {
8346 ret = -EINTR;
8347 break;
8348 }
8349 tries = 0;
8350 } else if (++tries == 5) {
8351 ret = ret < 0 ? ret : -EBUSY;
8352 break;
8353 }
8354
Minchan Kimbeb51ea2012-10-08 16:33:51 -07008355 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8356 &cc->migratepages);
8357 cc->nr_migratepages -= nr_reclaimed;
Minchan Kim02c6de82012-10-08 16:31:55 -07008358
Hugh Dickins9c620e22013-02-22 16:35:14 -08008359 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
Anshuman Khandual31025352018-04-05 16:22:08 -07008360 NULL, 0, cc->mode, MR_CONTIG_RANGE);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008361 }
Srinivas Pandruvada2a6f5122013-02-22 16:32:09 -08008362 if (ret < 0) {
8363 putback_movable_pages(&cc->migratepages);
8364 return ret;
8365 }
8366 return 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008367}
8368
8369/**
8370 * alloc_contig_range() -- tries to allocate given range of pages
8371 * @start: start PFN to allocate
8372 * @end: one-past-the-last PFN to allocate
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02008373 * @migratetype: migratetype of the underlying pageblocks (either
8374 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
8375 * in range must have the same migratetype and it must
8376 * be either of the two.
Lucas Stachca96b622017-02-24 14:58:37 -08008377 * @gfp_mask: GFP mask to use during compaction
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008378 *
8379 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
Mike Kravetz2c7452a2018-04-05 16:25:26 -07008380 * aligned. The PFN range must belong to a single zone.
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008381 *
Mike Kravetz2c7452a2018-04-05 16:25:26 -07008382 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
8383 * pageblocks in the range. Once isolated, the pageblocks should not
8384 * be modified by others.
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008385 *
Mike Rapoporta862f682019-03-05 15:48:42 -08008386 * Return: zero on success or negative error code. On success all
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008387 * pages whose PFN is in [start, end) are allocated for the caller and
8388 * need to be freed with free_contig_range().
8389 */
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02008390int alloc_contig_range(unsigned long start, unsigned long end,
Lucas Stachca96b622017-02-24 14:58:37 -08008391 unsigned migratetype, gfp_t gfp_mask)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008392{
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008393 unsigned long outer_start, outer_end;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08008394 unsigned int order;
8395 int ret = 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008396
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008397 struct compact_control cc = {
8398 .nr_migratepages = 0,
8399 .order = -1,
8400 .zone = page_zone(pfn_to_page(start)),
David Rientjese0b9dae2014-06-04 16:08:28 -07008401 .mode = MIGRATE_SYNC,
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008402 .ignore_skip_hint = true,
Vlastimil Babka2583d672017-11-17 15:26:38 -08008403 .no_set_skip_hint = true,
Michal Hocko7dea19f2017-05-03 14:53:15 -07008404 .gfp_mask = current_gfp_context(gfp_mask),
Rik van Rielb06eda02020-04-01 21:10:28 -07008405 .alloc_contig = true,
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008406 };
8407 INIT_LIST_HEAD(&cc.migratepages);
8408
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008409 /*
8410 * What we do here is we mark all pageblocks in range as
8411 * MIGRATE_ISOLATE. Because pageblock and max order pages may
8412 * have different sizes, and due to the way the page allocator
8413 * works, we align the range to the bigger of the two so
8414 * that the page allocator won't try to merge buddies from
8415 * different pageblocks and change MIGRATE_ISOLATE to some
8416 * other migration type.
8417 *
8418 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
8419 * migrate the pages from an unaligned range (i.e. pages that
8420 * we are interested in). This will put all the pages in the
8421 * range back to the page allocator as MIGRATE_ISOLATE.
8422 *
8423 * When this is done, we take the pages in range from page
8424 * allocator removing them from the buddy system. This way
8425 * page allocator will never consider using them.
8426 *
8427 * This lets us mark the pageblocks back as
8428 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
8429 * aligned range but not in the unaligned, original range are
8430 * put back to page allocator so that buddy can use them.
8431 */
8432
8433 ret = start_isolate_page_range(pfn_max_align_down(start),
Michal Hockod381c542018-12-28 00:33:56 -08008434 pfn_max_align_up(end), migratetype, 0);
Qian Cai9b7ea462019-03-28 20:43:34 -07008435 if (ret < 0)
Bob Liu86a595f2012-10-25 13:37:56 -07008436 return ret;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008437
Joonsoo Kim8ef58492016-01-14 15:18:45 -08008438 /*
8439 * In case of -EBUSY, we'd like to know which page causes the problem.
Mike Kravetz63cd4482017-11-29 16:10:01 -08008440 * So, just fall through. test_pages_isolated() has a tracepoint
8441 * which will report the busy page.
8442 *
8443 * It is possible that busy pages could become available before
8444 * the call to test_pages_isolated, and the range will actually be
8445 * allocated. So, if we fall through be sure to clear ret so that
8446 * -EBUSY is not accidentally used or returned to caller.
Joonsoo Kim8ef58492016-01-14 15:18:45 -08008447 */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008448 ret = __alloc_contig_migrate_range(&cc, start, end);
Joonsoo Kim8ef58492016-01-14 15:18:45 -08008449 if (ret && ret != -EBUSY)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008450 goto done;
Mike Kravetz63cd4482017-11-29 16:10:01 -08008451	ret = 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008452
8453 /*
8454 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
8455 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
8456 * more, all pages in [start, end) are free in page allocator.
8457 * What we are going to do is to allocate all pages from
8458 * [start, end) (that is remove them from page allocator).
8459 *
8460 * The only problem is that pages at the beginning and at the
8461 * end of the interesting range may not be aligned with pages that
8462 * the page allocator holds, i.e. they can be part of higher order
8463 * pages. Because of this, we reserve the bigger range and
8464 * once this is done free the pages we are not interested in.
8465 *
8466 * We don't have to hold zone->lock here because the pages are
8467 * isolated thus they won't get removed from buddy.
8468 */
8469
8470 lru_add_drain_all();
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008471
8472 order = 0;
8473 outer_start = start;
8474 while (!PageBuddy(pfn_to_page(outer_start))) {
8475 if (++order >= MAX_ORDER) {
Joonsoo Kim8ef58492016-01-14 15:18:45 -08008476 outer_start = start;
8477 break;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008478 }
8479 outer_start &= ~0UL << order;
8480 }
8481
Joonsoo Kim8ef58492016-01-14 15:18:45 -08008482 if (outer_start != start) {
8483 order = page_order(pfn_to_page(outer_start));
8484
8485 /*
8486 * outer_start page could be small order buddy page and
8487 * it doesn't include start page. Adjust outer_start
8488 * in this case to report failed page properly
8489 * on tracepoint in test_pages_isolated()
8490 */
8491 if (outer_start + (1UL << order) <= start)
8492 outer_start = start;
8493 }
8494
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008495 /* Make sure the range is really isolated. */
David Hildenbrand756d25be2019-11-30 17:54:07 -08008496 if (test_pages_isolated(outer_start, end, 0)) {
Jonathan Toppins75dddef2017-08-10 15:23:35 -07008497 pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
Michal Nazarewiczdae803e2014-11-13 15:19:27 -08008498 __func__, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008499 ret = -EBUSY;
8500 goto done;
8501 }
8502
Marek Szyprowski49f223a2012-01-25 12:49:24 +01008503 /* Grab isolated pages from freelists. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008504 outer_end = isolate_freepages_range(&cc, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008505 if (!outer_end) {
8506 ret = -EBUSY;
8507 goto done;
8508 }
8509
8510 /* Free head and tail (if any) */
8511 if (start != outer_start)
8512 free_contig_range(outer_start, start - outer_start);
8513 if (end != outer_end)
8514 free_contig_range(end, outer_end - end);
8515
8516done:
8517 undo_isolate_page_range(pfn_max_align_down(start),
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02008518 pfn_max_align_up(end), migratetype);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008519 return ret;
8520}
David Hildenbrand255f5982020-05-07 16:01:29 +02008521EXPORT_SYMBOL(alloc_contig_range);
Anshuman Khandual5e27a2d2019-11-30 17:55:06 -08008522
8523static int __alloc_contig_pages(unsigned long start_pfn,
8524 unsigned long nr_pages, gfp_t gfp_mask)
8525{
8526 unsigned long end_pfn = start_pfn + nr_pages;
8527
8528 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
8529 gfp_mask);
8530}
8531
8532static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
8533 unsigned long nr_pages)
8534{
8535 unsigned long i, end_pfn = start_pfn + nr_pages;
8536 struct page *page;
8537
8538 for (i = start_pfn; i < end_pfn; i++) {
8539 page = pfn_to_online_page(i);
8540 if (!page)
8541 return false;
8542
8543 if (page_zone(page) != z)
8544 return false;
8545
8546 if (PageReserved(page))
8547 return false;
8548
8549 if (page_count(page) > 0)
8550 return false;
8551
8552 if (PageHuge(page))
8553 return false;
8554 }
8555 return true;
8556}
8557
8558static bool zone_spans_last_pfn(const struct zone *zone,
8559 unsigned long start_pfn, unsigned long nr_pages)
8560{
8561 unsigned long last_pfn = start_pfn + nr_pages - 1;
8562
8563 return zone_spans_pfn(zone, last_pfn);
8564}
8565
8566/**
8567 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
8568 * @nr_pages: Number of contiguous pages to allocate
8569 * @gfp_mask: GFP mask to limit search and used during compaction
8570 * @nid: Target node
8571 * @nodemask: Mask for other possible nodes
8572 *
8573 * This routine is a wrapper around alloc_contig_range(). It scans over zones
8574 * on an applicable zonelist to find a contiguous pfn range which can then be
8575 * tried for allocation with alloc_contig_range(). This routine is intended
8576 * for allocation requests which can not be fulfilled with the buddy allocator.
8577 *
8578 * The allocated memory is always aligned to a page boundary. If nr_pages is a
8579 * power of two then the alignment is guaranteed to be to the given nr_pages
8580 * (e.g. 1GB request would be aligned to 1GB).
8581 *
8582 * Allocated pages can be freed with free_contig_range() or by manually calling
8583 * __free_page() on each allocated page.
8584 *
8585 * Return: pointer to contiguous pages on success, or NULL if not successful.
8586 */
8587struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
8588 int nid, nodemask_t *nodemask)
8589{
8590 unsigned long ret, pfn, flags;
8591 struct zonelist *zonelist;
8592 struct zone *zone;
8593 struct zoneref *z;
8594
8595 zonelist = node_zonelist(nid, gfp_mask);
8596 for_each_zone_zonelist_nodemask(zone, z, zonelist,
8597 gfp_zone(gfp_mask), nodemask) {
8598 spin_lock_irqsave(&zone->lock, flags);
8599
8600 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
8601 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
8602 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
8603 /*
8604 * We release the zone lock here because
8605 * alloc_contig_range() will also lock the zone
8606 * at some point. If there's an allocation
8607 * spinning on this lock, it may win the race
8608 * and cause alloc_contig_range() to fail...
8609 */
8610 spin_unlock_irqrestore(&zone->lock, flags);
8611 ret = __alloc_contig_pages(pfn, nr_pages,
8612 gfp_mask);
8613 if (!ret)
8614 return pfn_to_page(pfn);
8615 spin_lock_irqsave(&zone->lock, flags);
8616 }
8617 pfn += nr_pages;
8618 }
8619 spin_unlock_irqrestore(&zone->lock, flags);
8620 }
8621 return NULL;
8622}
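/*
 * Illustrative use (hypothetical caller): grab 1GB of physically contiguous
 * memory from node 0 and release it again:
 *
 *	struct page *page = alloc_contig_pages(SZ_1G >> PAGE_SHIFT,
 *						GFP_KERNEL, 0, NULL);
 *	if (page)
 *		free_contig_range(page_to_pfn(page), SZ_1G >> PAGE_SHIFT);
 */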
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07008623#endif /* CONFIG_CONTIG_ALLOC */
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008624
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07008625void free_contig_range(unsigned long pfn, unsigned int nr_pages)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008626{
Marek Szyprowskibcc2b022012-12-20 15:05:18 -08008627 unsigned int count = 0;
8628
8629 for (; nr_pages--; pfn++) {
8630 struct page *page = pfn_to_page(pfn);
8631
8632 count += page_count(page) != 1;
8633 __free_page(page);
8634 }
8635 WARN(count != 0, "%d pages are still in use!\n", count);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008636}
David Hildenbrand255f5982020-05-07 16:01:29 +02008637EXPORT_SYMBOL(free_contig_range);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008638
Cody P Schafer0a647f32013-07-03 15:01:33 -07008639/*
8640 * The zone indicated has a new number of managed_pages; batch sizes and percpu
8641 * page high values need to be recalculated.
8642 */
Jiang Liu4ed7e022012-07-31 16:43:35 -07008643void __meminit zone_pcp_update(struct zone *zone)
8644{
Cody P Schaferc8e251f2013-07-03 15:01:29 -07008645 mutex_lock(&pcp_batch_high_lock);
Mel Gormancb1ef532019-11-30 17:55:11 -08008646 __zone_pcp_update(zone);
Cody P Schaferc8e251f2013-07-03 15:01:29 -07008647 mutex_unlock(&pcp_batch_high_lock);
Jiang Liu4ed7e022012-07-31 16:43:35 -07008648}
Jiang Liu4ed7e022012-07-31 16:43:35 -07008649
Jiang Liu340175b2012-07-31 16:43:32 -07008650void zone_pcp_reset(struct zone *zone)
8651{
8652 unsigned long flags;
Minchan Kim5a883812012-10-08 16:33:39 -07008653 int cpu;
8654 struct per_cpu_pageset *pset;
Jiang Liu340175b2012-07-31 16:43:32 -07008655
8656 /* avoid races with drain_pages() */
8657 local_irq_save(flags);
8658 if (zone->pageset != &boot_pageset) {
Minchan Kim5a883812012-10-08 16:33:39 -07008659 for_each_online_cpu(cpu) {
8660 pset = per_cpu_ptr(zone->pageset, cpu);
8661 drain_zonestat(zone, pset);
8662 }
Jiang Liu340175b2012-07-31 16:43:32 -07008663 free_percpu(zone->pageset);
8664 zone->pageset = &boot_pageset;
8665 }
8666 local_irq_restore(flags);
8667}
8668
Wen Congyang6dcd73d2012-12-11 16:01:01 -08008669#ifdef CONFIG_MEMORY_HOTREMOVE
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008670/*
Joonsoo Kimb9eb6312016-05-19 17:12:06 -07008671 * All pages in the range must be in a single zone and isolated
8672 * before calling this.
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008673 */
Michal Hocko5557c762019-05-13 17:21:24 -07008674unsigned long
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008675__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
8676{
8677 struct page *page;
8678 struct zone *zone;
David Hildenbrand0ee5f4f2019-11-30 17:54:03 -08008679 unsigned int order;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008680 unsigned long pfn;
8681 unsigned long flags;
Michal Hocko5557c762019-05-13 17:21:24 -07008682 unsigned long offlined_pages = 0;
8683
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008684 /* find the first valid pfn */
8685 for (pfn = start_pfn; pfn < end_pfn; pfn++)
8686 if (pfn_valid(pfn))
8687 break;
8688 if (pfn == end_pfn)
Michal Hocko5557c762019-05-13 17:21:24 -07008689 return offlined_pages;
8690
Michal Hocko2d070ea2017-07-06 15:37:56 -07008691 offline_mem_sections(pfn, end_pfn);
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008692 zone = page_zone(pfn_to_page(pfn));
8693 spin_lock_irqsave(&zone->lock, flags);
8694 pfn = start_pfn;
8695 while (pfn < end_pfn) {
8696 if (!pfn_valid(pfn)) {
8697 pfn++;
8698 continue;
8699 }
8700 page = pfn_to_page(pfn);
Wen Congyangb023f462012-12-11 16:00:45 -08008701 /*
8702 * The HWPoisoned page may not be in the buddy system, and
8703 * page_count() is not 0.
8704 */
8705 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
8706 pfn++;
Michal Hocko5557c762019-05-13 17:21:24 -07008707 offlined_pages++;
Wen Congyangb023f462012-12-11 16:00:45 -08008708 continue;
8709 }
David Hildenbrandaa218792020-05-07 16:01:30 +02008710 /*
8711 * At this point all remaining PageOffline() pages have a
8712 * reference count of 0 and can simply be skipped.
8713 */
8714 if (PageOffline(page)) {
8715 BUG_ON(page_count(page));
8716 BUG_ON(PageBuddy(page));
8717 pfn++;
8718 offlined_pages++;
8719 continue;
8720 }
Wen Congyangb023f462012-12-11 16:00:45 -08008721
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008722 BUG_ON(page_count(page));
8723 BUG_ON(!PageBuddy(page));
8724 order = page_order(page);
Michal Hocko5557c762019-05-13 17:21:24 -07008725 offlined_pages += 1 << order;
Alexander Duyck6ab01362020-04-06 20:04:49 -07008726 del_page_from_free_list(page, zone, order);
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008727 pfn += (1 << order);
8728 }
8729 spin_unlock_irqrestore(&zone->lock, flags);
Michal Hocko5557c762019-05-13 17:21:24 -07008730
8731 return offlined_pages;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008732}
8733#endif
Wu Fengguang8d22ba12009-12-16 12:19:58 +01008734
Wu Fengguang8d22ba12009-12-16 12:19:58 +01008735bool is_free_buddy_page(struct page *page)
8736{
8737 struct zone *zone = page_zone(page);
8738 unsigned long pfn = page_to_pfn(page);
8739 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07008740 unsigned int order;
Wu Fengguang8d22ba12009-12-16 12:19:58 +01008741
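	/*
	 * Order walk (descriptive note): at order N the only candidate buddy
	 * head containing this pfn is the one with the low N bits cleared; if
	 * any such head is PageBuddy with order >= N, the page lies inside a
	 * free buddy block.
	 */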
8742 spin_lock_irqsave(&zone->lock, flags);
8743 for (order = 0; order < MAX_ORDER; order++) {
8744 struct page *page_head = page - (pfn & ((1 << order) - 1));
8745
8746 if (PageBuddy(page_head) && page_order(page_head) >= order)
8747 break;
8748 }
8749 spin_unlock_irqrestore(&zone->lock, flags);
8750
8751 return order < MAX_ORDER;
8752}
Naoya Horiguchid4ae9912018-08-23 17:00:42 -07008753
8754#ifdef CONFIG_MEMORY_FAILURE
8755/*
8756 * Set PG_hwpoison flag if a given page is confirmed to be a free page. This
8757 * test is performed under the zone lock to prevent a race against page
8758 * allocation.
8759 */
8760bool set_hwpoison_free_buddy_page(struct page *page)
8761{
8762 struct zone *zone = page_zone(page);
8763 unsigned long pfn = page_to_pfn(page);
8764 unsigned long flags;
8765 unsigned int order;
8766 bool hwpoisoned = false;
8767
8768 spin_lock_irqsave(&zone->lock, flags);
8769 for (order = 0; order < MAX_ORDER; order++) {
8770 struct page *page_head = page - (pfn & ((1 << order) - 1));
8771
8772 if (PageBuddy(page_head) && page_order(page_head) >= order) {
8773 if (!TestSetPageHWPoison(page))
8774 hwpoisoned = true;
8775 break;
8776 }
8777 }
8778 spin_unlock_irqrestore(&zone->lock, flags);
8779
8780 return hwpoisoned;
8781}
8782#endif