Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/mm/page_alloc.c
3 *
 4 * Manages the free list; the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
KOSAKI Motohiro10ed2732008-03-04 14:28:32 -080022#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/bootmem.h>
Yinghai Luedbe7d22010-08-25 13:39:16 -070024#include <linux/memblock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025#include <linux/compiler.h>
Randy Dunlap9f158332005-09-13 01:25:16 -070026#include <linux/kernel.h>
Vegard Nossumb1eeab62008-11-25 16:55:53 +010027#include <linux/kmemcheck.h>
Andrey Ryabininb8c73fc2015-02-13 14:39:28 -080028#include <linux/kasan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/suspend.h>
31#include <linux/pagevec.h>
32#include <linux/blkdev.h>
33#include <linux/slab.h>
Dave Hansena238ab52011-05-24 17:12:16 -070034#include <linux/ratelimit.h>
David Rientjes5a3135c22007-10-16 23:25:53 -070035#include <linux/oom.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <linux/notifier.h>
37#include <linux/topology.h>
38#include <linux/sysctl.h>
39#include <linux/cpu.h>
40#include <linux/cpuset.h>
Dave Hansenbdc8cb92005-10-29 18:16:53 -070041#include <linux/memory_hotplug.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/nodemask.h>
43#include <linux/vmalloc.h>
KOSAKI Motohiroa6cccdc2011-05-24 17:11:33 -070044#include <linux/vmstat.h>
Christoph Lameter4be38e32006-01-06 00:11:17 -080045#include <linux/mempolicy.h>
Dan Williams4b94ffd2016-01-15 16:56:22 -080046#include <linux/memremap.h>
Yasunori Goto68113782006-06-23 02:03:11 -070047#include <linux/stop_machine.h>
Mel Gormanc7132162006-09-27 01:49:43 -070048#include <linux/sort.h>
49#include <linux/pfn.h>
Andrew Morton3fcfab12006-10-19 23:28:16 -070050#include <linux/backing-dev.h>
Akinobu Mita933e3122006-12-08 02:39:45 -080051#include <linux/fault-inject.h>
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070052#include <linux/page-isolation.h>
Joonsoo Kimeefa864b2014-12-12 16:55:46 -080053#include <linux/page_ext.h>
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -070054#include <linux/debugobjects.h>
Catalin Marinasdbb1f812009-06-11 13:23:19 +010055#include <linux/kmemleak.h>
Mel Gorman56de7262010-05-24 14:32:30 -070056#include <linux/compaction.h>
Mel Gorman0d3d0622009-09-21 17:02:44 -070057#include <trace/events/kmem.h>
Linus Torvalds268bb0c2011-05-20 12:50:29 -070058#include <linux/prefetch.h>
Lisa Du6e543d52013-09-11 14:22:36 -070059#include <linux/mm_inline.h>
Michal Nazarewicz041d3a82011-12-29 13:09:50 +010060#include <linux/migrate.h>
Joonsoo Kime30825f2014-12-12 16:55:49 -080061#include <linux/page_ext.h>
David Rientjes949f7ec2013-04-29 15:07:48 -070062#include <linux/hugetlb.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060063#include <linux/sched/rt.h>
Joonsoo Kim48c96a32014-12-12 16:56:01 -080064#include <linux/page_owner.h>
Mel Gorman0e1cc952015-06-30 14:57:27 -070065#include <linux/kthread.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070066
Jiang Liu7ee3d4e2013-07-03 15:03:41 -070067#include <asm/sections.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070068#include <asm/tlbflush.h>
Andrew Mortonac924c62006-05-15 09:43:59 -070069#include <asm/div64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070070#include "internal.h"
71
Cody P Schaferc8e251f2013-07-03 15:01:29 -070072/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
73static DEFINE_MUTEX(pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -070074#define MIN_PERCPU_PAGELIST_FRACTION (8)
Cody P Schaferc8e251f2013-07-03 15:01:29 -070075
Lee Schermerhorn72812012010-05-26 14:44:56 -070076#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
77DEFINE_PER_CPU(int, numa_node);
78EXPORT_PER_CPU_SYMBOL(numa_node);
79#endif
80
Lee Schermerhorn7aac7892010-05-26 14:45:00 -070081#ifdef CONFIG_HAVE_MEMORYLESS_NODES
82/*
83 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
84 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
85 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
86 * defined in <linux/topology.h>.
87 */
88DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
89EXPORT_PER_CPU_SYMBOL(_numa_mem_);
Joonsoo Kimad2c8142014-10-09 15:26:13 -070090int _node_numa_mem_[MAX_NUMNODES];
Lee Schermerhorn7aac7892010-05-26 14:45:00 -070091#endif
92
Linus Torvalds1da177e2005-04-16 15:20:36 -070093/*
Christoph Lameter13808912007-10-16 01:25:27 -070094 * Array of node states.
Linus Torvalds1da177e2005-04-16 15:20:36 -070095 */
Christoph Lameter13808912007-10-16 01:25:27 -070096nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
97 [N_POSSIBLE] = NODE_MASK_ALL,
98 [N_ONLINE] = { { [0] = 1UL } },
99#ifndef CONFIG_NUMA
100 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
101#ifdef CONFIG_HIGHMEM
102 [N_HIGH_MEMORY] = { { [0] = 1UL } },
103#endif
Lai Jiangshan20b2f522012-12-12 13:52:00 -0800104#ifdef CONFIG_MOVABLE_NODE
105 [N_MEMORY] = { { [0] = 1UL } },
106#endif
Christoph Lameter13808912007-10-16 01:25:27 -0700107 [N_CPU] = { { [0] = 1UL } },
108#endif /* NUMA */
109};
110EXPORT_SYMBOL(node_states);
111
Jiang Liuc3d5f5f2013-07-03 15:03:14 -0700112/* Protect totalram_pages and zone->managed_pages */
113static DEFINE_SPINLOCK(managed_page_count_lock);
114
Ravikiran G Thirumalai6c231b72005-09-06 15:17:45 -0700115unsigned long totalram_pages __read_mostly;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -0700116unsigned long totalreserve_pages __read_mostly;
Pintu Kumare48322a2014-12-18 16:17:15 -0800117unsigned long totalcma_pages __read_mostly;
Johannes Weinerab8fabd2012-01-10 15:07:42 -0800118
Hugh Dickins1b76b022012-05-11 01:00:07 -0700119int percpu_pagelist_fraction;
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +1000120gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -0700122/*
123 * A cached value of the page's pageblock's migratetype, used when the page is
124 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
125 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
126 * Also the migratetype set in the page does not necessarily match the pcplist
127 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
128 * other index - this ensures that it will be put on the correct CMA freelist.
129 */
130static inline int get_pcppage_migratetype(struct page *page)
131{
132 return page->index;
133}
134
135static inline void set_pcppage_migratetype(struct page *page, int migratetype)
136{
137 page->index = migratetype;
138}
139
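/*
 * Illustrative flow (a sketch of how the helpers above are used, not a new
 * API): the per-cpu free path looks up get_pfnblock_migratetype() once and
 * caches it with set_pcppage_migratetype(page, mt); when the pcplist is
 * later drained, free_pcppages_bulk() below reads the cached value back with
 * get_pcppage_migratetype() instead of touching the pageblock flags again,
 * falling back to get_pageblock_migratetype() only while isolation is active.
 */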
Rafael J. Wysocki452aa692010-03-05 13:42:13 -0800140#ifdef CONFIG_PM_SLEEP
141/*
142 * The following functions are used by the suspend/hibernate code to temporarily
143 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
144 * while devices are suspended. To avoid races with the suspend/hibernate code,
145 * they should always be called with pm_mutex held (gfp_allowed_mask also should
146 * only be modified with pm_mutex held, unless the suspend/hibernate code is
147 * guaranteed not to run in parallel with that modification).
148 */
Rafael J. Wysockic9e664f2010-12-03 22:57:45 +0100149
150static gfp_t saved_gfp_mask;
151
152void pm_restore_gfp_mask(void)
Rafael J. Wysocki452aa692010-03-05 13:42:13 -0800153{
154 WARN_ON(!mutex_is_locked(&pm_mutex));
Rafael J. Wysockic9e664f2010-12-03 22:57:45 +0100155 if (saved_gfp_mask) {
156 gfp_allowed_mask = saved_gfp_mask;
157 saved_gfp_mask = 0;
158 }
Rafael J. Wysocki452aa692010-03-05 13:42:13 -0800159}
160
Rafael J. Wysockic9e664f2010-12-03 22:57:45 +0100161void pm_restrict_gfp_mask(void)
Rafael J. Wysocki452aa692010-03-05 13:42:13 -0800162{
Rafael J. Wysocki452aa692010-03-05 13:42:13 -0800163 WARN_ON(!mutex_is_locked(&pm_mutex));
Rafael J. Wysockic9e664f2010-12-03 22:57:45 +0100164 WARN_ON(saved_gfp_mask);
165 saved_gfp_mask = gfp_allowed_mask;
Mel Gormand0164ad2015-11-06 16:28:21 -0800166 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
Rafael J. Wysocki452aa692010-03-05 13:42:13 -0800167}
Mel Gormanf90ac392012-01-10 15:07:15 -0800168
169bool pm_suspended_storage(void)
170{
Mel Gormand0164ad2015-11-06 16:28:21 -0800171 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
Mel Gormanf90ac392012-01-10 15:07:15 -0800172 return false;
173 return true;
174}
Rafael J. Wysocki452aa692010-03-05 13:42:13 -0800175#endif /* CONFIG_PM_SLEEP */
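/*
 * Sketch of the intended call sequence (illustrative; the real callers live
 * in the suspend/hibernate core, which holds pm_mutex around it):
 *
 *	pm_restrict_gfp_mask();    allocations lose __GFP_IO and __GFP_FS
 *	... suspend devices, write the hibernation image ...
 *	pm_restore_gfp_mask();     the saved gfp_allowed_mask comes back
 *
 * pm_suspended_storage() lets other code ask whether storage I/O is
 * currently off limits because of such a restriction.
 */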
176
Mel Gormand9c23402007-10-16 01:26:01 -0700177#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -0800178unsigned int pageblock_order __read_mostly;
Mel Gormand9c23402007-10-16 01:26:01 -0700179#endif
180
Hugh Dickinsd98c7a02006-02-14 13:52:59 -0800181static void __free_pages_ok(struct page *page, unsigned int order);
David Howellsa226f6c2006-01-06 00:11:08 -0800182
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183/*
184 * results with 256, 32 in the lowmem_reserve sysctl:
185 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
186 * 1G machine -> (16M dma, 784M normal, 224M high)
187 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
188 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
Yaowei Bai84109e12015-02-12 15:00:22 -0800189 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
Andi Kleena2f1b422005-11-05 17:25:53 +0100190 *
191 * TBD: should special case ZONE_DMA32 machines here - in those we normally
192 * don't need any ZONE_NORMAL reservation
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193 */
Christoph Lameter2f1b6242006-09-25 23:31:13 -0700194int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
Christoph Lameter4b51d662007-02-10 01:43:10 -0800195#ifdef CONFIG_ZONE_DMA
Christoph Lameter2f1b6242006-09-25 23:31:13 -0700196 256,
Christoph Lameter4b51d662007-02-10 01:43:10 -0800197#endif
Christoph Lameterfb0e7942006-09-25 23:31:13 -0700198#ifdef CONFIG_ZONE_DMA32
Christoph Lameter2f1b6242006-09-25 23:31:13 -0700199 256,
Christoph Lameterfb0e7942006-09-25 23:31:13 -0700200#endif
Christoph Lametere53ef382006-09-25 23:31:14 -0700201#ifdef CONFIG_HIGHMEM
Mel Gorman2a1e2742007-07-17 04:03:12 -0700202 32,
Christoph Lametere53ef382006-09-25 23:31:14 -0700203#endif
Mel Gorman2a1e2742007-07-17 04:03:12 -0700204 32,
Christoph Lameter2f1b6242006-09-25 23:31:13 -0700205};
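/*
 * Worked example for the ratios above (illustrative, following the 1G
 * machine in the comment): with a DMA ratio of 256, a NORMAL allocation
 * that falls back to ZONE_DMA must leave about 784M/256 ~= 3M of DMA memory
 * unused; with a NORMAL ratio of 32, a HIGHMEM allocation falling back to
 * ZONE_NORMAL leaves roughly 224M/32 = 7M in reserve.  The ratios are
 * turned into the per-zone lowmem_reserve[] arrays by
 * setup_per_zone_lowmem_reserve() later in this file.
 */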
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206
207EXPORT_SYMBOL(totalram_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208
Helge Deller15ad7cd2006-12-06 20:40:36 -0800209static char * const zone_names[MAX_NR_ZONES] = {
Christoph Lameter4b51d662007-02-10 01:43:10 -0800210#ifdef CONFIG_ZONE_DMA
Christoph Lameter2f1b6242006-09-25 23:31:13 -0700211 "DMA",
Christoph Lameter4b51d662007-02-10 01:43:10 -0800212#endif
Christoph Lameterfb0e7942006-09-25 23:31:13 -0700213#ifdef CONFIG_ZONE_DMA32
Christoph Lameter2f1b6242006-09-25 23:31:13 -0700214 "DMA32",
Christoph Lameterfb0e7942006-09-25 23:31:13 -0700215#endif
Christoph Lameter2f1b6242006-09-25 23:31:13 -0700216 "Normal",
Christoph Lametere53ef382006-09-25 23:31:14 -0700217#ifdef CONFIG_HIGHMEM
Mel Gorman2a1e2742007-07-17 04:03:12 -0700218 "HighMem",
Christoph Lametere53ef382006-09-25 23:31:14 -0700219#endif
Mel Gorman2a1e2742007-07-17 04:03:12 -0700220 "Movable",
Dan Williams033fbae2015-08-09 15:29:06 -0400221#ifdef CONFIG_ZONE_DEVICE
222 "Device",
223#endif
Christoph Lameter2f1b6242006-09-25 23:31:13 -0700224};
225
Vlastimil Babka60f30352016-03-15 14:56:08 -0700226char * const migratetype_names[MIGRATE_TYPES] = {
227 "Unmovable",
228 "Movable",
229 "Reclaimable",
230 "HighAtomic",
231#ifdef CONFIG_CMA
232 "CMA",
233#endif
234#ifdef CONFIG_MEMORY_ISOLATION
235 "Isolate",
236#endif
237};
238
Kirill A. Shutemovf1e61552015-11-06 16:29:50 -0800239compound_page_dtor * const compound_page_dtors[] = {
240 NULL,
241 free_compound_page,
242#ifdef CONFIG_HUGETLB_PAGE
243 free_huge_page,
244#endif
Kirill A. Shutemov9a982252016-01-15 16:54:17 -0800245#ifdef CONFIG_TRANSPARENT_HUGEPAGE
246 free_transhuge_page,
247#endif
Kirill A. Shutemovf1e61552015-11-06 16:29:50 -0800248};
249
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250int min_free_kbytes = 1024;
Han Pingtian42aa83c2014-01-23 15:53:28 -0800251int user_min_free_kbytes = -1;
Johannes Weiner795ae7a2016-03-17 14:19:14 -0700252int watermark_scale_factor = 10;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253
Jan Beulich2c85f512009-09-21 17:03:07 -0700254static unsigned long __meminitdata nr_kernel_pages;
255static unsigned long __meminitdata nr_all_pages;
Yasunori Gotoa3142c82007-05-08 00:23:07 -0700256static unsigned long __meminitdata dma_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257
Tejun Heo0ee332c2011-12-08 10:22:09 -0800258#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
259static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
260static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
261static unsigned long __initdata required_kernelcore;
262static unsigned long __initdata required_movablecore;
263static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
Taku Izumi342332e2016-03-15 14:55:22 -0700264static bool mirrored_kernelcore;
Mel Gormanc7132162006-09-27 01:49:43 -0700265
Tejun Heo0ee332c2011-12-08 10:22:09 -0800266/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
267int movable_zone;
268EXPORT_SYMBOL(movable_zone);
269#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -0700270
Miklos Szeredi418508c2007-05-23 13:57:55 -0700271#if MAX_NUMNODES > 1
272int nr_node_ids __read_mostly = MAX_NUMNODES;
Christoph Lameter62bc62a2009-06-16 15:32:15 -0700273int nr_online_nodes __read_mostly = 1;
Miklos Szeredi418508c2007-05-23 13:57:55 -0700274EXPORT_SYMBOL(nr_node_ids);
Christoph Lameter62bc62a2009-06-16 15:32:15 -0700275EXPORT_SYMBOL(nr_online_nodes);
Miklos Szeredi418508c2007-05-23 13:57:55 -0700276#endif
277
Mel Gorman9ef9acb2007-10-16 01:25:54 -0700278int page_group_by_mobility_disabled __read_mostly;
279
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700280#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
281static inline void reset_deferred_meminit(pg_data_t *pgdat)
282{
283 pgdat->first_deferred_pfn = ULONG_MAX;
284}
285
286/* Returns true if the struct page for the pfn is uninitialised */
Mel Gorman0e1cc952015-06-30 14:57:27 -0700287static inline bool __meminit early_page_uninitialised(unsigned long pfn)
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700288{
Mel Gormanae026b22015-07-17 16:23:48 -0700289 if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700290 return true;
291
292 return false;
293}
294
Mel Gorman7e18adb2015-06-30 14:57:05 -0700295static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
296{
297 if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
298 return true;
299
300 return false;
301}
302
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700303/*
304 * Returns false when the remaining initialisation should be deferred until
305 * later in the boot cycle when it can be parallelised.
306 */
307static inline bool update_defer_init(pg_data_t *pgdat,
308 unsigned long pfn, unsigned long zone_end,
309 unsigned long *nr_initialised)
310{
Li Zhang987b3092016-03-17 14:20:16 -0700311 unsigned long max_initialise;
312
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700313 /* Always populate low zones for address-constrained allocations */
314 if (zone_end < pgdat_end_pfn(pgdat))
315 return true;
Li Zhang987b3092016-03-17 14:20:16 -0700316 /*
317 * Initialise at least 2G of a node but also take into account that
318 * two large system hashes that can take up 1GB for 0.25TB/node.
319 */
320 max_initialise = max(2UL << (30 - PAGE_SHIFT),
321 (pgdat->node_spanned_pages >> 8));
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700322
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700323 (*nr_initialised)++;
Li Zhang987b3092016-03-17 14:20:16 -0700324 if ((*nr_initialised > max_initialise) &&
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700325 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
326 pgdat->first_deferred_pfn = pfn;
327 return false;
328 }
329
330 return true;
331}
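/*
 * Illustrative arithmetic for max_initialise above (assuming 4K pages):
 * 2UL << (30 - PAGE_SHIFT) is 2^19 pages, i.e. 2G of memory, and for a
 * 0.25TB node node_spanned_pages >> 8 is 2^18 pages, i.e. about 1G, which
 * matches the "two large system hashes" allowance in the comment.  Only
 * once more than max_initialise pages of a node are initialised, and only
 * at a section-aligned pfn, is the remainder deferred.
 */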
332#else
333static inline void reset_deferred_meminit(pg_data_t *pgdat)
334{
335}
336
337static inline bool early_page_uninitialised(unsigned long pfn)
338{
339 return false;
340}
341
Mel Gorman7e18adb2015-06-30 14:57:05 -0700342static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
343{
344 return false;
345}
346
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700347static inline bool update_defer_init(pg_data_t *pgdat,
348 unsigned long pfn, unsigned long zone_end,
349 unsigned long *nr_initialised)
350{
351 return true;
352}
353#endif
354
355
Minchan Kimee6f5092012-07-31 16:43:50 -0700356void set_pageblock_migratetype(struct page *page, int migratetype)
Mel Gormanb2a0ac82007-10-16 01:25:48 -0700357{
KOSAKI Motohiro5d0f3f72013-11-12 15:08:18 -0800358 if (unlikely(page_group_by_mobility_disabled &&
359 migratetype < MIGRATE_PCPTYPES))
Mel Gorman49255c62009-06-16 15:31:58 -0700360 migratetype = MIGRATE_UNMOVABLE;
361
Mel Gormanb2a0ac82007-10-16 01:25:48 -0700362 set_pageblock_flags_group(page, (unsigned long)migratetype,
363 PB_migrate, PB_migrate_end);
364}
365
Nick Piggin13e74442006-01-06 00:10:58 -0800366#ifdef CONFIG_DEBUG_VM
Dave Hansenc6a57e12005-10-29 18:16:52 -0700367static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368{
Dave Hansenbdc8cb92005-10-29 18:16:53 -0700369 int ret = 0;
370 unsigned seq;
371 unsigned long pfn = page_to_pfn(page);
Cody P Schaferb5e6a5a2013-02-22 16:35:28 -0800372 unsigned long sp, start_pfn;
Dave Hansenc6a57e12005-10-29 18:16:52 -0700373
Dave Hansenbdc8cb92005-10-29 18:16:53 -0700374 do {
375 seq = zone_span_seqbegin(zone);
Cody P Schaferb5e6a5a2013-02-22 16:35:28 -0800376 start_pfn = zone->zone_start_pfn;
377 sp = zone->spanned_pages;
Cody P Schafer108bcc92013-02-22 16:35:23 -0800378 if (!zone_spans_pfn(zone, pfn))
Dave Hansenbdc8cb92005-10-29 18:16:53 -0700379 ret = 1;
380 } while (zone_span_seqretry(zone, seq));
381
Cody P Schaferb5e6a5a2013-02-22 16:35:28 -0800382 if (ret)
Dave Hansen613813e2014-06-04 16:07:27 -0700383 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
384 pfn, zone_to_nid(zone), zone->name,
385 start_pfn, start_pfn + sp);
Cody P Schaferb5e6a5a2013-02-22 16:35:28 -0800386
Dave Hansenbdc8cb92005-10-29 18:16:53 -0700387 return ret;
Dave Hansenc6a57e12005-10-29 18:16:52 -0700388}
389
390static int page_is_consistent(struct zone *zone, struct page *page)
391{
Andy Whitcroft14e07292007-05-06 14:49:14 -0700392 if (!pfn_valid_within(page_to_pfn(page)))
Dave Hansenc6a57e12005-10-29 18:16:52 -0700393 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394 if (zone != page_zone(page))
Dave Hansenc6a57e12005-10-29 18:16:52 -0700395 return 0;
396
397 return 1;
398}
399/*
400 * Temporary debugging check for pages not lying within a given zone.
401 */
402static int bad_range(struct zone *zone, struct page *page)
403{
404 if (page_outside_zone_boundaries(zone, page))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405 return 1;
Dave Hansenc6a57e12005-10-29 18:16:52 -0700406 if (!page_is_consistent(zone, page))
407 return 1;
408
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409 return 0;
410}
Nick Piggin13e74442006-01-06 00:10:58 -0800411#else
412static inline int bad_range(struct zone *zone, struct page *page)
413{
414 return 0;
415}
416#endif
417
Kirill A. Shutemovd230dec2014-04-07 15:37:38 -0700418static void bad_page(struct page *page, const char *reason,
419 unsigned long bad_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420{
Hugh Dickinsd936cf92009-01-06 14:40:12 -0800421 static unsigned long resume;
422 static unsigned long nr_shown;
423 static unsigned long nr_unshown;
424
Wu Fengguang2a7684a2009-09-16 11:50:12 +0200425 /* Don't complain about poisoned pages */
426 if (PageHWPoison(page)) {
Mel Gorman22b751c2013-02-22 16:34:59 -0800427 page_mapcount_reset(page); /* remove PageBuddy */
Wu Fengguang2a7684a2009-09-16 11:50:12 +0200428 return;
429 }
430
Hugh Dickinsd936cf92009-01-06 14:40:12 -0800431 /*
432 * Allow a burst of 60 reports, then keep quiet for that minute;
433 * or allow a steady drip of one report per second.
434 */
435 if (nr_shown == 60) {
436 if (time_before(jiffies, resume)) {
437 nr_unshown++;
438 goto out;
439 }
440 if (nr_unshown) {
Vlastimil Babkaff8e8112016-03-15 14:56:24 -0700441 pr_alert(
Hugh Dickins1e9e6362009-01-06 14:40:13 -0800442 "BUG: Bad page state: %lu messages suppressed\n",
Hugh Dickinsd936cf92009-01-06 14:40:12 -0800443 nr_unshown);
444 nr_unshown = 0;
445 }
446 nr_shown = 0;
447 }
448 if (nr_shown++ == 0)
449 resume = jiffies + 60 * HZ;
450
Vlastimil Babkaff8e8112016-03-15 14:56:24 -0700451 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
Hugh Dickins3dc14742009-01-06 14:40:08 -0800452 current->comm, page_to_pfn(page));
Vlastimil Babkaff8e8112016-03-15 14:56:24 -0700453 __dump_page(page, reason);
454 bad_flags &= page->flags;
455 if (bad_flags)
456 pr_alert("bad because of flags: %#lx(%pGp)\n",
457 bad_flags, &bad_flags);
Vlastimil Babka4e462112016-03-15 14:56:21 -0700458 dump_page_owner(page);
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -0700459
Dave Jones4f318882011-10-31 17:07:24 -0700460 print_modules();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700461 dump_stack();
Hugh Dickinsd936cf92009-01-06 14:40:12 -0800462out:
Hugh Dickins8cc3b392009-01-06 14:40:06 -0800463 /* Leave bad fields for debug, except PageBuddy could make trouble */
Mel Gorman22b751c2013-02-22 16:34:59 -0800464 page_mapcount_reset(page); /* remove PageBuddy */
Rusty Russell373d4d02013-01-21 17:17:39 +1030465 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466}
467
Linus Torvalds1da177e2005-04-16 15:20:36 -0700468/*
469 * Higher-order pages are called "compound pages". They are structured thusly:
470 *
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -0800471 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700472 *
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -0800473 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 474 * in bit 0 of page->compound_head. The rest of the bits is a pointer to the head page.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475 *
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -0800476 * The first tail page's ->compound_dtor holds the offset into the array of compound
477 * page destructors. See compound_page_dtors.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700478 *
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -0800479 * The first tail page's ->compound_order holds the order of allocation.
Hugh Dickins41d78ba2006-02-14 13:52:58 -0800480 * This usage means that zero-order pages may not be compound.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481 */
Hugh Dickinsd98c7a02006-02-14 13:52:59 -0800482
Kirill A. Shutemov9a982252016-01-15 16:54:17 -0800483void free_compound_page(struct page *page)
Hugh Dickinsd98c7a02006-02-14 13:52:59 -0800484{
Christoph Lameterd85f3382007-05-06 14:49:39 -0700485 __free_pages_ok(page, compound_order(page));
Hugh Dickinsd98c7a02006-02-14 13:52:59 -0800486}
487
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -0800488void prep_compound_page(struct page *page, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700489{
490 int i;
491 int nr_pages = 1 << order;
492
Kirill A. Shutemovf1e61552015-11-06 16:29:50 -0800493 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
Christoph Lameterd85f3382007-05-06 14:49:39 -0700494 set_compound_order(page, order);
Christoph Lameter6d777952007-05-06 14:49:40 -0700495 __SetPageHead(page);
Andy Whitcroft18229df2008-11-06 12:53:27 -0800496 for (i = 1; i < nr_pages; i++) {
497 struct page *p = page + i;
Youquan Song58a84aa2011-12-08 14:34:18 -0800498 set_page_count(p, 0);
Kirill A. Shutemov1c290f62016-01-15 16:52:07 -0800499 p->mapping = TAIL_MAPPING;
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -0800500 set_compound_head(p, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501 }
Kirill A. Shutemov53f92632016-01-15 16:53:42 -0800502 atomic_set(compound_mapcount_ptr(page), -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503}
504
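/*
 * Illustrative example: prep_compound_page(page, 2) above wires up a block
 * of four struct pages: page[0] gets PG_head; each of page[1..3] gets
 * PageTail() with compound_head pointing back at page[0], a zero refcount
 * and mapping set to TAIL_MAPPING; the destructor index and the order (2)
 * are recorded via the first tail page; and the compound mapcount starts
 * at -1.  compound_head() on any of the four then returns page[0], and
 * free_compound_page() releases all four at once as an order-2 block.
 */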
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800505#ifdef CONFIG_DEBUG_PAGEALLOC
506unsigned int _debug_guardpage_minorder;
Christian Borntraegerea6eabb2016-03-15 14:55:30 -0700507bool _debug_pagealloc_enabled __read_mostly
508 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
Joonsoo Kim505f6d22016-03-17 14:17:56 -0700509EXPORT_SYMBOL(_debug_pagealloc_enabled);
Joonsoo Kime30825f2014-12-12 16:55:49 -0800510bool _debug_guardpage_enabled __read_mostly;
511
Joonsoo Kim031bc572014-12-12 16:55:52 -0800512static int __init early_debug_pagealloc(char *buf)
513{
514 if (!buf)
515 return -EINVAL;
516
517 if (strcmp(buf, "on") == 0)
518 _debug_pagealloc_enabled = true;
519
Christian Borntraegerea6eabb2016-03-15 14:55:30 -0700520 if (strcmp(buf, "off") == 0)
521 _debug_pagealloc_enabled = false;
522
Joonsoo Kim031bc572014-12-12 16:55:52 -0800523 return 0;
524}
525early_param("debug_pagealloc", early_debug_pagealloc);
526
Joonsoo Kime30825f2014-12-12 16:55:49 -0800527static bool need_debug_guardpage(void)
528{
Joonsoo Kim031bc572014-12-12 16:55:52 -0800529 /* If we don't use debug_pagealloc, we don't need guard page */
530 if (!debug_pagealloc_enabled())
531 return false;
532
Joonsoo Kime30825f2014-12-12 16:55:49 -0800533 return true;
534}
535
536static void init_debug_guardpage(void)
537{
Joonsoo Kim031bc572014-12-12 16:55:52 -0800538 if (!debug_pagealloc_enabled())
539 return;
540
Joonsoo Kime30825f2014-12-12 16:55:49 -0800541 _debug_guardpage_enabled = true;
542}
543
544struct page_ext_operations debug_guardpage_ops = {
545 .need = need_debug_guardpage,
546 .init = init_debug_guardpage,
547};
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800548
549static int __init debug_guardpage_minorder_setup(char *buf)
550{
551 unsigned long res;
552
553 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
Joe Perches11705322016-03-17 14:19:50 -0700554 pr_err("Bad debug_guardpage_minorder value\n");
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800555 return 0;
556 }
557 _debug_guardpage_minorder = res;
Joe Perches11705322016-03-17 14:19:50 -0700558 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800559 return 0;
560}
561__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
562
Joonsoo Kim2847cf92014-12-12 16:55:01 -0800563static inline void set_page_guard(struct zone *zone, struct page *page,
564 unsigned int order, int migratetype)
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800565{
Joonsoo Kime30825f2014-12-12 16:55:49 -0800566 struct page_ext *page_ext;
567
568 if (!debug_guardpage_enabled())
569 return;
570
571 page_ext = lookup_page_ext(page);
572 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
573
Joonsoo Kim2847cf92014-12-12 16:55:01 -0800574 INIT_LIST_HEAD(&page->lru);
575 set_page_private(page, order);
576 /* Guard pages are not available for any usage */
577 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800578}
579
Joonsoo Kim2847cf92014-12-12 16:55:01 -0800580static inline void clear_page_guard(struct zone *zone, struct page *page,
581 unsigned int order, int migratetype)
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800582{
Joonsoo Kime30825f2014-12-12 16:55:49 -0800583 struct page_ext *page_ext;
584
585 if (!debug_guardpage_enabled())
586 return;
587
588 page_ext = lookup_page_ext(page);
589 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
590
Joonsoo Kim2847cf92014-12-12 16:55:01 -0800591 set_page_private(page, 0);
592 if (!is_migrate_isolate(migratetype))
593 __mod_zone_freepage_state(zone, (1 << order), migratetype);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800594}
595#else
Joonsoo Kime30825f2014-12-12 16:55:49 -0800596struct page_ext_operations debug_guardpage_ops = { NULL, };
Joonsoo Kim2847cf92014-12-12 16:55:01 -0800597static inline void set_page_guard(struct zone *zone, struct page *page,
598 unsigned int order, int migratetype) {}
599static inline void clear_page_guard(struct zone *zone, struct page *page,
600 unsigned int order, int migratetype) {}
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800601#endif
602
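/*
 * Usage sketch (illustrative): booting with "debug_pagealloc=on
 * debug_guardpage_minorder=1" enables the machinery above, and when the
 * allocator later splits a larger free block, split-off buddies below that
 * order are handed to set_page_guard() and kept out of the free lists
 * instead of being reused, so stray accesses just past an allocation are
 * more likely to hit a page that stays free (and, with debug_pagealloc,
 * unmapped) and be caught immediately.
 */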
Mel Gorman7aeb09f2014-06-04 16:10:21 -0700603static inline void set_page_order(struct page *page, unsigned int order)
Andrew Morton6aa3001b22006-04-18 22:20:52 -0700604{
Hugh Dickins4c21e2f2005-10-29 18:16:40 -0700605 set_page_private(page, order);
Nick Piggin676165a2006-04-10 11:21:48 +1000606 __SetPageBuddy(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607}
608
609static inline void rmv_page_order(struct page *page)
610{
Nick Piggin676165a2006-04-10 11:21:48 +1000611 __ClearPageBuddy(page);
Hugh Dickins4c21e2f2005-10-29 18:16:40 -0700612 set_page_private(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613}
614
615/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700616 * This function checks whether a page is free && is the buddy.
 617 * We can coalesce a page and its buddy if
Nick Piggin13e74442006-01-06 00:10:58 -0800618 * (a) the buddy is not in a hole &&
Nick Piggin676165a2006-04-10 11:21:48 +1000619 * (b) the buddy is in the buddy system &&
Andy Whitcroftcb2b95e2006-06-23 02:03:01 -0700620 * (c) a page and its buddy have the same order &&
621 * (d) a page and its buddy are in the same zone.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622 *
Wang Sheng-Huicf6fe942013-09-11 14:22:48 -0700623 * For recording whether a page is in the buddy system, we set ->_mapcount to
624 * PAGE_BUDDY_MAPCOUNT_VALUE.
625 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
626 * serialized by zone->lock.
Nick Piggin676165a2006-04-10 11:21:48 +1000627 *
628 * For recording page's order, we use page_private(page).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700629 */
Andy Whitcroftcb2b95e2006-06-23 02:03:01 -0700630static inline int page_is_buddy(struct page *page, struct page *buddy,
Mel Gorman7aeb09f2014-06-04 16:10:21 -0700631 unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632{
Andy Whitcroft14e07292007-05-06 14:49:14 -0700633 if (!pfn_valid_within(page_to_pfn(buddy)))
Nick Piggin13e74442006-01-06 00:10:58 -0800634 return 0;
Nick Piggin13e74442006-01-06 00:10:58 -0800635
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800636 if (page_is_guard(buddy) && page_order(buddy) == order) {
Mel Gormand34c5fa2014-06-04 16:10:10 -0700637 if (page_zone_id(page) != page_zone_id(buddy))
638 return 0;
639
Weijie Yang4c5018c2015-02-10 14:11:39 -0800640 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
641
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800642 return 1;
643 }
644
Andy Whitcroftcb2b95e2006-06-23 02:03:01 -0700645 if (PageBuddy(buddy) && page_order(buddy) == order) {
Mel Gormand34c5fa2014-06-04 16:10:10 -0700646 /*
647 * zone check is done late to avoid uselessly
648 * calculating zone/node ids for pages that could
649 * never merge.
650 */
651 if (page_zone_id(page) != page_zone_id(buddy))
652 return 0;
653
Weijie Yang4c5018c2015-02-10 14:11:39 -0800654 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
655
Andrew Morton6aa3001b22006-04-18 22:20:52 -0700656 return 1;
Nick Piggin676165a2006-04-10 11:21:48 +1000657 }
Andrew Morton6aa3001b22006-04-18 22:20:52 -0700658 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659}
660
661/*
662 * Freeing function for a buddy system allocator.
663 *
 664 * The concept of a buddy system is to maintain a direct-mapped table
665 * (containing bit values) for memory blocks of various "orders".
666 * The bottom level table contains the map for the smallest allocatable
667 * units of memory (here, pages), and each level above it describes
668 * pairs of units from the levels below, hence, "buddies".
669 * At a high level, all that happens here is marking the table entry
670 * at the bottom level available, and propagating the changes upward
671 * as necessary, plus some accounting needed to play nicely with other
672 * parts of the VM system.
673 * At each level, we keep a list of pages, which are heads of continuous
Wang Sheng-Huicf6fe942013-09-11 14:22:48 -0700674 * free pages of length (1 << order) and marked with _mapcount
675 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
676 * field.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677 * So when we are allocating or freeing one, we can derive the state of the
Michal Nazarewicz5f63b722012-01-11 15:16:11 +0100678 * other. That is, if we allocate a small block, and both were
679 * free, the remainder of the region must be split into blocks.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680 * If a block is freed, and its buddy is also free, then this
Michal Nazarewicz5f63b722012-01-11 15:16:11 +0100681 * triggers coalescing into a block of larger size.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682 *
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +0100683 * -- nyc
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684 */
685
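/*
 * Worked example of the index arithmetic used below (illustrative): for
 * page_idx 12 (0b1100) at order 2, __find_buddy_index() yields
 * 12 ^ (1 << 2) = 8, so the buddy is the order-2 block starting at index 8;
 * when the two merge, combined_idx = buddy_idx & page_idx = 8 becomes the
 * start of the new order-3 block, and the loop retries one order higher.
 */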
Nick Piggin48db57f2006-01-08 01:00:42 -0800686static inline void __free_one_page(struct page *page,
Mel Gormandc4b0ca2014-06-04 16:10:17 -0700687 unsigned long pfn,
Mel Gormaned0ae212009-06-16 15:32:07 -0700688 struct zone *zone, unsigned int order,
689 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690{
691 unsigned long page_idx;
Corrado Zoccolo6dda9d52010-05-24 14:31:54 -0700692 unsigned long combined_idx;
KyongHo Cho43506fa2011-01-13 15:47:24 -0800693 unsigned long uninitialized_var(buddy_idx);
Corrado Zoccolo6dda9d52010-05-24 14:31:54 -0700694 struct page *buddy;
Vlastimil Babkad9dddbf2016-03-25 14:21:50 -0700695 unsigned int max_order;
696
697 max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698
Cody P Schaferd29bb972013-02-22 16:35:25 -0800699 VM_BUG_ON(!zone_is_initialized(zone));
Kirill A. Shutemov6e9f0d52015-02-11 15:25:50 -0800700 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701
Mel Gormaned0ae212009-06-16 15:32:07 -0700702 VM_BUG_ON(migratetype == -1);
Vlastimil Babkad9dddbf2016-03-25 14:21:50 -0700703 if (likely(!is_migrate_isolate(migratetype)))
Joonsoo Kim8f82b552014-11-13 15:19:18 -0800704 __mod_zone_freepage_state(zone, 1 << order, migratetype);
Mel Gormaned0ae212009-06-16 15:32:07 -0700705
Vlastimil Babkad9dddbf2016-03-25 14:21:50 -0700706 page_idx = pfn & ((1 << MAX_ORDER) - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707
Sasha Levin309381fea2014-01-23 15:52:54 -0800708 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
709 VM_BUG_ON_PAGE(bad_range(zone, page), page);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700710
Vlastimil Babkad9dddbf2016-03-25 14:21:50 -0700711continue_merging:
Joonsoo Kim3c605092014-11-13 15:19:21 -0800712 while (order < max_order - 1) {
KyongHo Cho43506fa2011-01-13 15:47:24 -0800713 buddy_idx = __find_buddy_index(page_idx, order);
714 buddy = page + (buddy_idx - page_idx);
Andy Whitcroftcb2b95e2006-06-23 02:03:01 -0700715 if (!page_is_buddy(page, buddy, order))
Vlastimil Babkad9dddbf2016-03-25 14:21:50 -0700716 goto done_merging;
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800717 /*
718 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
719 * merge with it and move up one order.
720 */
721 if (page_is_guard(buddy)) {
Joonsoo Kim2847cf92014-12-12 16:55:01 -0800722 clear_page_guard(zone, buddy, order, migratetype);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -0800723 } else {
724 list_del(&buddy->lru);
725 zone->free_area[order].nr_free--;
726 rmv_page_order(buddy);
727 }
KyongHo Cho43506fa2011-01-13 15:47:24 -0800728 combined_idx = buddy_idx & page_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729 page = page + (combined_idx - page_idx);
730 page_idx = combined_idx;
731 order++;
732 }
Vlastimil Babkad9dddbf2016-03-25 14:21:50 -0700733 if (max_order < MAX_ORDER) {
734 /* If we are here, it means order is >= pageblock_order.
 735 * We want to prevent merging between freepages on isolate
736 * pageblock and normal pageblock. Without this, pageblock
737 * isolation could cause incorrect freepage or CMA accounting.
738 *
739 * We don't want to hit this code for the more frequent
740 * low-order merging.
741 */
742 if (unlikely(has_isolate_pageblock(zone))) {
743 int buddy_mt;
744
745 buddy_idx = __find_buddy_index(page_idx, order);
746 buddy = page + (buddy_idx - page_idx);
747 buddy_mt = get_pageblock_migratetype(buddy);
748
749 if (migratetype != buddy_mt
750 && (is_migrate_isolate(migratetype) ||
751 is_migrate_isolate(buddy_mt)))
752 goto done_merging;
753 }
754 max_order++;
755 goto continue_merging;
756 }
757
758done_merging:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759 set_page_order(page, order);
Corrado Zoccolo6dda9d52010-05-24 14:31:54 -0700760
761 /*
762 * If this is not the largest possible page, check if the buddy
763 * of the next-highest order is free. If it is, it's possible
 764 * that pages are being freed that will coalesce soon. In case
765 * that is happening, add the free page to the tail of the list
766 * so it's less likely to be used soon and more likely to be merged
767 * as a higher order page
768 */
Mel Gormanb7f50cf2010-10-26 14:21:11 -0700769 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
Corrado Zoccolo6dda9d52010-05-24 14:31:54 -0700770 struct page *higher_page, *higher_buddy;
KyongHo Cho43506fa2011-01-13 15:47:24 -0800771 combined_idx = buddy_idx & page_idx;
772 higher_page = page + (combined_idx - page_idx);
773 buddy_idx = __find_buddy_index(combined_idx, order + 1);
Li Haifeng0ba8f2d2012-09-17 14:09:21 -0700774 higher_buddy = higher_page + (buddy_idx - combined_idx);
Corrado Zoccolo6dda9d52010-05-24 14:31:54 -0700775 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
776 list_add_tail(&page->lru,
777 &zone->free_area[order].free_list[migratetype]);
778 goto out;
779 }
780 }
781
782 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
783out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700784 zone->free_area[order].nr_free++;
785}
786
Nick Piggin224abf92006-01-06 00:11:11 -0800787static inline int free_pages_check(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700788{
Kirill A. Shutemovd230dec2014-04-07 15:37:38 -0700789 const char *bad_reason = NULL;
Dave Hansenf0b791a2014-01-23 15:52:49 -0800790 unsigned long bad_flags = 0;
791
Kirill A. Shutemov53f92632016-01-15 16:53:42 -0800792 if (unlikely(atomic_read(&page->_mapcount) != -1))
Dave Hansenf0b791a2014-01-23 15:52:49 -0800793 bad_reason = "nonzero mapcount";
794 if (unlikely(page->mapping != NULL))
795 bad_reason = "non-NULL mapping";
Joonsoo Kimfe896d12016-03-17 14:19:26 -0700796 if (unlikely(page_ref_count(page) != 0))
Joonsoo Kim0139aa72016-05-19 17:10:49 -0700797 bad_reason = "nonzero _refcount";
Dave Hansenf0b791a2014-01-23 15:52:49 -0800798 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
799 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
800 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
801 }
Johannes Weiner9edad6e2014-12-10 15:44:58 -0800802#ifdef CONFIG_MEMCG
803 if (unlikely(page->mem_cgroup))
804 bad_reason = "page still charged to cgroup";
805#endif
Dave Hansenf0b791a2014-01-23 15:52:49 -0800806 if (unlikely(bad_reason)) {
807 bad_page(page, bad_reason, bad_flags);
Hugh Dickins79f4b7b2009-01-06 14:40:05 -0800808 return 1;
Hugh Dickins8cc3b392009-01-06 14:40:06 -0800809 }
Peter Zijlstra90572892013-10-07 11:29:20 +0100810 page_cpupid_reset_last(page);
Hugh Dickins79f4b7b2009-01-06 14:40:05 -0800811 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
812 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
813 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700814}
815
816/*
Mel Gorman5f8dcc22009-09-21 17:03:19 -0700817 * Frees a number of pages from the PCP lists
Linus Torvalds1da177e2005-04-16 15:20:36 -0700818 * Assumes all pages on list are in same zone, and of same order.
Renaud Lienhart207f36e2005-09-10 00:26:59 -0700819 * count is the number of pages to free.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820 *
821 * If the zone was previously in an "all pages pinned" state then look to
822 * see if this freeing clears that state.
823 *
824 * And clear the zone's pages_scanned counter, to hold off the "all pages are
825 * pinned" detection logic.
826 */
Mel Gorman5f8dcc22009-09-21 17:03:19 -0700827static void free_pcppages_bulk(struct zone *zone, int count,
828 struct per_cpu_pages *pcp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829{
Mel Gorman5f8dcc22009-09-21 17:03:19 -0700830 int migratetype = 0;
Mel Gormana6f9edd62009-09-21 17:03:20 -0700831 int batch_free = 0;
Mel Gorman72853e22010-09-09 16:38:16 -0700832 int to_free = count;
Mel Gorman0d5d8232014-08-06 16:07:16 -0700833 unsigned long nr_scanned;
Mel Gorman5f8dcc22009-09-21 17:03:19 -0700834
Nick Pigginc54ad302006-01-06 00:10:56 -0800835 spin_lock(&zone->lock);
Mel Gorman0d5d8232014-08-06 16:07:16 -0700836 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
837 if (nr_scanned)
838 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
Mel Gormanf2260e62009-06-16 15:32:13 -0700839
Mel Gorman72853e22010-09-09 16:38:16 -0700840 while (to_free) {
Nick Piggin48db57f2006-01-08 01:00:42 -0800841 struct page *page;
Mel Gorman5f8dcc22009-09-21 17:03:19 -0700842 struct list_head *list;
Nick Piggin48db57f2006-01-08 01:00:42 -0800843
Mel Gorman5f8dcc22009-09-21 17:03:19 -0700844 /*
Mel Gormana6f9edd62009-09-21 17:03:20 -0700845 * Remove pages from lists in a round-robin fashion. A
846 * batch_free count is maintained that is incremented when an
847 * empty list is encountered. This is so more pages are freed
848 * off fuller lists instead of spinning excessively around empty
849 * lists
Mel Gorman5f8dcc22009-09-21 17:03:19 -0700850 */
851 do {
Mel Gormana6f9edd62009-09-21 17:03:20 -0700852 batch_free++;
Mel Gorman5f8dcc22009-09-21 17:03:19 -0700853 if (++migratetype == MIGRATE_PCPTYPES)
854 migratetype = 0;
855 list = &pcp->lists[migratetype];
856 } while (list_empty(list));
857
Namhyung Kim1d168712011-03-22 16:32:45 -0700858 /* This is the only non-empty list. Free them all. */
859 if (batch_free == MIGRATE_PCPTYPES)
860 batch_free = to_free;
861
Mel Gormana6f9edd62009-09-21 17:03:20 -0700862 do {
Bartlomiej Zolnierkiewicz770c8aa2012-10-08 16:31:57 -0700863 int mt; /* migratetype of the to-be-freed page */
864
Geliang Tanga16601c2016-01-14 15:20:30 -0800865 page = list_last_entry(list, struct page, lru);
Mel Gormana6f9edd62009-09-21 17:03:20 -0700866 /* must delete as __free_one_page list manipulates */
867 list_del(&page->lru);
Vlastimil Babkaaa016d12015-09-08 15:01:22 -0700868
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -0700869 mt = get_pcppage_migratetype(page);
Vlastimil Babkaaa016d12015-09-08 15:01:22 -0700870 /* MIGRATE_ISOLATE page should not go to pcplists */
871 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
872 /* Pageblock could have been isolated meanwhile */
Joonsoo Kim8f82b552014-11-13 15:19:18 -0800873 if (unlikely(has_isolate_pageblock(zone)))
Joonsoo Kim51bb1a42014-11-13 15:19:14 -0800874 mt = get_pageblock_migratetype(page);
Joonsoo Kim51bb1a42014-11-13 15:19:14 -0800875
Mel Gormandc4b0ca2014-06-04 16:10:17 -0700876 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
Bartlomiej Zolnierkiewicz770c8aa2012-10-08 16:31:57 -0700877 trace_mm_page_pcpu_drain(page, 0, mt);
Mel Gorman72853e22010-09-09 16:38:16 -0700878 } while (--to_free && --batch_free && !list_empty(list));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879 }
Nick Pigginc54ad302006-01-06 00:10:56 -0800880 spin_unlock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881}
882
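/*
 * Rough walk-through of the round-robin above (illustrative): pages are
 * pulled from the three pcp lists in turn, taking a larger batch from a
 * list each time empty lists had to be skipped to reach it; and once
 * batch_free reaches MIGRATE_PCPTYPES the list just found is treated as the
 * only non-empty one, so the remaining "to_free" pages are all taken from
 * it in one go instead of re-scanning the empty lists on every pass.
 */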
Mel Gormandc4b0ca2014-06-04 16:10:17 -0700883static void free_one_page(struct zone *zone,
884 struct page *page, unsigned long pfn,
Mel Gorman7aeb09f2014-06-04 16:10:21 -0700885 unsigned int order,
Mel Gormaned0ae212009-06-16 15:32:07 -0700886 int migratetype)
Nick Piggin48db57f2006-01-08 01:00:42 -0800887{
Mel Gorman0d5d8232014-08-06 16:07:16 -0700888 unsigned long nr_scanned;
Christoph Lameter006d22d2006-09-25 23:31:48 -0700889 spin_lock(&zone->lock);
Mel Gorman0d5d8232014-08-06 16:07:16 -0700890 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
891 if (nr_scanned)
892 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
Mel Gormanf2260e62009-06-16 15:32:13 -0700893
Joonsoo Kimad53f922014-11-13 15:19:11 -0800894 if (unlikely(has_isolate_pageblock(zone) ||
895 is_migrate_isolate(migratetype))) {
896 migratetype = get_pfnblock_migratetype(page, pfn);
Joonsoo Kimad53f922014-11-13 15:19:11 -0800897 }
Mel Gormandc4b0ca2014-06-04 16:10:17 -0700898 __free_one_page(page, pfn, zone, order, migratetype);
Christoph Lameter006d22d2006-09-25 23:31:48 -0700899 spin_unlock(&zone->lock);
Nick Piggin48db57f2006-01-08 01:00:42 -0800900}
901
Kirill A. Shutemov81422f22015-02-11 15:25:52 -0800902static int free_tail_pages_check(struct page *head_page, struct page *page)
903{
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -0800904 int ret = 1;
905
906 /*
 907 * We rely on page->lru.next never having bit 0 set, unless the page
908 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
909 */
910 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
911
912 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
913 ret = 0;
914 goto out;
915 }
Kirill A. Shutemov9a982252016-01-15 16:54:17 -0800916 switch (page - head_page) {
917 case 1:
918 /* the first tail page: ->mapping is compound_mapcount() */
Kirill A. Shutemov53f92632016-01-15 16:53:42 -0800919 if (unlikely(compound_mapcount(page))) {
920 bad_page(page, "nonzero compound_mapcount", 0);
921 goto out;
922 }
Kirill A. Shutemov9a982252016-01-15 16:54:17 -0800923 break;
924 case 2:
925 /*
926 * the second tail page: ->mapping is
927 * page_deferred_list().next -- ignore value.
928 */
929 break;
930 default:
931 if (page->mapping != TAIL_MAPPING) {
932 bad_page(page, "corrupted mapping in tail page", 0);
933 goto out;
934 }
935 break;
Kirill A. Shutemov1c290f62016-01-15 16:52:07 -0800936 }
Kirill A. Shutemov81422f22015-02-11 15:25:52 -0800937 if (unlikely(!PageTail(page))) {
938 bad_page(page, "PageTail not set", 0);
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -0800939 goto out;
Kirill A. Shutemov81422f22015-02-11 15:25:52 -0800940 }
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -0800941 if (unlikely(compound_head(page) != head_page)) {
942 bad_page(page, "compound_head not consistent", 0);
943 goto out;
Kirill A. Shutemov81422f22015-02-11 15:25:52 -0800944 }
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -0800945 ret = 0;
946out:
Kirill A. Shutemov1c290f62016-01-15 16:52:07 -0800947 page->mapping = NULL;
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -0800948 clear_compound_head(page);
949 return ret;
Kirill A. Shutemov81422f22015-02-11 15:25:52 -0800950}
951
Robin Holt1e8ce832015-06-30 14:56:45 -0700952static void __meminit __init_single_page(struct page *page, unsigned long pfn,
953 unsigned long zone, int nid)
954{
Robin Holt1e8ce832015-06-30 14:56:45 -0700955 set_page_links(page, zone, nid, pfn);
Robin Holt1e8ce832015-06-30 14:56:45 -0700956 init_page_count(page);
957 page_mapcount_reset(page);
958 page_cpupid_reset_last(page);
Robin Holt1e8ce832015-06-30 14:56:45 -0700959
Robin Holt1e8ce832015-06-30 14:56:45 -0700960 INIT_LIST_HEAD(&page->lru);
961#ifdef WANT_PAGE_VIRTUAL
962 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
963 if (!is_highmem_idx(zone))
964 set_page_address(page, __va(pfn << PAGE_SHIFT));
965#endif
966}
967
968static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
969 int nid)
970{
971 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
972}
973
Mel Gorman7e18adb2015-06-30 14:57:05 -0700974#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
975static void init_reserved_page(unsigned long pfn)
976{
977 pg_data_t *pgdat;
978 int nid, zid;
979
980 if (!early_page_uninitialised(pfn))
981 return;
982
983 nid = early_pfn_to_nid(pfn);
984 pgdat = NODE_DATA(nid);
985
986 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
987 struct zone *zone = &pgdat->node_zones[zid];
988
989 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
990 break;
991 }
992 __init_single_pfn(pfn, zid, nid);
993}
994#else
995static inline void init_reserved_page(unsigned long pfn)
996{
997}
998#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
999
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001000/*
1001 * Initialised pages do not have PageReserved set. This function is
1002 * called for each range allocated by the bootmem allocator and
1003 * marks the pages PageReserved. The remaining valid pages are later
1004 * sent to the buddy page allocator.
1005 */
Mel Gorman7e18adb2015-06-30 14:57:05 -07001006void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001007{
1008 unsigned long start_pfn = PFN_DOWN(start);
1009 unsigned long end_pfn = PFN_UP(end);
1010
Mel Gorman7e18adb2015-06-30 14:57:05 -07001011 for (; start_pfn < end_pfn; start_pfn++) {
1012 if (pfn_valid(start_pfn)) {
1013 struct page *page = pfn_to_page(start_pfn);
1014
1015 init_reserved_page(start_pfn);
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -08001016
1017 /* Avoid false-positive PageTail() */
1018 INIT_LIST_HEAD(&page->lru);
1019
Mel Gorman7e18adb2015-06-30 14:57:05 -07001020 SetPageReserved(page);
1021 }
1022 }
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001023}
1024
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001025static bool free_pages_prepare(struct page *page, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026{
Kirill A. Shutemov81422f22015-02-11 15:25:52 -08001027 bool compound = PageCompound(page);
1028 int i, bad = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029
Yu Zhaoab1f3062014-12-10 15:43:17 -08001030 VM_BUG_ON_PAGE(PageTail(page), page);
Kirill A. Shutemov81422f22015-02-11 15:25:52 -08001031 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
Yu Zhaoab1f3062014-12-10 15:43:17 -08001032
Konstantin Khlebnikovb413d482012-01-10 15:07:09 -08001033 trace_mm_page_free(page, order);
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001034 kmemcheck_free_shadow(page, order);
Andrey Ryabininb8c73fc2015-02-13 14:39:28 -08001035 kasan_free_pages(page, order);
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001036
Andrea Arcangeli8dd60a32011-01-13 15:46:34 -08001037 if (PageAnon(page))
1038 page->mapping = NULL;
Kirill A. Shutemov81422f22015-02-11 15:25:52 -08001039 bad += free_pages_check(page);
1040 for (i = 1; i < (1 << order); i++) {
1041 if (compound)
1042 bad += free_tail_pages_check(page, page + i);
Andrea Arcangeli8dd60a32011-01-13 15:46:34 -08001043 bad += free_pages_check(page + i);
Kirill A. Shutemov81422f22015-02-11 15:25:52 -08001044 }
Hugh Dickins8cc3b392009-01-06 14:40:06 -08001045 if (bad)
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001046 return false;
Hugh Dickins689bceb2005-11-21 21:32:20 -08001047
Joonsoo Kim48c96a32014-12-12 16:56:01 -08001048 reset_page_owner(page, order);
1049
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07001050 if (!PageHighMem(page)) {
Pintu Kumarb8af2942013-09-11 14:20:34 -07001051 debug_check_no_locks_freed(page_address(page),
1052 PAGE_SIZE << order);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07001053 debug_check_no_obj_freed(page_address(page),
1054 PAGE_SIZE << order);
1055 }
Nick Piggindafb1362006-10-11 01:21:30 -07001056 arch_free_page(page, order);
Laura Abbott8823b1d2016-03-15 14:56:27 -07001057 kernel_poison_pages(page, 1 << order, 0);
Nick Piggin48db57f2006-01-08 01:00:42 -08001058 kernel_map_pages(page, 1 << order, 0);
Nick Piggindafb1362006-10-11 01:21:30 -07001059
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001060 return true;
1061}
1062
1063static void __free_pages_ok(struct page *page, unsigned int order)
1064{
1065 unsigned long flags;
Minchan Kim95e34412012-10-08 16:32:11 -07001066 int migratetype;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001067 unsigned long pfn = page_to_pfn(page);
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001068
1069 if (!free_pages_prepare(page, order))
1070 return;
1071
Mel Gormancfc47a22014-06-04 16:10:19 -07001072 migratetype = get_pfnblock_migratetype(page, pfn);
Nick Pigginc54ad302006-01-06 00:10:56 -08001073 local_irq_save(flags);
Christoph Lameterf8891e52006-06-30 01:55:45 -07001074 __count_vm_events(PGFREE, 1 << order);
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001075 free_one_page(page_zone(page), page, pfn, order, migratetype);
Nick Pigginc54ad302006-01-06 00:10:56 -08001076 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077}
1078
Li Zhang949698a2016-05-19 17:11:37 -07001079static void __init __free_pages_boot_core(struct page *page, unsigned int order)
David Howellsa226f6c2006-01-06 00:11:08 -08001080{
Johannes Weinerc3993072012-01-10 15:08:10 -08001081 unsigned int nr_pages = 1 << order;
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001082 struct page *p = page;
Johannes Weinerc3993072012-01-10 15:08:10 -08001083 unsigned int loop;
David Howellsa226f6c2006-01-06 00:11:08 -08001084
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001085 prefetchw(p);
1086 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1087 prefetchw(p + 1);
Johannes Weinerc3993072012-01-10 15:08:10 -08001088 __ClearPageReserved(p);
1089 set_page_count(p, 0);
David Howellsa226f6c2006-01-06 00:11:08 -08001090 }
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001091 __ClearPageReserved(p);
1092 set_page_count(p, 0);
Johannes Weinerc3993072012-01-10 15:08:10 -08001093
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001094 page_zone(page)->managed_pages += nr_pages;
Johannes Weinerc3993072012-01-10 15:08:10 -08001095 set_page_refcounted(page);
1096 __free_pages(page, order);
David Howellsa226f6c2006-01-06 00:11:08 -08001097}
1098
Mel Gorman75a592a2015-06-30 14:56:59 -07001099#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1100 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
Mel Gorman7ace9912015-08-06 15:46:13 -07001101
Mel Gorman75a592a2015-06-30 14:56:59 -07001102static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1103
1104int __meminit early_pfn_to_nid(unsigned long pfn)
1105{
Mel Gorman7ace9912015-08-06 15:46:13 -07001106 static DEFINE_SPINLOCK(early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001107 int nid;
1108
Mel Gorman7ace9912015-08-06 15:46:13 -07001109 spin_lock(&early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001110 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
Mel Gorman7ace9912015-08-06 15:46:13 -07001111 if (nid < 0)
1112 nid = 0;
1113 spin_unlock(&early_pfn_lock);
1114
1115 return nid;
Mel Gorman75a592a2015-06-30 14:56:59 -07001116}
1117#endif
1118
1119#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1120static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1121 struct mminit_pfnnid_cache *state)
1122{
1123 int nid;
1124
1125 nid = __early_pfn_to_nid(pfn, state);
1126 if (nid >= 0 && nid != node)
1127 return false;
1128 return true;
1129}
1130
1131/* Only safe to use early in boot when initialisation is single-threaded */
1132static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1133{
1134 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1135}
1136
1137#else
1138
1139static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1140{
1141 return true;
1142}
1143static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1144 struct mminit_pfnnid_cache *state)
1145{
1146 return true;
1147}
1148#endif
1149
1150
Mel Gorman0e1cc952015-06-30 14:57:27 -07001151void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001152 unsigned int order)
1153{
1154 if (early_page_uninitialised(pfn))
1155 return;
Li Zhang949698a2016-05-19 17:11:37 -07001156 return __free_pages_boot_core(page, order);
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001157}
1158
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001159/*
1160 * Check that the whole of (or a subset of) a pageblock given by the interval
1161 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1162 * with the migration or free compaction scanner. The scanners then need to
1163 * use only the pfn_valid_within() check for arches that allow holes within
1164 * pageblocks.
1165 *
1166 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1167 *
1168 * It's possible on some configurations to have a setup like node0 node1 node0,
1169 * i.e. it's possible that not all pages within a zone's range of pages
1170 * belong to a single zone. We assume that a border between node0 and node1
1171 * can occur within a single pageblock, but not a node0 node1 node0
1172 * interleaving within a single pageblock. It is therefore sufficient to check
1173 * the first and last page of a pageblock and avoid checking each individual
1174 * page in a pageblock.
1175 */
1176struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1177 unsigned long end_pfn, struct zone *zone)
1178{
1179 struct page *start_page;
1180 struct page *end_page;
1181
1182 /* end_pfn is one past the range we are checking */
1183 end_pfn--;
1184
1185 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1186 return NULL;
1187
1188 start_page = pfn_to_page(start_pfn);
1189
1190 if (page_zone(start_page) != zone)
1191 return NULL;
1192
1193 end_page = pfn_to_page(end_pfn);
1194
1195 /* This gives a shorter code than deriving page_zone(end_page) */
1196 if (page_zone_id(start_page) != page_zone_id(end_page))
1197 return NULL;
1198
1199 return start_page;
1200}
1201
1202void set_zone_contiguous(struct zone *zone)
1203{
1204 unsigned long block_start_pfn = zone->zone_start_pfn;
1205 unsigned long block_end_pfn;
1206
1207 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1208 for (; block_start_pfn < zone_end_pfn(zone);
1209 block_start_pfn = block_end_pfn,
1210 block_end_pfn += pageblock_nr_pages) {
1211
1212 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1213
1214 if (!__pageblock_pfn_to_page(block_start_pfn,
1215 block_end_pfn, zone))
1216 return;
1217 }
1218
1219 /* No pageblock contained a hole, so the zone is fully contiguous */
1220 zone->contiguous = true;
1221}
1222
1223void clear_zone_contiguous(struct zone *zone)
1224{
1225 zone->contiguous = false;
1226}
1227
Mel Gorman7e18adb2015-06-30 14:57:05 -07001228#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Mel Gorman0e1cc952015-06-30 14:57:27 -07001229static void __init deferred_free_range(struct page *page,
Mel Gormana4de83d2015-06-30 14:57:16 -07001230 unsigned long pfn, int nr_pages)
1231{
1232 int i;
1233
1234 if (!page)
1235 return;
1236
1237 /* Free a large naturally-aligned chunk if possible */
1238 if (nr_pages == MAX_ORDER_NR_PAGES &&
1239 (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
Mel Gormanac5d2532015-06-30 14:57:20 -07001240 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Li Zhang949698a2016-05-19 17:11:37 -07001241 __free_pages_boot_core(page, MAX_ORDER-1);
Mel Gormana4de83d2015-06-30 14:57:16 -07001242 return;
1243 }
1244
Li Zhang949698a2016-05-19 17:11:37 -07001245 for (i = 0; i < nr_pages; i++, page++)
1246 __free_pages_boot_core(page, 0);
Mel Gormana4de83d2015-06-30 14:57:16 -07001247}
1248
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001249/* Completion tracking for deferred_init_memmap() threads */
1250static atomic_t pgdat_init_n_undone __initdata;
1251static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1252
1253static inline void __init pgdat_init_report_one_done(void)
1254{
1255 if (atomic_dec_and_test(&pgdat_init_n_undone))
1256 complete(&pgdat_init_all_done_comp);
1257}
Mel Gorman0e1cc952015-06-30 14:57:27 -07001258
Mel Gorman7e18adb2015-06-30 14:57:05 -07001259/* Initialise remaining memory on a node */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001260static int __init deferred_init_memmap(void *data)
Mel Gorman7e18adb2015-06-30 14:57:05 -07001261{
Mel Gorman0e1cc952015-06-30 14:57:27 -07001262 pg_data_t *pgdat = data;
1263 int nid = pgdat->node_id;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001264 struct mminit_pfnnid_cache nid_init_state = { };
1265 unsigned long start = jiffies;
1266 unsigned long nr_pages = 0;
1267 unsigned long walk_start, walk_end;
1268 int i, zid;
1269 struct zone *zone;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001270 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
Mel Gorman0e1cc952015-06-30 14:57:27 -07001271 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001272
Mel Gorman0e1cc952015-06-30 14:57:27 -07001273 if (first_init_pfn == ULONG_MAX) {
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001274 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001275 return 0;
1276 }
1277
1278 /* Bind memory initialisation thread to a local node if possible */
1279 if (!cpumask_empty(cpumask))
1280 set_cpus_allowed_ptr(current, cpumask);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001281
1282 /* Sanity check boundaries */
1283 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1284 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1285 pgdat->first_deferred_pfn = ULONG_MAX;
1286
1287 /* Only the highest zone is deferred so find it */
1288 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1289 zone = pgdat->node_zones + zid;
1290 if (first_init_pfn < zone_end_pfn(zone))
1291 break;
1292 }
1293
1294 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1295 unsigned long pfn, end_pfn;
Mel Gorman54608c32015-06-30 14:57:09 -07001296 struct page *page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001297 struct page *free_base_page = NULL;
1298 unsigned long free_base_pfn = 0;
1299 int nr_to_free = 0;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001300
1301 end_pfn = min(walk_end, zone_end_pfn(zone));
1302 pfn = first_init_pfn;
1303 if (pfn < walk_start)
1304 pfn = walk_start;
1305 if (pfn < zone->zone_start_pfn)
1306 pfn = zone->zone_start_pfn;
1307
1308 for (; pfn < end_pfn; pfn++) {
Mel Gorman54608c32015-06-30 14:57:09 -07001309 if (!pfn_valid_within(pfn))
Mel Gormana4de83d2015-06-30 14:57:16 -07001310 goto free_range;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001311
Mel Gorman54608c32015-06-30 14:57:09 -07001312 /*
1313 * Ensure pfn_valid is checked every
1314 * MAX_ORDER_NR_PAGES for memory holes
1315 */
1316 if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
1317 if (!pfn_valid(pfn)) {
1318 page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001319 goto free_range;
Mel Gorman54608c32015-06-30 14:57:09 -07001320 }
1321 }
1322
1323 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1324 page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001325 goto free_range;
Mel Gorman54608c32015-06-30 14:57:09 -07001326 }
1327
1328 /* Minimise pfn page lookups and scheduler checks */
1329 if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
1330 page++;
1331 } else {
Mel Gormana4de83d2015-06-30 14:57:16 -07001332 nr_pages += nr_to_free;
1333 deferred_free_range(free_base_page,
1334 free_base_pfn, nr_to_free);
1335 free_base_page = NULL;
1336 free_base_pfn = nr_to_free = 0;
1337
Mel Gorman54608c32015-06-30 14:57:09 -07001338 page = pfn_to_page(pfn);
1339 cond_resched();
1340 }
Mel Gorman7e18adb2015-06-30 14:57:05 -07001341
1342 if (page->flags) {
1343 VM_BUG_ON(page_zone(page) != zone);
Mel Gormana4de83d2015-06-30 14:57:16 -07001344 goto free_range;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001345 }
1346
1347 __init_single_page(page, pfn, zid, nid);
Mel Gormana4de83d2015-06-30 14:57:16 -07001348 if (!free_base_page) {
1349 free_base_page = page;
1350 free_base_pfn = pfn;
1351 nr_to_free = 0;
1352 }
1353 nr_to_free++;
1354
1355 /* Where possible, batch up pages for a single free */
1356 continue;
1357free_range:
1358 /* Free the current block of pages to allocator */
1359 nr_pages += nr_to_free;
1360 deferred_free_range(free_base_page, free_base_pfn,
1361 nr_to_free);
1362 free_base_page = NULL;
1363 free_base_pfn = nr_to_free = 0;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001364 }
Mel Gormana4de83d2015-06-30 14:57:16 -07001365
Mel Gorman7e18adb2015-06-30 14:57:05 -07001366 first_init_pfn = max(end_pfn, first_init_pfn);
1367 }
1368
1369 /* Sanity check that the next zone really is unpopulated */
1370 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1371
Mel Gorman0e1cc952015-06-30 14:57:27 -07001372 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
Mel Gorman7e18adb2015-06-30 14:57:05 -07001373 jiffies_to_msecs(jiffies - start));
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001374
1375 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001376 return 0;
1377}
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001378#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001379
1380void __init page_alloc_init_late(void)
1381{
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001382 struct zone *zone;
1383
1384#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Mel Gorman0e1cc952015-06-30 14:57:27 -07001385 int nid;
1386
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001387 /* There will be num_node_state(N_MEMORY) threads */
1388 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
Mel Gorman0e1cc952015-06-30 14:57:27 -07001389 for_each_node_state(nid, N_MEMORY) {
Mel Gorman0e1cc952015-06-30 14:57:27 -07001390 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1391 }
1392
1393 /* Block until all are initialised */
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001394 wait_for_completion(&pgdat_init_all_done_comp);
Mel Gorman4248b0d2015-08-06 15:46:20 -07001395
1396 /* Reinit limits that are based on free pages after the kernel is up */
1397 files_maxfiles_init();
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001398#endif
1399
1400 for_each_populated_zone(zone)
1401 set_zone_contiguous(zone);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001402}
Mel Gorman7e18adb2015-06-30 14:57:05 -07001403
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001404#ifdef CONFIG_CMA
Li Zhong9cf510a2013-08-23 13:52:52 +08001405/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001406void __init init_cma_reserved_pageblock(struct page *page)
1407{
1408 unsigned i = pageblock_nr_pages;
1409 struct page *p = page;
1410
1411 do {
1412 __ClearPageReserved(p);
1413 set_page_count(p, 0);
1414 } while (++p, --i);
1415
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001416 set_pageblock_migratetype(page, MIGRATE_CMA);
Michal Nazarewiczdc783272014-07-02 15:22:35 -07001417
1418 if (pageblock_order >= MAX_ORDER) {
1419 i = pageblock_nr_pages;
1420 p = page;
1421 do {
1422 set_page_refcounted(p);
1423 __free_pages(p, MAX_ORDER - 1);
1424 p += MAX_ORDER_NR_PAGES;
1425 } while (i -= MAX_ORDER_NR_PAGES);
1426 } else {
1427 set_page_refcounted(page);
1428 __free_pages(page, pageblock_order);
1429 }
1430
Jiang Liu3dcc0572013-07-03 15:03:21 -07001431 adjust_managed_page_count(page, pageblock_nr_pages);
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001432}
1433#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434
1435/*
1436 * The order of subdivision here is critical for the IO subsystem.
1437 * Please do not alter this order without good reasons and regression
1438 * testing. Specifically, as large blocks of memory are subdivided,
1439 * the order in which smaller blocks are delivered depends on the order
1440 * they're subdivided in this function. This is the primary factor
1441 * influencing the order in which pages are delivered to the IO
1442 * subsystem according to empirical testing, and this is also justified
1443 * by considering the behavior of a buddy system containing a single
1444 * large block of memory acted on by a series of small allocations.
1445 * This behavior is a critical factor in sglist merging's success.
1446 *
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +01001447 * -- nyc
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 */
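/*
 * Worked example of the subdivision above (illustrative note only): if an
 * order-0 page is requested and the smallest non-empty free list holds an
 * order-3 block (8 pages), expand() splits it three times. The upper
 * order-2, order-1 and order-0 halves are placed back on their free lists
 * in that order and the remaining order-0 page is returned, so later small
 * allocations are served from those freed halves.
 */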
Nick Piggin085cc7d52006-01-06 00:11:01 -08001449static inline void expand(struct zone *zone, struct page *page,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001450 int low, int high, struct free_area *area,
1451 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452{
1453 unsigned long size = 1 << high;
1454
1455 while (high > low) {
1456 area--;
1457 high--;
1458 size >>= 1;
Sasha Levin309381fea2014-01-23 15:52:54 -08001459 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001460
Joonsoo Kim2847cf92014-12-12 16:55:01 -08001461 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
Joonsoo Kime30825f2014-12-12 16:55:49 -08001462 debug_guardpage_enabled() &&
Joonsoo Kim2847cf92014-12-12 16:55:01 -08001463 high < debug_guardpage_minorder()) {
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001464 /*
1465 * Mark as guard pages (or page) so they can be merged
1466 * back into the allocator when the buddy is freed.
1467 * The corresponding page table entries will not be touched;
1468 * the pages will stay not present in the virtual address space.
1469 */
Joonsoo Kim2847cf92014-12-12 16:55:01 -08001470 set_page_guard(zone, &page[size], high, migratetype);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001471 continue;
1472 }
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001473 list_add(&page[size].lru, &area->free_list[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 area->nr_free++;
1475 set_page_order(&page[size], high);
1476 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477}
1478
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479/*
1480 * This page is about to be returned from the page allocator
1481 */
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001482static inline int check_new_page(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483{
Kirill A. Shutemovd230dec2014-04-07 15:37:38 -07001484 const char *bad_reason = NULL;
Dave Hansenf0b791a2014-01-23 15:52:49 -08001485 unsigned long bad_flags = 0;
1486
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08001487 if (unlikely(atomic_read(&page->_mapcount) != -1))
Dave Hansenf0b791a2014-01-23 15:52:49 -08001488 bad_reason = "nonzero mapcount";
1489 if (unlikely(page->mapping != NULL))
1490 bad_reason = "non-NULL mapping";
Joonsoo Kimfe896d12016-03-17 14:19:26 -07001491 if (unlikely(page_ref_count(page) != 0))
Dave Hansenf0b791a2014-01-23 15:52:49 -08001492 bad_reason = "nonzero _count";
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07001493 if (unlikely(page->flags & __PG_HWPOISON)) {
1494 bad_reason = "HWPoisoned (hardware-corrupted)";
1495 bad_flags = __PG_HWPOISON;
1496 }
Dave Hansenf0b791a2014-01-23 15:52:49 -08001497 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1498 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1499 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1500 }
Johannes Weiner9edad6e2014-12-10 15:44:58 -08001501#ifdef CONFIG_MEMCG
1502 if (unlikely(page->mem_cgroup))
1503 bad_reason = "page still charged to cgroup";
1504#endif
Dave Hansenf0b791a2014-01-23 15:52:49 -08001505 if (unlikely(bad_reason)) {
1506 bad_page(page, bad_reason, bad_flags);
Hugh Dickins689bceb2005-11-21 21:32:20 -08001507 return 1;
Hugh Dickins8cc3b392009-01-06 14:40:06 -08001508 }
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001509 return 0;
1510}
1511
Laura Abbott1414c7f2016-03-15 14:56:30 -07001512static inline bool free_pages_prezeroed(bool poisoned)
1513{
1514 return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1515 page_poisoning_enabled() && poisoned;
1516}
1517
Vlastimil Babka75379192015-02-11 15:25:38 -08001518static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1519 int alloc_flags)
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001520{
1521 int i;
Laura Abbott1414c7f2016-03-15 14:56:30 -07001522 bool poisoned = true;
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001523
1524 for (i = 0; i < (1 << order); i++) {
1525 struct page *p = page + i;
1526 if (unlikely(check_new_page(p)))
1527 return 1;
Laura Abbott1414c7f2016-03-15 14:56:30 -07001528 if (poisoned)
1529 poisoned &= page_is_poisoned(p);
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001530 }
Hugh Dickins689bceb2005-11-21 21:32:20 -08001531
Hugh Dickins4c21e2f2005-10-29 18:16:40 -07001532 set_page_private(page, 0);
Nick Piggin7835e982006-03-22 00:08:40 -08001533 set_page_refcounted(page);
Nick Piggincc1025092006-12-06 20:32:00 -08001534
1535 arch_alloc_page(page, order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 kernel_map_pages(page, 1 << order, 1);
Laura Abbott8823b1d2016-03-15 14:56:27 -07001537 kernel_poison_pages(page, 1 << order, 1);
Andrey Ryabininb8c73fc2015-02-13 14:39:28 -08001538 kasan_alloc_pages(page, order);
Nick Piggin17cf4402006-03-22 00:08:41 -08001539
Laura Abbott1414c7f2016-03-15 14:56:30 -07001540 if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
Anisse Astierf4d28972015-06-24 16:56:36 -07001541 for (i = 0; i < (1 << order); i++)
1542 clear_highpage(page + i);
Nick Piggin17cf4402006-03-22 00:08:41 -08001543
1544 if (order && (gfp_flags & __GFP_COMP))
1545 prep_compound_page(page, order);
1546
Joonsoo Kim48c96a32014-12-12 16:56:01 -08001547 set_page_owner(page, order, gfp_flags);
1548
Vlastimil Babka75379192015-02-11 15:25:38 -08001549 /*
Michal Hocko2f064f32015-08-21 14:11:51 -07001550 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
Vlastimil Babka75379192015-02-11 15:25:38 -08001551 * allocate the page. The expectation is that the caller is taking
1552 * steps that will free more memory. The caller should avoid the page
1553 * being used for !PFMEMALLOC purposes.
1554 */
Michal Hocko2f064f32015-08-21 14:11:51 -07001555 if (alloc_flags & ALLOC_NO_WATERMARKS)
1556 set_page_pfmemalloc(page);
1557 else
1558 clear_page_pfmemalloc(page);
Vlastimil Babka75379192015-02-11 15:25:38 -08001559
Hugh Dickins689bceb2005-11-21 21:32:20 -08001560 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561}
1562
Mel Gorman56fd56b2007-10-16 01:25:58 -07001563/*
1564 * Go through the free lists for the given migratetype and remove
1565 * the smallest available page from the freelists
1566 */
Mel Gorman728ec982009-06-16 15:32:04 -07001567static inline
1568struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
Mel Gorman56fd56b2007-10-16 01:25:58 -07001569 int migratetype)
1570{
1571 unsigned int current_order;
Pintu Kumarb8af2942013-09-11 14:20:34 -07001572 struct free_area *area;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001573 struct page *page;
1574
1575 /* Find a page of the appropriate size in the preferred list */
1576 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1577 area = &(zone->free_area[current_order]);
Geliang Tanga16601c2016-01-14 15:20:30 -08001578 page = list_first_entry_or_null(&area->free_list[migratetype],
Mel Gorman56fd56b2007-10-16 01:25:58 -07001579 struct page, lru);
Geliang Tanga16601c2016-01-14 15:20:30 -08001580 if (!page)
1581 continue;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001582 list_del(&page->lru);
1583 rmv_page_order(page);
1584 area->nr_free--;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001585 expand(zone, page, order, current_order, area, migratetype);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001586 set_pcppage_migratetype(page, migratetype);
Mel Gorman56fd56b2007-10-16 01:25:58 -07001587 return page;
1588 }
1589
1590 return NULL;
1591}
1592
1593
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001594/*
1595 * This array describes the order lists are fallen back to when
1596 * the free lists for the desirable migrate type are depleted
1597 */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001598static int fallbacks[MIGRATE_TYPES][4] = {
Mel Gorman974a7862015-11-06 16:28:34 -08001599 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1600 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1601 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
Joonsoo Kimdc676472015-04-14 15:45:15 -07001602#ifdef CONFIG_CMA
Mel Gorman974a7862015-11-06 16:28:34 -08001603 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001604#endif
Minchan Kim194159f2013-02-22 16:33:58 -08001605#ifdef CONFIG_MEMORY_ISOLATION
Mel Gorman974a7862015-11-06 16:28:34 -08001606 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
Minchan Kim194159f2013-02-22 16:33:58 -08001607#endif
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001608};
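/*
 * Descriptive note on the table above: an UNMOVABLE allocation whose own
 * free lists are empty falls back to RECLAIMABLE and then MOVABLE
 * pageblocks, while a MOVABLE allocation falls back to RECLAIMABLE and then
 * UNMOVABLE; MIGRATE_TYPES terminates each list. MIGRATE_CMA and
 * MIGRATE_ISOLATE are never used as fallback sources here; CMA pageblocks
 * are reached only via __rmqueue_cma_fallback() for MOVABLE requests.
 */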
1609
Joonsoo Kimdc676472015-04-14 15:45:15 -07001610#ifdef CONFIG_CMA
1611static struct page *__rmqueue_cma_fallback(struct zone *zone,
1612 unsigned int order)
1613{
1614 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1615}
1616#else
1617static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1618 unsigned int order) { return NULL; }
1619#endif
1620
Mel Gormanc361be52007-10-16 01:25:51 -07001621/*
1622 * Move the free pages in a range to the free lists of the requested type.
Mel Gormand9c23402007-10-16 01:26:01 -07001623 * Note that start_page and end_page are not aligned on a pageblock
Mel Gormanc361be52007-10-16 01:25:51 -07001624 * boundary. If alignment is required, use move_freepages_block().
1625 */
Minchan Kim435b4052012-10-08 16:32:16 -07001626int move_freepages(struct zone *zone,
Adrian Bunkb69a7282008-07-23 21:28:12 -07001627 struct page *start_page, struct page *end_page,
1628 int migratetype)
Mel Gormanc361be52007-10-16 01:25:51 -07001629{
1630 struct page *page;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001631 unsigned int order;
Mel Gormand1003132007-10-16 01:26:00 -07001632 int pages_moved = 0;
Mel Gormanc361be52007-10-16 01:25:51 -07001633
1634#ifndef CONFIG_HOLES_IN_ZONE
1635 /*
1636 * page_zone is not safe to call in this context when
1637 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1638 * anyway as we check zone boundaries in move_freepages_block().
1639 * Remove at a later date when no bug reports exist related to
Mel Gormanac0e5b72007-10-16 01:25:58 -07001640 * grouping pages by mobility
Mel Gormanc361be52007-10-16 01:25:51 -07001641 */
Mel Gorman97ee4ba2014-10-09 15:28:28 -07001642 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
Mel Gormanc361be52007-10-16 01:25:51 -07001643#endif
1644
1645 for (page = start_page; page <= end_page;) {
Adam Litke344c7902008-09-02 14:35:38 -07001646 /* Make sure we are not inadvertently changing nodes */
Sasha Levin309381fea2014-01-23 15:52:54 -08001647 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
Adam Litke344c7902008-09-02 14:35:38 -07001648
Mel Gormanc361be52007-10-16 01:25:51 -07001649 if (!pfn_valid_within(page_to_pfn(page))) {
1650 page++;
1651 continue;
1652 }
1653
1654 if (!PageBuddy(page)) {
1655 page++;
1656 continue;
1657 }
1658
1659 order = page_order(page);
Kirill A. Shutemov84be48d2011-03-22 16:33:41 -07001660 list_move(&page->lru,
1661 &zone->free_area[order].free_list[migratetype]);
Mel Gormanc361be52007-10-16 01:25:51 -07001662 page += 1 << order;
Mel Gormand1003132007-10-16 01:26:00 -07001663 pages_moved += 1 << order;
Mel Gormanc361be52007-10-16 01:25:51 -07001664 }
1665
Mel Gormand1003132007-10-16 01:26:00 -07001666 return pages_moved;
Mel Gormanc361be52007-10-16 01:25:51 -07001667}
1668
Minchan Kimee6f5092012-07-31 16:43:50 -07001669int move_freepages_block(struct zone *zone, struct page *page,
Linus Torvalds68e3e922012-06-03 20:05:57 -07001670 int migratetype)
Mel Gormanc361be52007-10-16 01:25:51 -07001671{
1672 unsigned long start_pfn, end_pfn;
1673 struct page *start_page, *end_page;
1674
1675 start_pfn = page_to_pfn(page);
Mel Gormand9c23402007-10-16 01:26:01 -07001676 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
Mel Gormanc361be52007-10-16 01:25:51 -07001677 start_page = pfn_to_page(start_pfn);
Mel Gormand9c23402007-10-16 01:26:01 -07001678 end_page = start_page + pageblock_nr_pages - 1;
1679 end_pfn = start_pfn + pageblock_nr_pages - 1;
Mel Gormanc361be52007-10-16 01:25:51 -07001680
1681 /* Do not cross zone boundaries */
Cody P Schafer108bcc92013-02-22 16:35:23 -08001682 if (!zone_spans_pfn(zone, start_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07001683 start_page = page;
Cody P Schafer108bcc92013-02-22 16:35:23 -08001684 if (!zone_spans_pfn(zone, end_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07001685 return 0;
1686
1687 return move_freepages(zone, start_page, end_page, migratetype);
1688}
1689
Mel Gorman2f66a682009-09-21 17:02:31 -07001690static void change_pageblock_range(struct page *pageblock_page,
1691 int start_order, int migratetype)
1692{
1693 int nr_pageblocks = 1 << (start_order - pageblock_order);
1694
1695 while (nr_pageblocks--) {
1696 set_pageblock_migratetype(pageblock_page, migratetype);
1697 pageblock_page += pageblock_nr_pages;
1698 }
1699}
1700
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001701/*
Vlastimil Babka9c0415e2015-02-11 15:28:21 -08001702 * When we are falling back to another migratetype during allocation, try to
1703 * steal extra free pages from the same pageblocks to satisfy further
1704 * allocations, instead of polluting multiple pageblocks.
1705 *
1706 * If we are stealing a relatively large buddy page, it is likely there will
1707 * be more free pages in the pageblock, so try to steal them all. For
1708 * reclaimable and unmovable allocations, we steal regardless of page size,
1709 * as fragmentation caused by those allocations polluting movable pageblocks
1710 * is worse than movable allocations stealing from unmovable and reclaimable
1711 * pageblocks.
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001712 */
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001713static bool can_steal_fallback(unsigned int order, int start_mt)
1714{
1715 /*
1716 * Keeping this order check here is intentional, even though a more
1717 * relaxed check follows below. The reason is that we can steal the
1718 * whole pageblock when this condition is met, whereas the check below
1719 * does not guarantee that and is only a heuristic, so it could be
1720 * changed at any time.
1721 */
1722 if (order >= pageblock_order)
1723 return true;
1724
1725 if (order >= pageblock_order / 2 ||
1726 start_mt == MIGRATE_RECLAIMABLE ||
1727 start_mt == MIGRATE_UNMOVABLE ||
1728 page_group_by_mobility_disabled)
1729 return true;
1730
1731 return false;
1732}
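/*
 * Illustrative numbers (assuming pageblock_order == 9, i.e. 2MiB pageblocks
 * of 4KiB pages, as on a typical x86-64 configuration): a MOVABLE request
 * may steal only at order >= pageblock_order / 2 == 4, RECLAIMABLE and
 * UNMOVABLE requests may steal at any order, and any request of order >= 9
 * is allowed to take over the whole pageblock.
 */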
1733
1734/*
1735 * This function implements the actual steal behaviour. If the order is large
1736 * enough, we can steal the whole pageblock. If not, we first move the free
1737 * pages in this pageblock and check whether at least half of the pages were
1738 * moved. If so, we can change the migratetype of the pageblock and permanently
1739 * use its pages as the requested migratetype in the future.
1740 */
1741static void steal_suitable_fallback(struct zone *zone, struct page *page,
1742 int start_type)
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001743{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001744 unsigned int current_order = page_order(page);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001745 int pages;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001746
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001747 /* Take ownership for orders >= pageblock_order */
1748 if (current_order >= pageblock_order) {
1749 change_pageblock_range(page, current_order, start_type);
Vlastimil Babka3a1086f2015-02-11 15:28:18 -08001750 return;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001751 }
1752
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001753 pages = move_freepages_block(zone, page, start_type);
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001754
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001755 /* Claim the whole block if over half of it is free */
1756 if (pages >= (1 << (pageblock_order-1)) ||
1757 page_group_by_mobility_disabled)
1758 set_pageblock_migratetype(page, start_type);
1759}
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001760
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001761/*
1762 * Check whether there is a suitable fallback freepage with requested order.
1763 * If only_stealable is true, this function returns fallback_mt only if
1764 * we can steal other freepages all together. This would help to reduce
1765 * fragmentation due to mixed migratetype pages in one pageblock.
1766 */
1767int find_suitable_fallback(struct free_area *area, unsigned int order,
1768 int migratetype, bool only_stealable, bool *can_steal)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001769{
1770 int i;
1771 int fallback_mt;
1772
1773 if (area->nr_free == 0)
1774 return -1;
1775
1776 *can_steal = false;
1777 for (i = 0;; i++) {
1778 fallback_mt = fallbacks[migratetype][i];
Mel Gorman974a7862015-11-06 16:28:34 -08001779 if (fallback_mt == MIGRATE_TYPES)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001780 break;
1781
1782 if (list_empty(&area->free_list[fallback_mt]))
1783 continue;
1784
1785 if (can_steal_fallback(order, migratetype))
1786 *can_steal = true;
1787
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001788 if (!only_stealable)
1789 return fallback_mt;
1790
1791 if (*can_steal)
1792 return fallback_mt;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001793 }
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001794
1795 return -1;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001796}
1797
Mel Gorman0aaa29a2015-11-06 16:28:37 -08001798/*
1799 * Reserve a pageblock for exclusive use of high-order atomic allocations if
1800 * there are no empty page blocks that contain a page with a suitable order
1801 */
1802static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
1803 unsigned int alloc_order)
1804{
1805 int mt;
1806 unsigned long max_managed, flags;
1807
1808 /*
1809 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
1810 * Check is race-prone but harmless.
1811 */
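	/*
	 * Illustrative arithmetic (assuming 4KiB pages and 512-page, i.e.
	 * 2MiB, pageblocks): a zone with ~1 million managed pages yields a
	 * max_managed of roughly 10,000 + 512 pages, so on the order of 20
	 * pageblocks may end up reserved as MIGRATE_HIGHATOMIC.
	 */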
1812 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
1813 if (zone->nr_reserved_highatomic >= max_managed)
1814 return;
1815
1816 spin_lock_irqsave(&zone->lock, flags);
1817
1818 /* Recheck the nr_reserved_highatomic limit under the lock */
1819 if (zone->nr_reserved_highatomic >= max_managed)
1820 goto out_unlock;
1821
1822 /* Yoink! */
1823 mt = get_pageblock_migratetype(page);
1824 if (mt != MIGRATE_HIGHATOMIC &&
1825 !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
1826 zone->nr_reserved_highatomic += pageblock_nr_pages;
1827 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
1828 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
1829 }
1830
1831out_unlock:
1832 spin_unlock_irqrestore(&zone->lock, flags);
1833}
1834
1835/*
1836 * Used when an allocation is about to fail under memory pressure. This
1837 * potentially hurts the reliability of high-order allocations when under
1838 * intense memory pressure but failed atomic allocations should be easier
1839 * to recover from than an OOM.
1840 */
1841static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
1842{
1843 struct zonelist *zonelist = ac->zonelist;
1844 unsigned long flags;
1845 struct zoneref *z;
1846 struct zone *zone;
1847 struct page *page;
1848 int order;
1849
1850 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
1851 ac->nodemask) {
1852 /* Preserve at least one pageblock */
1853 if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
1854 continue;
1855
1856 spin_lock_irqsave(&zone->lock, flags);
1857 for (order = 0; order < MAX_ORDER; order++) {
1858 struct free_area *area = &(zone->free_area[order]);
1859
Geliang Tanga16601c2016-01-14 15:20:30 -08001860 page = list_first_entry_or_null(
1861 &area->free_list[MIGRATE_HIGHATOMIC],
1862 struct page, lru);
1863 if (!page)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08001864 continue;
1865
Mel Gorman0aaa29a2015-11-06 16:28:37 -08001866 /*
1867 * It should never happen but changes to locking could
1868 * inadvertently allow a per-cpu drain to add pages
1869 * to MIGRATE_HIGHATOMIC while unreserving so be safe
1870 * and watch for underflows.
1871 */
1872 zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
1873 zone->nr_reserved_highatomic);
1874
1875 /*
1876 * Convert to ac->migratetype and avoid the normal
1877 * pageblock stealing heuristics. Minimally, the caller
1878 * is doing the work and needs the pages. More
1879 * importantly, if the block was always converted to
1880 * MIGRATE_UNMOVABLE or another type then the number
1881 * of pageblocks that cannot be completely freed
1882 * may increase.
1883 */
1884 set_pageblock_migratetype(page, ac->migratetype);
1885 move_freepages_block(zone, page, ac->migratetype);
1886 spin_unlock_irqrestore(&zone->lock, flags);
1887 return;
1888 }
1889 spin_unlock_irqrestore(&zone->lock, flags);
1890 }
1891}
1892
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001893/* Remove an element from the buddy allocator from the fallback list */
Mel Gorman0ac3a402009-06-16 15:32:06 -07001894static inline struct page *
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001895__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001896{
Pintu Kumarb8af2942013-09-11 14:20:34 -07001897 struct free_area *area;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001898 unsigned int current_order;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001899 struct page *page;
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001900 int fallback_mt;
1901 bool can_steal;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001902
1903 /* Find the largest possible block of pages in the other list */
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001904 for (current_order = MAX_ORDER-1;
1905 current_order >= order && current_order <= MAX_ORDER-1;
1906 --current_order) {
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001907 area = &(zone->free_area[current_order]);
1908 fallback_mt = find_suitable_fallback(area, current_order,
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001909 start_migratetype, false, &can_steal);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001910 if (fallback_mt == -1)
1911 continue;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001912
Geliang Tanga16601c2016-01-14 15:20:30 -08001913 page = list_first_entry(&area->free_list[fallback_mt],
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001914 struct page, lru);
1915 if (can_steal)
1916 steal_suitable_fallback(zone, page, start_migratetype);
Mel Gormane0104872007-10-16 01:25:53 -07001917
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001918 /* Remove the page from the freelists */
1919 area->nr_free--;
1920 list_del(&page->lru);
1921 rmv_page_order(page);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001922
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001923 expand(zone, page, order, current_order, area,
1924 start_migratetype);
1925 /*
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001926 * The pcppage_migratetype may differ from pageblock's
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001927 * migratetype depending on the decisions in
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001928 * find_suitable_fallback(). This is OK as long as it does not
1929 * differ for MIGRATE_CMA pageblocks. Those can be used as
1930 * fallback only via the special __rmqueue_cma_fallback() function.
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001931 */
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001932 set_pcppage_migratetype(page, start_migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001933
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001934 trace_mm_page_alloc_extfrag(page, order, current_order,
1935 start_migratetype, fallback_mt);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001936
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001937 return page;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001938 }
1939
Mel Gorman728ec982009-06-16 15:32:04 -07001940 return NULL;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001941}
1942
Mel Gorman56fd56b2007-10-16 01:25:58 -07001943/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 * Do the hard work of removing an element from the buddy allocator.
1945 * Call me with the zone->lock already held.
1946 */
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001947static struct page *__rmqueue(struct zone *zone, unsigned int order,
Mel Gorman6ac02062016-01-14 15:20:28 -08001948 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 struct page *page;
1951
Mel Gorman56fd56b2007-10-16 01:25:58 -07001952 page = __rmqueue_smallest(zone, order, migratetype);
Mel Gorman974a7862015-11-06 16:28:34 -08001953 if (unlikely(!page)) {
Joonsoo Kimdc676472015-04-14 15:45:15 -07001954 if (migratetype == MIGRATE_MOVABLE)
1955 page = __rmqueue_cma_fallback(zone, order);
1956
1957 if (!page)
1958 page = __rmqueue_fallback(zone, order, migratetype);
Mel Gorman728ec982009-06-16 15:32:04 -07001959 }
1960
Mel Gorman0d3d0622009-09-21 17:02:44 -07001961 trace_mm_page_alloc_zone_locked(page, order, migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001962 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963}
1964
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01001965/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 * Obtain a specified number of elements from the buddy allocator, all under
1967 * a single hold of the lock, for efficiency. Add them to the supplied list.
1968 * Returns the number of new pages which were placed at *list.
1969 */
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01001970static int rmqueue_bulk(struct zone *zone, unsigned int order,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001971 unsigned long count, struct list_head *list,
Mel Gormanb745bc82014-06-04 16:10:22 -07001972 int migratetype, bool cold)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973{
Vlastimil Babka5bcc9f82014-06-04 16:07:22 -07001974 int i;
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01001975
Nick Pigginc54ad302006-01-06 00:10:56 -08001976 spin_lock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 for (i = 0; i < count; ++i) {
Mel Gorman6ac02062016-01-14 15:20:28 -08001978 struct page *page = __rmqueue(zone, order, migratetype);
Nick Piggin085cc7d52006-01-06 00:11:01 -08001979 if (unlikely(page == NULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 break;
Mel Gorman81eabcb2007-12-17 16:20:05 -08001981
1982 /*
1983 * Split buddy pages returned by expand() are received here
1984 * in physical page order. The page is added to the caller's
1985 * list and the list head then moves forward. From the caller's
1986 * perspective, the linked list is ordered by page number in
1987 * some conditions. This is useful for IO devices that can
1988 * merge IO requests if the physical pages are ordered
1989 * properly.
1990 */
Mel Gormanb745bc82014-06-04 16:10:22 -07001991 if (likely(!cold))
Mel Gormane084b2d2009-07-29 15:02:04 -07001992 list_add(&page->lru, list);
1993 else
1994 list_add_tail(&page->lru, list);
Mel Gorman81eabcb2007-12-17 16:20:05 -08001995 list = &page->lru;
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001996 if (is_migrate_cma(get_pcppage_migratetype(page)))
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07001997 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1998 -(1 << order));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 }
Mel Gormanf2260e62009-06-16 15:32:13 -07002000 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
Nick Pigginc54ad302006-01-06 00:10:56 -08002001 spin_unlock(&zone->lock);
Nick Piggin085cc7d52006-01-06 00:11:01 -08002002 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003}
2004
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002005#ifdef CONFIG_NUMA
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002006/*
Christoph Lameter4037d452007-05-09 02:35:14 -07002007 * Called from the vmstat counter updater to drain the pagesets of the
2008 * currently executing processor that belong to remote nodes, after they
2009 * have expired.
2010 *
Christoph Lameter879336c2006-03-22 00:09:08 -08002011 * Note that this function must be called with the thread pinned to
2012 * a single processor.
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002013 */
Christoph Lameter4037d452007-05-09 02:35:14 -07002014void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002015{
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002016 unsigned long flags;
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002017 int to_drain, batch;
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002018
Christoph Lameter4037d452007-05-09 02:35:14 -07002019 local_irq_save(flags);
Jason Low4db0c3c2015-04-15 16:14:08 -07002020 batch = READ_ONCE(pcp->batch);
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002021 to_drain = min(pcp->count, batch);
KOSAKI Motohiro2a135152012-07-31 16:42:53 -07002022 if (to_drain > 0) {
2023 free_pcppages_bulk(zone, to_drain, pcp);
2024 pcp->count -= to_drain;
2025 }
Christoph Lameter4037d452007-05-09 02:35:14 -07002026 local_irq_restore(flags);
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002027}
2028#endif
2029
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002030/*
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002031 * Drain pcplists of the indicated processor and zone.
2032 *
2033 * The processor must either be the current processor and the
2034 * thread pinned to the current processor or a processor that
2035 * is not online.
2036 */
2037static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2038{
2039 unsigned long flags;
2040 struct per_cpu_pageset *pset;
2041 struct per_cpu_pages *pcp;
2042
2043 local_irq_save(flags);
2044 pset = per_cpu_ptr(zone->pageset, cpu);
2045
2046 pcp = &pset->pcp;
2047 if (pcp->count) {
2048 free_pcppages_bulk(zone, pcp->count, pcp);
2049 pcp->count = 0;
2050 }
2051 local_irq_restore(flags);
2052}
2053
2054/*
2055 * Drain pcplists of all zones on the indicated processor.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002056 *
2057 * The processor must either be the current processor and the
2058 * thread pinned to the current processor or a processor that
2059 * is not online.
2060 */
2061static void drain_pages(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062{
2063 struct zone *zone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07002065 for_each_populated_zone(zone) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002066 drain_pages_zone(cpu, zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 }
2068}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002070/*
2071 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002072 *
2073 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2074 * the single zone's pages.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002075 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002076void drain_local_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002077{
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002078 int cpu = smp_processor_id();
2079
2080 if (zone)
2081 drain_pages_zone(cpu, zone);
2082 else
2083 drain_pages(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002084}
2085
2086/*
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002087 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2088 *
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002089 * When zone parameter is non-NULL, spill just the single zone's pages.
2090 *
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002091 * Note that this code is protected against sending an IPI to an offline
2092 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
2093 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
2094 * nothing keeps CPUs from showing up after we populated the cpumask and
2095 * before the call to on_each_cpu_mask().
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002096 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002097void drain_all_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002098{
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002099 int cpu;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002100
2101 /*
2102 * Allocate in the BSS so we won't require allocation in
2103 * the direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2104 */
2105 static cpumask_t cpus_with_pcps;
2106
2107 /*
2108 * We don't care about racing with a CPU hotplug event,
2109 * as the offline notification will cause the notified
2110 * CPU to drain its pcps, and on_each_cpu_mask()
2111 * disables preemption as part of its processing.
2112 */
2113 for_each_online_cpu(cpu) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002114 struct per_cpu_pageset *pcp;
2115 struct zone *z;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002116 bool has_pcps = false;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002117
2118 if (zone) {
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002119 pcp = per_cpu_ptr(zone->pageset, cpu);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002120 if (pcp->pcp.count)
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002121 has_pcps = true;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002122 } else {
2123 for_each_populated_zone(z) {
2124 pcp = per_cpu_ptr(z->pageset, cpu);
2125 if (pcp->pcp.count) {
2126 has_pcps = true;
2127 break;
2128 }
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002129 }
2130 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002131
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002132 if (has_pcps)
2133 cpumask_set_cpu(cpu, &cpus_with_pcps);
2134 else
2135 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2136 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002137 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
2138 zone, 1);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002139}
2140
Rafael J. Wysocki296699d2007-07-29 23:27:18 +02002141#ifdef CONFIG_HIBERNATION
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
2143void mark_free_pages(struct zone *zone)
2144{
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002145 unsigned long pfn, max_zone_pfn;
2146 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002147 unsigned int order, t;
Geliang Tang86760a22016-01-14 15:20:33 -08002148 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149
Xishi Qiu8080fc02013-09-11 14:21:45 -07002150 if (zone_is_empty(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 return;
2152
2153 spin_lock_irqsave(&zone->lock, flags);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002154
Cody P Schafer108bcc92013-02-22 16:35:23 -08002155 max_zone_pfn = zone_end_pfn(zone);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002156 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2157 if (pfn_valid(pfn)) {
Geliang Tang86760a22016-01-14 15:20:33 -08002158 page = pfn_to_page(pfn);
Joonsoo Kimba6b0972016-05-19 17:12:16 -07002159
2160 if (page_zone(page) != zone)
2161 continue;
2162
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002163 if (!swsusp_page_is_forbidden(page))
2164 swsusp_unset_page_free(page);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002165 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002167 for_each_migratetype_order(order, t) {
Geliang Tang86760a22016-01-14 15:20:33 -08002168 list_for_each_entry(page,
2169 &zone->free_area[order].free_list[t], lru) {
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002170 unsigned long i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
Geliang Tang86760a22016-01-14 15:20:33 -08002172 pfn = page_to_pfn(page);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002173 for (i = 0; i < (1UL << order); i++)
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002174 swsusp_set_page_free(pfn_to_page(pfn + i));
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002175 }
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002176 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 spin_unlock_irqrestore(&zone->lock, flags);
2178}
Mel Gormane2c55dc2007-10-16 01:25:50 -07002179#endif /* CONFIG_PM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
2181/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 * Free a 0-order page
Mel Gormanb745bc82014-06-04 16:10:22 -07002183 * cold == true ? free a cold page : free a hot page
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 */
Mel Gormanb745bc82014-06-04 16:10:22 -07002185void free_hot_cold_page(struct page *page, bool cold)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186{
2187 struct zone *zone = page_zone(page);
2188 struct per_cpu_pages *pcp;
2189 unsigned long flags;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002190 unsigned long pfn = page_to_pfn(page);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002191 int migratetype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07002193 if (!free_pages_prepare(page, 0))
Hugh Dickins689bceb2005-11-21 21:32:20 -08002194 return;
2195
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002196 migratetype = get_pfnblock_migratetype(page, pfn);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002197 set_pcppage_migratetype(page, migratetype);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 local_irq_save(flags);
Christoph Lameterf8891e52006-06-30 01:55:45 -07002199 __count_vm_event(PGFREE);
Mel Gormanda456f12009-06-16 15:32:08 -07002200
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002201 /*
2202 * We only track unmovable, reclaimable and movable on pcp lists.
2203 * Free ISOLATE pages back to the allocator because they are being
2204 * offlined but treat RESERVE as movable pages so we can get those
2205 * areas back if necessary. Otherwise, we may have to free
2206 * excessively into the page allocator
2207 */
2208 if (migratetype >= MIGRATE_PCPTYPES) {
Minchan Kim194159f2013-02-22 16:33:58 -08002209 if (unlikely(is_migrate_isolate(migratetype))) {
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002210 free_one_page(zone, page, pfn, 0, migratetype);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002211 goto out;
2212 }
2213 migratetype = MIGRATE_MOVABLE;
2214 }
2215
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09002216 pcp = &this_cpu_ptr(zone->pageset)->pcp;
Mel Gormanb745bc82014-06-04 16:10:22 -07002217 if (!cold)
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002218 list_add(&page->lru, &pcp->lists[migratetype]);
Mel Gormanb745bc82014-06-04 16:10:22 -07002219 else
2220 list_add_tail(&page->lru, &pcp->lists[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 pcp->count++;
Nick Piggin48db57f2006-01-08 01:00:42 -08002222 if (pcp->count >= pcp->high) {
Jason Low4db0c3c2015-04-15 16:14:08 -07002223 unsigned long batch = READ_ONCE(pcp->batch);
Cody P Schafer998d39cb2013-07-03 15:01:32 -07002224 free_pcppages_bulk(zone, batch, pcp);
2225 pcp->count -= batch;
Nick Piggin48db57f2006-01-08 01:00:42 -08002226 }
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002227
2228out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230}
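/*
 * Descriptive note: freeing with cold == false adds the page at the head of
 * the pcp list so it is handed out again first, while it may still be
 * cache-hot; cold == true adds it at the tail, for pages whose contents are
 * not expected to be reused soon.
 */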
2231
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002232/*
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002233 * Free a list of 0-order pages
2234 */
Mel Gormanb745bc82014-06-04 16:10:22 -07002235void free_hot_cold_page_list(struct list_head *list, bool cold)
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002236{
2237 struct page *page, *next;
2238
2239 list_for_each_entry_safe(page, next, list, lru) {
Konstantin Khlebnikovb413d482012-01-10 15:07:09 -08002240 trace_mm_page_free_batched(page, cold);
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002241 free_hot_cold_page(page, cold);
2242 }
2243}
2244
2245/*
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002246 * split_page takes a non-compound higher-order page, and splits it into
2247 * n (1<<order) sub-pages: page[0..n-1]
2248 * Each sub-page must be freed individually.
2249 *
2250 * Note: this is probably too low level an operation for use in drivers.
2251 * Please consult with lkml before using this in your driver.
2252 */
2253void split_page(struct page *page, unsigned int order)
2254{
2255 int i;
Joonsoo Kime2cfc912015-07-17 16:24:18 -07002256 gfp_t gfp_mask;
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002257
Sasha Levin309381fea2014-01-23 15:52:54 -08002258 VM_BUG_ON_PAGE(PageCompound(page), page);
2259 VM_BUG_ON_PAGE(!page_count(page), page);
Vegard Nossumb1eeab62008-11-25 16:55:53 +01002260
2261#ifdef CONFIG_KMEMCHECK
2262 /*
2263 * Split shadow pages too, because free(page[0]) would
2264 * otherwise free the whole shadow.
2265 */
2266 if (kmemcheck_page_is_tracked(page))
2267 split_page(virt_to_page(page[0].shadow), order);
2268#endif
2269
Joonsoo Kime2cfc912015-07-17 16:24:18 -07002270 gfp_mask = get_page_owner_gfp(page);
2271 set_page_owner(page, 0, gfp_mask);
Joonsoo Kim48c96a32014-12-12 16:56:01 -08002272 for (i = 1; i < (1 << order); i++) {
Nick Piggin7835e982006-03-22 00:08:40 -08002273 set_page_refcounted(page + i);
Joonsoo Kime2cfc912015-07-17 16:24:18 -07002274 set_page_owner(page + i, 0, gfp_mask);
Joonsoo Kim48c96a32014-12-12 16:56:01 -08002275 }
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002276}
K. Y. Srinivasan5853ff22013-03-25 15:47:38 -07002277EXPORT_SYMBOL_GPL(split_page);
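/*
 * Illustrative use of split_page() (a sketch only, not a code path in this
 * file; it assumes the usual alloc_pages()/__free_page() helpers):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	// order-2: 4 contiguous pages
 *	if (page) {
 *		int i;
 *
 *		split_page(page, 2);		// now 4 independent order-0 pages
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);	// each sub-page freed on its own
 *	}
 */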
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002278
Joonsoo Kim3c605092014-11-13 15:19:21 -08002279int __isolate_free_page(struct page *page, unsigned int order)
Mel Gorman748446b2010-05-24 14:32:27 -07002280{
Mel Gorman748446b2010-05-24 14:32:27 -07002281 unsigned long watermark;
2282 struct zone *zone;
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07002283 int mt;
Mel Gorman748446b2010-05-24 14:32:27 -07002284
2285 BUG_ON(!PageBuddy(page));
2286
2287 zone = page_zone(page);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002288 mt = get_pageblock_migratetype(page);
Mel Gorman748446b2010-05-24 14:32:27 -07002289
Minchan Kim194159f2013-02-22 16:33:58 -08002290 if (!is_migrate_isolate(mt)) {
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002291 /* Obey watermarks as if the page was being allocated */
2292 watermark = low_wmark_pages(zone) + (1 << order);
2293 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
2294 return 0;
2295
Mel Gorman8fb74b92013-01-11 14:32:16 -08002296 __mod_zone_freepage_state(zone, -(1UL << order), mt);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002297 }
Mel Gorman748446b2010-05-24 14:32:27 -07002298
2299 /* Remove page from free list */
2300 list_del(&page->lru);
2301 zone->free_area[order].nr_free--;
2302 rmv_page_order(page);
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07002303
Joonsoo Kime2cfc912015-07-17 16:24:18 -07002304 set_page_owner(page, order, __GFP_MOVABLE);
Joonsoo Kimf3a14ce2015-07-17 16:24:15 -07002305
Mel Gorman8fb74b92013-01-11 14:32:16 -08002306 /* Set the pageblock if the isolated page is at least a pageblock */
Mel Gorman748446b2010-05-24 14:32:27 -07002307 if (order >= pageblock_order - 1) {
2308 struct page *endpage = page + (1 << order) - 1;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002309 for (; page < endpage; page += pageblock_nr_pages) {
2310 int mt = get_pageblock_migratetype(page);
Minchan Kim194159f2013-02-22 16:33:58 -08002311 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002312 set_pageblock_migratetype(page,
2313 MIGRATE_MOVABLE);
2314 }
Mel Gorman748446b2010-05-24 14:32:27 -07002315 }
2316
Joonsoo Kimf3a14ce2015-07-17 16:24:15 -07002317
Mel Gorman8fb74b92013-01-11 14:32:16 -08002318 return 1UL << order;
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07002319}
2320
2321/*
2322 * Similar to split_page except the page is already free. As this is only
2323 * being used for migration, the migratetype of the block also changes.
2324 * As this is called with interrupts disabled, the caller is responsible
2325 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
2326 * are enabled.
2327 *
2328 * Note: this is probably too low level an operation for use in drivers.
2329 * Please consult with lkml before using this in your driver.
2330 */
2331int split_free_page(struct page *page)
2332{
2333 unsigned int order;
2334 int nr_pages;
2335
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07002336 order = page_order(page);
2337
Mel Gorman8fb74b92013-01-11 14:32:16 -08002338 nr_pages = __isolate_free_page(page, order);
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07002339 if (!nr_pages)
2340 return 0;
2341
2342 /* Split into individual pages */
2343 set_page_refcounted(page);
2344 split_page(page, order);
2345 return nr_pages;
Mel Gorman748446b2010-05-24 14:32:27 -07002346}
2347
2348/*
Vlastimil Babka75379192015-02-11 15:25:38 -08002349 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 */
Mel Gorman0a15c3e2009-06-16 15:32:05 -07002351static inline
2352struct page *buffered_rmqueue(struct zone *preferred_zone,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002353 struct zone *zone, unsigned int order,
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002354 gfp_t gfp_flags, int alloc_flags, int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355{
2356 unsigned long flags;
Hugh Dickins689bceb2005-11-21 21:32:20 -08002357 struct page *page;
Mel Gormanb745bc82014-06-04 16:10:22 -07002358 bool cold = ((gfp_flags & __GFP_COLD) != 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359
Nick Piggin48db57f2006-01-08 01:00:42 -08002360 if (likely(order == 0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 struct per_cpu_pages *pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002362 struct list_head *list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 local_irq_save(flags);
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09002365 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2366 list = &pcp->lists[migratetype];
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002367 if (list_empty(list)) {
Mel Gorman535131e62007-10-16 01:25:49 -07002368 pcp->count += rmqueue_bulk(zone, 0,
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002369 pcp->batch, list,
Mel Gormane084b2d2009-07-29 15:02:04 -07002370 migratetype, cold);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002371 if (unlikely(list_empty(list)))
Shaohua Li6fb332f2009-09-21 17:01:17 -07002372 goto failed;
Mel Gorman535131e62007-10-16 01:25:49 -07002373 }
Mel Gormanb92a6ed2007-10-16 01:25:50 -07002374
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002375 if (cold)
Geliang Tanga16601c2016-01-14 15:20:30 -08002376 page = list_last_entry(list, struct page, lru);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002377 else
Geliang Tanga16601c2016-01-14 15:20:30 -08002378 page = list_first_entry(list, struct page, lru);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002379
Mel Gormanb92a6ed2007-10-16 01:25:50 -07002380 list_del(&page->lru);
2381 pcp->count--;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002382 } else {
Michal Hocko0f352e52016-03-17 14:19:32 -07002383 /*
2384 * We most definitely don't want callers attempting to
2385 * allocate greater than order-1 page units with __GFP_NOFAIL.
2386 */
2387 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 spin_lock_irqsave(&zone->lock, flags);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002389
2390 page = NULL;
2391 if (alloc_flags & ALLOC_HARDER) {
2392 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2393 if (page)
2394 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2395 }
2396 if (!page)
Mel Gorman6ac02062016-01-14 15:20:28 -08002397 page = __rmqueue(zone, order, migratetype);
Nick Piggina74609f2006-01-06 00:11:20 -08002398 spin_unlock(&zone->lock);
2399 if (!page)
2400 goto failed;
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07002401 __mod_zone_freepage_state(zone, -(1 << order),
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002402 get_pcppage_migratetype(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 }
2404
Johannes Weiner3a025762014-04-07 15:37:48 -07002405 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
Johannes Weinerabe5f972014-10-02 16:21:10 -07002406 if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
Johannes Weiner57054652014-10-09 15:28:17 -07002407 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
2408 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
Johannes Weiner27329362014-03-03 15:38:41 -08002409
Christoph Lameterf8891e52006-06-30 01:55:45 -07002410 __count_zone_vm_events(PGALLOC, zone, 1 << order);
Andi Kleen78afd562011-03-22 16:33:12 -07002411 zone_statistics(preferred_zone, zone, gfp_flags);
Nick Piggina74609f2006-01-06 00:11:20 -08002412 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413
Sasha Levin309381fea2014-01-23 15:52:54 -08002414 VM_BUG_ON_PAGE(bad_range(zone, page), page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 return page;
Nick Piggina74609f2006-01-06 00:11:20 -08002416
2417failed:
2418 local_irq_restore(flags);
Nick Piggina74609f2006-01-06 00:11:20 -08002419 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420}
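
/*
 * Illustrative sketch only: buffered_rmqueue() above serves order-0 requests
 * from the per-cpu pcplists.  A caller that does not care about cache
 * hotness - say, a page that will immediately be overwritten by device DMA -
 * can pass __GFP_COLD so the page is taken from the cold tail of the pcp
 * list rather than the hot head.  The helper name below is made up.
 */
static inline struct page *example_alloc_cold_page(void)
{
	return alloc_pages(GFP_KERNEL | __GFP_COLD, 0);
}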
2421
Akinobu Mita933e3122006-12-08 02:39:45 -08002422#ifdef CONFIG_FAIL_PAGE_ALLOC
2423
Akinobu Mitab2588c42011-07-26 16:09:03 -07002424static struct {
Akinobu Mita933e3122006-12-08 02:39:45 -08002425 struct fault_attr attr;
2426
Viresh Kumar621a5f72015-09-26 15:04:07 -07002427 bool ignore_gfp_highmem;
Mel Gorman71baba42015-11-06 16:28:28 -08002428 bool ignore_gfp_reclaim;
Akinobu Mita54114992007-07-15 23:40:23 -07002429 u32 min_order;
Akinobu Mita933e3122006-12-08 02:39:45 -08002430} fail_page_alloc = {
2431 .attr = FAULT_ATTR_INITIALIZER,
Mel Gorman71baba42015-11-06 16:28:28 -08002432 .ignore_gfp_reclaim = true,
Viresh Kumar621a5f72015-09-26 15:04:07 -07002433 .ignore_gfp_highmem = true,
Akinobu Mita54114992007-07-15 23:40:23 -07002434 .min_order = 1,
Akinobu Mita933e3122006-12-08 02:39:45 -08002435};
2436
2437static int __init setup_fail_page_alloc(char *str)
2438{
2439 return setup_fault_attr(&fail_page_alloc.attr, str);
2440}
2441__setup("fail_page_alloc=", setup_fail_page_alloc);
2442
Gavin Shandeaf3862012-07-31 16:41:51 -07002443static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08002444{
Akinobu Mita54114992007-07-15 23:40:23 -07002445 if (order < fail_page_alloc.min_order)
Gavin Shandeaf3862012-07-31 16:41:51 -07002446 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002447 if (gfp_mask & __GFP_NOFAIL)
Gavin Shandeaf3862012-07-31 16:41:51 -07002448 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002449 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
Gavin Shandeaf3862012-07-31 16:41:51 -07002450 return false;
Mel Gorman71baba42015-11-06 16:28:28 -08002451 if (fail_page_alloc.ignore_gfp_reclaim &&
2452 (gfp_mask & __GFP_DIRECT_RECLAIM))
Gavin Shandeaf3862012-07-31 16:41:51 -07002453 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002454
2455 return should_fail(&fail_page_alloc.attr, 1 << order);
2456}
2457
2458#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2459
2460static int __init fail_page_alloc_debugfs(void)
2461{
Al Virof4ae40a62011-07-24 04:33:43 -04002462 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
Akinobu Mita933e3122006-12-08 02:39:45 -08002463 struct dentry *dir;
Akinobu Mita933e3122006-12-08 02:39:45 -08002464
Akinobu Mitadd48c082011-08-03 16:21:01 -07002465 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2466 &fail_page_alloc.attr);
2467 if (IS_ERR(dir))
2468 return PTR_ERR(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08002469
Akinobu Mitab2588c42011-07-26 16:09:03 -07002470 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
Mel Gorman71baba42015-11-06 16:28:28 -08002471 &fail_page_alloc.ignore_gfp_reclaim))
Akinobu Mitab2588c42011-07-26 16:09:03 -07002472 goto fail;
2473 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2474 &fail_page_alloc.ignore_gfp_highmem))
2475 goto fail;
2476 if (!debugfs_create_u32("min-order", mode, dir,
2477 &fail_page_alloc.min_order))
2478 goto fail;
Akinobu Mita933e3122006-12-08 02:39:45 -08002479
Akinobu Mitab2588c42011-07-26 16:09:03 -07002480 return 0;
2481fail:
Akinobu Mitadd48c082011-08-03 16:21:01 -07002482 debugfs_remove_recursive(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08002483
Akinobu Mitab2588c42011-07-26 16:09:03 -07002484 return -ENOMEM;
Akinobu Mita933e3122006-12-08 02:39:45 -08002485}
2486
2487late_initcall(fail_page_alloc_debugfs);
2488
2489#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2490
2491#else /* CONFIG_FAIL_PAGE_ALLOC */
2492
Gavin Shandeaf3862012-07-31 16:41:51 -07002493static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08002494{
Gavin Shandeaf3862012-07-31 16:41:51 -07002495 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002496}
2497
2498#endif /* CONFIG_FAIL_PAGE_ALLOC */
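
/*
 * Illustrative usage note (paths and syntax follow the generic fault
 * injection framework; treat them as an assumption rather than a guarantee):
 * with CONFIG_FAIL_PAGE_ALLOC enabled, allocation failures can be armed from
 * the kernel command line,
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * or at run time through the debugfs directory created above, e.g.
 *
 *	echo 10 > /sys/kernel/debug/fail_page_alloc/probability
 *	echo -1 > /sys/kernel/debug/fail_page_alloc/times
 *	echo 0  > /sys/kernel/debug/fail_page_alloc/min-order
 *
 * min-order, ignore-gfp-wait and ignore-gfp-highmem are the extra knobs
 * registered by fail_page_alloc_debugfs() above.
 */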
2499
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500/*
Mel Gorman97a16fc2015-11-06 16:28:40 -08002501 * Return true if free base pages are above 'mark'. For high-order checks it
2502 * will return true if the order-0 watermark is reached and there is at least
2503 * one free page of a suitable size. Checking now avoids taking the zone lock
2504 * to check in the allocation paths if no pages are free.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 */
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002506static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2507 unsigned long mark, int classzone_idx, int alloc_flags,
2508 long free_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509{
Christoph Lameterd23ad422007-02-10 01:43:02 -08002510 long min = mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 int o;
Mel Gorman97a16fc2015-11-06 16:28:40 -08002512 const int alloc_harder = (alloc_flags & ALLOC_HARDER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002514 /* free_pages may go negative - that's OK */
Michal Hockodf0a6da2012-01-10 15:08:02 -08002515 free_pages -= (1 << order) - 1;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002516
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002517 if (alloc_flags & ALLOC_HIGH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 min -= min / 2;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002519
2520 /*
2521 * If the caller does not have rights to ALLOC_HARDER then subtract
2522 * the high-atomic reserves. This will over-estimate the size of the
2523 * atomic reserve but it avoids a search.
2524 */
Mel Gorman97a16fc2015-11-06 16:28:40 -08002525 if (likely(!alloc_harder))
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002526 free_pages -= z->nr_reserved_highatomic;
2527 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 min -= min / 4;
Mel Gormane2b19192015-11-06 16:28:09 -08002529
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07002530#ifdef CONFIG_CMA
2531 /* If allocation can't use CMA areas don't use free CMA pages */
2532 if (!(alloc_flags & ALLOC_CMA))
Mel Gorman97a16fc2015-11-06 16:28:40 -08002533 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07002534#endif
Tomasz Stanislawski026b0812013-06-12 14:05:02 -07002535
Mel Gorman97a16fc2015-11-06 16:28:40 -08002536 /*
2537 * Check watermarks for an order-0 allocation request. If these
2538 * are not met, then a high-order request also cannot go ahead
2539 * even if a suitable page happened to be free.
2540 */
2541 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
Mel Gorman88f5acf2011-01-13 15:45:41 -08002542 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
Mel Gorman97a16fc2015-11-06 16:28:40 -08002544 /* If this is an order-0 request then the watermark is fine */
2545 if (!order)
2546 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547
Mel Gorman97a16fc2015-11-06 16:28:40 -08002548 /* For a high-order request, check at least one suitable page is free */
2549 for (o = order; o < MAX_ORDER; o++) {
2550 struct free_area *area = &z->free_area[o];
2551 int mt;
2552
2553 if (!area->nr_free)
2554 continue;
2555
2556 if (alloc_harder)
2557 return true;
2558
2559 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2560 if (!list_empty(&area->free_list[mt]))
2561 return true;
2562 }
2563
2564#ifdef CONFIG_CMA
2565 if ((alloc_flags & ALLOC_CMA) &&
2566 !list_empty(&area->free_list[MIGRATE_CMA])) {
2567 return true;
2568 }
2569#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 }
Mel Gorman97a16fc2015-11-06 16:28:40 -08002571 return false;
Mel Gorman88f5acf2011-01-13 15:45:41 -08002572}
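
/*
 * Worked example for the check above (numbers are illustrative only): an
 * order-2 request with ALLOC_HIGH and ALLOC_HARDER against a zone with
 * mark = 128, lowmem_reserve = 0 and 140 free pages.  free_pages becomes
 * 140 - ((1 << 2) - 1) = 137; min becomes 128 - 64 = 64 for ALLOC_HIGH and
 * then 64 - 16 = 48 for ALLOC_HARDER.  Since 137 > 48 the order-0 check
 * passes, and because ALLOC_HARDER is set the request is allowed as soon as
 * any free_area from order 2 upwards has a page on any of its free lists.
 */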
2573
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002574bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
Mel Gorman88f5acf2011-01-13 15:45:41 -08002575 int classzone_idx, int alloc_flags)
2576{
2577 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2578 zone_page_state(z, NR_FREE_PAGES));
2579}
2580
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002581bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
Mel Gormane2b19192015-11-06 16:28:09 -08002582 unsigned long mark, int classzone_idx)
Mel Gorman88f5acf2011-01-13 15:45:41 -08002583{
2584 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2585
2586 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2587 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2588
Mel Gormane2b19192015-11-06 16:28:09 -08002589 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
Mel Gorman88f5acf2011-01-13 15:45:41 -08002590 free_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591}
2592
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002593#ifdef CONFIG_NUMA
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002594static bool zone_local(struct zone *local_zone, struct zone *zone)
2595{
Johannes Weinerfff4068c2013-12-20 14:54:12 +00002596 return local_zone->node == zone->node;
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002597}
2598
David Rientjes957f8222012-10-08 16:33:24 -07002599static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2600{
Mel Gorman5f7a75a2014-06-04 16:07:15 -07002601 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
2602 RECLAIM_DISTANCE;
David Rientjes957f8222012-10-08 16:33:24 -07002603}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002604#else /* CONFIG_NUMA */
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002605static bool zone_local(struct zone *local_zone, struct zone *zone)
2606{
2607 return true;
2608}
2609
David Rientjes957f8222012-10-08 16:33:24 -07002610static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2611{
2612 return true;
2613}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002614#endif /* CONFIG_NUMA */
2615
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002616static void reset_alloc_batches(struct zone *preferred_zone)
2617{
2618 struct zone *zone = preferred_zone->zone_pgdat->node_zones;
2619
2620 do {
2621 mod_zone_page_state(zone, NR_ALLOC_BATCH,
2622 high_wmark_pages(zone) - low_wmark_pages(zone) -
2623 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
Johannes Weiner57054652014-10-09 15:28:17 -07002624 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002625 } while (zone++ != preferred_zone);
2626}
2627
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002628/*
Paul Jackson0798e512006-12-06 20:31:38 -08002629 * get_page_from_freelist goes through the zonelist trying to allocate
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002630 * a page.
2631 */
2632static struct page *
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002633get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2634 const struct alloc_context *ac)
Martin Hicks753ee722005-06-21 17:14:41 -07002635{
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002636 struct zonelist *zonelist = ac->zonelist;
Mel Gormandd1a2392008-04-28 02:12:17 -07002637 struct zoneref *z;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002638 struct page *page = NULL;
Mel Gorman5117f452009-06-16 15:31:59 -07002639 struct zone *zone;
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002640 int nr_fair_skipped = 0;
2641 bool zonelist_rescan;
Mel Gorman54a6eb52008-04-28 02:12:16 -07002642
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002643zonelist_scan:
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002644 zonelist_rescan = false;
2645
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002646 /*
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002647 * Scan zonelist, looking for a zone with enough free.
Vladimir Davydov344736f2014-10-20 15:50:30 +04002648 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002649 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002650 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2651 ac->nodemask) {
Johannes Weinere085dbc2013-09-11 14:20:46 -07002652 unsigned long mark;
2653
Mel Gorman664eedd2014-06-04 16:10:08 -07002654 if (cpusets_enabled() &&
2655 (alloc_flags & ALLOC_CPUSET) &&
Vladimir Davydov344736f2014-10-20 15:50:30 +04002656 !cpuset_zone_allowed(zone, gfp_mask))
Mel Gormancd38b112011-07-25 17:12:29 -07002657 continue;
Johannes Weinera756cf52012-01-10 15:07:49 -08002658 /*
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002659 * Distribute pages in proportion to the individual
2660 * zone size to ensure fair page aging. The zone a
2661 * page was allocated in should have no effect on the
2662 * time the page has in memory before being reclaimed.
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002663 */
Johannes Weiner3a025762014-04-07 15:37:48 -07002664 if (alloc_flags & ALLOC_FAIR) {
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002665 if (!zone_local(ac->preferred_zone, zone))
Mel Gormanf7b5d642014-08-06 16:07:20 -07002666 break;
Johannes Weiner57054652014-10-09 15:28:17 -07002667 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002668 nr_fair_skipped++;
Johannes Weiner3a025762014-04-07 15:37:48 -07002669 continue;
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002670 }
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002671 }
2672 /*
Johannes Weinera756cf52012-01-10 15:07:49 -08002673 * When allocating a page cache page for writing, we
2674 * want to get it from a zone that is within its dirty
2675 * limit, such that no single zone holds more than its
2676 * proportional share of globally allowed dirty pages.
2677 * The dirty limits take into account the zone's
2678 * lowmem reserves and high watermark so that kswapd
2679 * should be able to balance it without having to
2680 * write pages from its LRU list.
2681 *
2682 * This may look like it could increase pressure on
2683 * lower zones by failing allocations in higher zones
2684 * before they are full. But the pages that do spill
2685 * over are limited as the lower zones are protected
2686 * by this very same mechanism. It should not become
2687 * a practical burden to them.
2688 *
2689 * XXX: For now, allow allocations to potentially
2690 * exceed the per-zone dirty limit in the slowpath
Mel Gormanc9ab0c42015-11-06 16:28:12 -08002691 * (spread_dirty_pages unset) before going into reclaim,
Johannes Weinera756cf52012-01-10 15:07:49 -08002692 * which is important when on a NUMA setup the allowed
2693 * zones are together not big enough to reach the
2694 * global limit. The proper fix for these situations
2695 * will require awareness of zones in the
2696 * dirty-throttling and the flusher threads.
2697 */
Mel Gormanc9ab0c42015-11-06 16:28:12 -08002698 if (ac->spread_dirty_pages && !zone_dirty_ok(zone))
Mel Gorman800a1e72014-06-04 16:10:06 -07002699 continue;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002700
Johannes Weinere085dbc2013-09-11 14:20:46 -07002701 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2702 if (!zone_watermark_ok(zone, order, mark,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002703 ac->classzone_idx, alloc_flags)) {
Mel Gormanfa5e0842009-06-16 15:33:22 -07002704 int ret;
2705
Mel Gorman5dab2912014-06-04 16:10:14 -07002706 /* Checked here to keep the fast path fast */
2707 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2708 if (alloc_flags & ALLOC_NO_WATERMARKS)
2709 goto try_this_zone;
2710
David Rientjes957f8222012-10-08 16:33:24 -07002711 if (zone_reclaim_mode == 0 ||
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002712 !zone_allows_reclaim(ac->preferred_zone, zone))
Mel Gormancd38b112011-07-25 17:12:29 -07002713 continue;
2714
Mel Gormanfa5e0842009-06-16 15:33:22 -07002715 ret = zone_reclaim(zone, gfp_mask, order);
2716 switch (ret) {
2717 case ZONE_RECLAIM_NOSCAN:
2718 /* did not scan */
Mel Gormancd38b112011-07-25 17:12:29 -07002719 continue;
Mel Gormanfa5e0842009-06-16 15:33:22 -07002720 case ZONE_RECLAIM_FULL:
2721 /* scanned but unreclaimable */
Mel Gormancd38b112011-07-25 17:12:29 -07002722 continue;
Mel Gormanfa5e0842009-06-16 15:33:22 -07002723 default:
2724 /* did we reclaim enough */
Mel Gormanfed27192013-04-29 15:07:57 -07002725 if (zone_watermark_ok(zone, order, mark,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002726 ac->classzone_idx, alloc_flags))
Mel Gormanfed27192013-04-29 15:07:57 -07002727 goto try_this_zone;
2728
Mel Gormanfed27192013-04-29 15:07:57 -07002729 continue;
Paul Jackson0798e512006-12-06 20:31:38 -08002730 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002731 }
2732
Mel Gormanfa5e0842009-06-16 15:33:22 -07002733try_this_zone:
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002734 page = buffered_rmqueue(ac->preferred_zone, zone, order,
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002735 gfp_mask, alloc_flags, ac->migratetype);
Vlastimil Babka75379192015-02-11 15:25:38 -08002736 if (page) {
2737 if (prep_new_page(page, order, gfp_mask, alloc_flags))
2738 goto try_this_zone;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002739
2740 /*
2741 * If this is a high-order atomic allocation then check
2742 * if the pageblock should be reserved for the future
2743 */
2744 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
2745 reserve_highatomic_pageblock(page, zone, order);
2746
Vlastimil Babka75379192015-02-11 15:25:38 -08002747 return page;
2748 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07002749 }
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002750
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002751 /*
2752 * The first pass makes sure allocations are spread fairly within the
2753 * local node. However, the local node might have free pages left
2754 * after the fairness batches are exhausted, and remote zones haven't
2755 * even been considered yet. Try once more without fairness, and
2756 * include remote zones now, before entering the slowpath and waking
2757 * kswapd: prefer spilling to a remote zone over swapping locally.
2758 */
2759 if (alloc_flags & ALLOC_FAIR) {
2760 alloc_flags &= ~ALLOC_FAIR;
2761 if (nr_fair_skipped) {
2762 zonelist_rescan = true;
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002763 reset_alloc_batches(ac->preferred_zone);
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002764 }
2765 if (nr_online_nodes > 1)
2766 zonelist_rescan = true;
2767 }
2768
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002769 if (zonelist_rescan)
2770 goto zonelist_scan;
2771
2772 return NULL;
Martin Hicks753ee722005-06-21 17:14:41 -07002773}
2774
David Rientjes29423e772011-03-22 16:30:47 -07002775/*
2776 * Large machines with many possible nodes should not always dump per-node
2777 * meminfo in irq context.
2778 */
2779static inline bool should_suppress_show_mem(void)
2780{
2781 bool ret = false;
2782
2783#if NODES_SHIFT > 8
2784 ret = in_interrupt();
2785#endif
2786 return ret;
2787}
2788
Dave Hansena238ab52011-05-24 17:12:16 -07002789static DEFINE_RATELIMIT_STATE(nopage_rs,
2790 DEFAULT_RATELIMIT_INTERVAL,
2791 DEFAULT_RATELIMIT_BURST);
2792
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08002793void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
Dave Hansena238ab52011-05-24 17:12:16 -07002794{
Dave Hansena238ab52011-05-24 17:12:16 -07002795 unsigned int filter = SHOW_MEM_FILTER_NODES;
2796
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08002797 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2798 debug_guardpage_minorder() > 0)
Dave Hansena238ab52011-05-24 17:12:16 -07002799 return;
2800
2801 /*
2802 * This documents exceptions given to allocations in certain
2803 * contexts that are allowed to allocate outside current's set
2804 * of allowed nodes.
2805 */
2806 if (!(gfp_mask & __GFP_NOMEMALLOC))
2807 if (test_thread_flag(TIF_MEMDIE) ||
2808 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2809 filter &= ~SHOW_MEM_FILTER_NODES;
Mel Gormand0164ad2015-11-06 16:28:21 -08002810 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
Dave Hansena238ab52011-05-24 17:12:16 -07002811 filter &= ~SHOW_MEM_FILTER_NODES;
2812
2813 if (fmt) {
Joe Perches3ee9a4f2011-10-31 17:08:35 -07002814 struct va_format vaf;
2815 va_list args;
2816
Dave Hansena238ab52011-05-24 17:12:16 -07002817 va_start(args, fmt);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07002818
2819 vaf.fmt = fmt;
2820 vaf.va = &args;
2821
2822 pr_warn("%pV", &vaf);
2823
Dave Hansena238ab52011-05-24 17:12:16 -07002824 va_end(args);
2825 }
2826
Vlastimil Babkac5c990e2016-03-15 14:56:02 -07002827 pr_warn("%s: page allocation failure: order:%u, mode:%#x(%pGg)\n",
2828 current->comm, order, gfp_mask, &gfp_mask);
Dave Hansena238ab52011-05-24 17:12:16 -07002829 dump_stack();
2830 if (!should_suppress_show_mem())
2831 show_mem(filter);
2832}
2833
Mel Gorman11e33f62009-06-16 15:31:57 -07002834static inline struct page *
2835__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002836 const struct alloc_context *ac, unsigned long *did_some_progress)
Mel Gorman11e33f62009-06-16 15:31:57 -07002837{
David Rientjes6e0fc462015-09-08 15:00:36 -07002838 struct oom_control oc = {
2839 .zonelist = ac->zonelist,
2840 .nodemask = ac->nodemask,
2841 .gfp_mask = gfp_mask,
2842 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07002843 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845
Johannes Weiner9879de72015-01-26 12:58:32 -08002846 *did_some_progress = 0;
2847
Johannes Weiner9879de72015-01-26 12:58:32 -08002848 /*
Johannes Weinerdc564012015-06-24 16:57:19 -07002849 * Acquire the oom lock. If that fails, somebody else is
2850 * making progress for us.
Johannes Weiner9879de72015-01-26 12:58:32 -08002851 */
Johannes Weinerdc564012015-06-24 16:57:19 -07002852 if (!mutex_trylock(&oom_lock)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08002853 *did_some_progress = 1;
Mel Gorman11e33f62009-06-16 15:31:57 -07002854 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 return NULL;
2856 }
Jens Axboe6b1de912005-11-17 21:35:02 +01002857
Mel Gorman11e33f62009-06-16 15:31:57 -07002858 /*
2859 * Go through the zonelist yet one more time, keep very high watermark
2860 * here, this is only to catch a parallel oom killing, we must fail if
2861 * we're still under heavy pressure.
2862 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002863 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
2864 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002865 if (page)
Mel Gorman11e33f62009-06-16 15:31:57 -07002866 goto out;
2867
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08002868 if (!(gfp_mask & __GFP_NOFAIL)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08002869 /* Coredumps can quickly deplete all memory reserves */
2870 if (current->flags & PF_DUMPCORE)
2871 goto out;
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08002872 /* The OOM killer will not help higher order allocs */
2873 if (order > PAGE_ALLOC_COSTLY_ORDER)
2874 goto out;
David Rientjes03668b32010-08-09 17:18:54 -07002875 /* The OOM killer does not needlessly kill tasks for lowmem */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002876 if (ac->high_zoneidx < ZONE_NORMAL)
David Rientjes03668b32010-08-09 17:18:54 -07002877 goto out;
Johannes Weiner90839052015-06-24 16:57:21 -07002878 /* The OOM killer does not compensate for IO-less reclaim */
Johannes Weinercc873172015-02-27 15:52:09 -08002879 if (!(gfp_mask & __GFP_FS)) {
2880 /*
2881 * XXX: Page reclaim didn't yield anything,
2882 * and the OOM killer can't be invoked, but
Johannes Weiner90839052015-06-24 16:57:21 -07002883 * keep looping as per tradition.
Tetsuo Handa0a687aa2016-03-17 14:20:48 -07002884 *
2885 * But do not keep looping if oom_killer_disable()
2886 * was already called, for the system is trying to
2887 * enter a quiescent state during suspend.
Johannes Weinercc873172015-02-27 15:52:09 -08002888 */
Tetsuo Handa0a687aa2016-03-17 14:20:48 -07002889 *did_some_progress = !oom_killer_disabled;
Johannes Weiner9879de72015-01-26 12:58:32 -08002890 goto out;
Johannes Weinercc873172015-02-27 15:52:09 -08002891 }
Johannes Weiner90839052015-06-24 16:57:21 -07002892 if (pm_suspended_storage())
2893 goto out;
David Rientjes4167e9b2015-04-14 15:46:55 -07002894 /* The OOM killer may not free memory on a specific node */
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08002895 if (gfp_mask & __GFP_THISNODE)
2896 goto out;
2897 }
Mel Gorman11e33f62009-06-16 15:31:57 -07002898 /* Exhausted what can be done so it's blamo time */
Michal Hocko5020e282016-01-14 15:20:36 -08002899 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
Michal Hockoc32b3cb2015-02-11 15:26:24 -08002900 *did_some_progress = 1;
Michal Hocko5020e282016-01-14 15:20:36 -08002901
2902 if (gfp_mask & __GFP_NOFAIL) {
2903 page = get_page_from_freelist(gfp_mask, order,
2904 ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
2905 /*
2906 * fallback to ignore cpuset restriction if our nodes
2907 * are depleted
2908 */
2909 if (!page)
2910 page = get_page_from_freelist(gfp_mask, order,
2911 ALLOC_NO_WATERMARKS, ac);
2912 }
2913 }
Mel Gorman11e33f62009-06-16 15:31:57 -07002914out:
Johannes Weinerdc564012015-06-24 16:57:19 -07002915 mutex_unlock(&oom_lock);
Mel Gorman11e33f62009-06-16 15:31:57 -07002916 return page;
2917}
2918
Mel Gorman56de7262010-05-24 14:32:30 -07002919#ifdef CONFIG_COMPACTION
2920/* Try memory compaction for high-order allocations before reclaim */
2921static struct page *
2922__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002923 int alloc_flags, const struct alloc_context *ac,
2924 enum migrate_mode mode, int *contended_compaction,
2925 bool *deferred_compaction)
Mel Gorman56de7262010-05-24 14:32:30 -07002926{
Vlastimil Babka53853e22014-10-09 15:27:02 -07002927 unsigned long compact_result;
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002928 struct page *page;
Vlastimil Babka53853e22014-10-09 15:27:02 -07002929
Mel Gorman66199712012-01-12 17:19:41 -08002930 if (!order)
Mel Gorman56de7262010-05-24 14:32:30 -07002931 return NULL;
2932
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002933 current->flags |= PF_MEMALLOC;
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08002934 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
2935 mode, contended_compaction);
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002936 current->flags &= ~PF_MEMALLOC;
Mel Gorman56de7262010-05-24 14:32:30 -07002937
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002938 switch (compact_result) {
2939 case COMPACT_DEFERRED:
Vlastimil Babka53853e22014-10-09 15:27:02 -07002940 *deferred_compaction = true;
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002941 /* fall-through */
2942 case COMPACT_SKIPPED:
2943 return NULL;
2944 default:
2945 break;
Mel Gorman56de7262010-05-24 14:32:30 -07002946 }
2947
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002948 /*
2949	 * In at least one zone compaction wasn't deferred or skipped, so let's
2950 * count a compaction stall
2951 */
2952 count_vm_event(COMPACTSTALL);
2953
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002954 page = get_page_from_freelist(gfp_mask, order,
2955 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002956
2957 if (page) {
2958 struct zone *zone = page_zone(page);
2959
2960 zone->compact_blockskip_flush = false;
2961 compaction_defer_reset(zone, order, true);
2962 count_vm_event(COMPACTSUCCESS);
2963 return page;
2964 }
2965
2966 /*
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002967	 * It's bad if a compaction run occurs and fails. The most likely reason
2968 * is that pages exist, but not enough to satisfy watermarks.
2969 */
2970 count_vm_event(COMPACTFAIL);
2971
2972 cond_resched();
2973
Mel Gorman56de7262010-05-24 14:32:30 -07002974 return NULL;
2975}
2976#else
2977static inline struct page *
2978__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002979 int alloc_flags, const struct alloc_context *ac,
2980 enum migrate_mode mode, int *contended_compaction,
2981 bool *deferred_compaction)
Mel Gorman56de7262010-05-24 14:32:30 -07002982{
2983 return NULL;
2984}
2985#endif /* CONFIG_COMPACTION */
2986
Marek Szyprowskibba90712012-01-25 12:09:52 +01002987/* Perform direct synchronous page reclaim */
2988static int
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002989__perform_reclaim(gfp_t gfp_mask, unsigned int order,
2990 const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07002991{
Mel Gorman11e33f62009-06-16 15:31:57 -07002992 struct reclaim_state reclaim_state;
Marek Szyprowskibba90712012-01-25 12:09:52 +01002993 int progress;
Mel Gorman11e33f62009-06-16 15:31:57 -07002994
2995 cond_resched();
2996
2997 /* We now go into synchronous reclaim */
2998 cpuset_memory_pressure_bump();
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002999 current->flags |= PF_MEMALLOC;
Mel Gorman11e33f62009-06-16 15:31:57 -07003000 lockdep_set_current_reclaim_state(gfp_mask);
3001 reclaim_state.reclaimed_slab = 0;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003002 current->reclaim_state = &reclaim_state;
Mel Gorman11e33f62009-06-16 15:31:57 -07003003
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003004 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3005 ac->nodemask);
Mel Gorman11e33f62009-06-16 15:31:57 -07003006
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003007 current->reclaim_state = NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07003008 lockdep_clear_current_reclaim_state();
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003009 current->flags &= ~PF_MEMALLOC;
Mel Gorman11e33f62009-06-16 15:31:57 -07003010
3011 cond_resched();
3012
Marek Szyprowskibba90712012-01-25 12:09:52 +01003013 return progress;
3014}
3015
3016/* The really slow allocator path where we enter direct reclaim */
3017static inline struct page *
3018__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003019 int alloc_flags, const struct alloc_context *ac,
3020 unsigned long *did_some_progress)
Marek Szyprowskibba90712012-01-25 12:09:52 +01003021{
3022 struct page *page = NULL;
3023 bool drained = false;
3024
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003025 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003026 if (unlikely(!(*did_some_progress)))
3027 return NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07003028
Mel Gorman9ee493c2010-09-09 16:38:18 -07003029retry:
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003030 page = get_page_from_freelist(gfp_mask, order,
3031 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003032
3033 /*
3034 * If an allocation failed after direct reclaim, it could be because
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003035 * pages are pinned on the per-cpu lists or in high alloc reserves.
3036	 * Shrink them and try again
Mel Gorman9ee493c2010-09-09 16:38:18 -07003037 */
3038 if (!page && !drained) {
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003039 unreserve_highatomic_pageblock(ac);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08003040 drain_all_pages(NULL);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003041 drained = true;
3042 goto retry;
3043 }
3044
Mel Gorman11e33f62009-06-16 15:31:57 -07003045 return page;
3046}
3047
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003048static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003049{
3050 struct zoneref *z;
3051 struct zone *zone;
3052
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003053 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3054 ac->high_zoneidx, ac->nodemask)
3055 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
Mel Gorman11e33f62009-06-16 15:31:57 -07003056}
3057
Peter Zijlstra341ce062009-06-16 15:32:02 -07003058static inline int
3059gfp_to_alloc_flags(gfp_t gfp_mask)
3060{
Peter Zijlstra341ce062009-06-16 15:32:02 -07003061 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
Peter Zijlstra341ce062009-06-16 15:32:02 -07003062
Mel Gormana56f57f2009-06-16 15:32:02 -07003063 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
Namhyung Kime6223a32010-10-26 14:21:59 -07003064 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
Mel Gormana56f57f2009-06-16 15:32:02 -07003065
Peter Zijlstra341ce062009-06-16 15:32:02 -07003066 /*
3067 * The caller may dip into page reserves a bit more if the caller
3068 * cannot run direct reclaim, or if the caller has realtime scheduling
3069 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
Mel Gormand0164ad2015-11-06 16:28:21 -08003070 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
Peter Zijlstra341ce062009-06-16 15:32:02 -07003071 */
Namhyung Kime6223a32010-10-26 14:21:59 -07003072 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
Peter Zijlstra341ce062009-06-16 15:32:02 -07003073
Mel Gormand0164ad2015-11-06 16:28:21 -08003074 if (gfp_mask & __GFP_ATOMIC) {
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003075 /*
David Rientjesb104a352014-07-30 16:08:24 -07003076 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3077 * if it can't schedule.
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003078 */
David Rientjesb104a352014-07-30 16:08:24 -07003079 if (!(gfp_mask & __GFP_NOMEMALLOC))
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003080 alloc_flags |= ALLOC_HARDER;
Peter Zijlstra341ce062009-06-16 15:32:02 -07003081 /*
David Rientjesb104a352014-07-30 16:08:24 -07003082 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
Vladimir Davydov344736f2014-10-20 15:50:30 +04003083 * comment for __cpuset_node_allowed().
Peter Zijlstra341ce062009-06-16 15:32:02 -07003084 */
3085 alloc_flags &= ~ALLOC_CPUSET;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003086 } else if (unlikely(rt_task(current)) && !in_interrupt())
Peter Zijlstra341ce062009-06-16 15:32:02 -07003087 alloc_flags |= ALLOC_HARDER;
3088
Mel Gormanb37f1dd2012-07-31 16:44:03 -07003089 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
3090 if (gfp_mask & __GFP_MEMALLOC)
3091 alloc_flags |= ALLOC_NO_WATERMARKS;
Mel Gorman907aed42012-07-31 16:44:07 -07003092 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3093 alloc_flags |= ALLOC_NO_WATERMARKS;
3094 else if (!in_interrupt() &&
3095 ((current->flags & PF_MEMALLOC) ||
3096 unlikely(test_thread_flag(TIF_MEMDIE))))
Peter Zijlstra341ce062009-06-16 15:32:02 -07003097 alloc_flags |= ALLOC_NO_WATERMARKS;
3098 }
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07003099#ifdef CONFIG_CMA
David Rientjes43e7a342014-10-09 15:27:25 -07003100 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07003101 alloc_flags |= ALLOC_CMA;
3102#endif
Peter Zijlstra341ce062009-06-16 15:32:02 -07003103 return alloc_flags;
3104}
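
/*
 * Worked example (illustrative): a GFP_ATOMIC request, which in this tree
 * carries __GFP_HIGH, __GFP_ATOMIC and __GFP_KSWAPD_RECLAIM, starts from
 * ALLOC_WMARK_MIN | ALLOC_CPUSET, gains ALLOC_HIGH from __GFP_HIGH, gains
 * ALLOC_HARDER because __GFP_ATOMIC is set without __GFP_NOMEMALLOC, and
 * then has ALLOC_CPUSET cleared, leaving
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER.
 */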
3105
Mel Gorman072bb0a2012-07-31 16:43:58 -07003106bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3107{
Mel Gormanb37f1dd2012-07-31 16:44:03 -07003108 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
Mel Gorman072bb0a2012-07-31 16:43:58 -07003109}
3110
Mel Gormand0164ad2015-11-06 16:28:21 -08003111static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
3112{
3113 return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
3114}
3115
Mel Gorman11e33f62009-06-16 15:31:57 -07003116static inline struct page *
3117__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003118 struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003119{
Mel Gormand0164ad2015-11-06 16:28:21 -08003120 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
Mel Gorman11e33f62009-06-16 15:31:57 -07003121 struct page *page = NULL;
3122 int alloc_flags;
3123 unsigned long pages_reclaimed = 0;
3124 unsigned long did_some_progress;
David Rientjese0b9dae2014-06-04 16:08:28 -07003125 enum migrate_mode migration_mode = MIGRATE_ASYNC;
Mel Gorman66199712012-01-12 17:19:41 -08003126 bool deferred_compaction = false;
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07003127 int contended_compaction = COMPACT_CONTENDED_NONE;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003128
Christoph Lameter952f3b52006-12-06 20:33:26 -08003129 /*
Mel Gorman72807a72009-06-16 15:32:18 -07003130 * In the slowpath, we sanity check order to avoid ever trying to
3131 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3132 * be using allocators in order of preference for an area that is
3133 * too large.
3134 */
Mel Gorman1fc28b72009-07-29 15:04:08 -07003135 if (order >= MAX_ORDER) {
3136 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
Mel Gorman72807a72009-06-16 15:32:18 -07003137 return NULL;
Mel Gorman1fc28b72009-07-29 15:04:08 -07003138 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003139
Christoph Lameter952f3b52006-12-06 20:33:26 -08003140 /*
Mel Gormand0164ad2015-11-06 16:28:21 -08003141 * We also sanity check to catch abuse of atomic reserves being used by
3142 * callers that are not in atomic context.
3143 */
3144 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3145 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3146 gfp_mask &= ~__GFP_ATOMIC;
3147
Johannes Weiner9879de72015-01-26 12:58:32 -08003148retry:
Mel Gormand0164ad2015-11-06 16:28:21 -08003149 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003150 wake_all_kswapds(order, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003151
Paul Jackson9bf22292005-09-06 15:18:12 -07003152 /*
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003153 * OK, we're below the kswapd watermark and have kicked background
3154 * reclaim. Now things get more complex, so set up alloc_flags according
3155 * to how we want to proceed.
Paul Jackson9bf22292005-09-06 15:18:12 -07003156 */
Peter Zijlstra341ce062009-06-16 15:32:02 -07003157 alloc_flags = gfp_to_alloc_flags(gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158
David Rientjesf33261d2011-01-25 15:07:20 -08003159 /*
3160 * Find the true preferred zone if the allocation is unconstrained by
3161 * cpusets.
3162 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003163 if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
Mel Gormand8846372014-06-04 16:10:33 -07003164 struct zoneref *preferred_zoneref;
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003165 preferred_zoneref = first_zones_zonelist(ac->zonelist,
3166 ac->high_zoneidx, NULL, &ac->preferred_zone);
3167 ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
Mel Gormand8846372014-06-04 16:10:33 -07003168 }
David Rientjesf33261d2011-01-25 15:07:20 -08003169
Peter Zijlstra341ce062009-06-16 15:32:02 -07003170 /* This is the last chance, in general, before the goto nopage. */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003171 page = get_page_from_freelist(gfp_mask, order,
3172 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003173 if (page)
3174 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175
Mel Gorman11e33f62009-06-16 15:31:57 -07003176 /* Allocate without watermarks if the context allows */
Peter Zijlstra341ce062009-06-16 15:32:02 -07003177 if (alloc_flags & ALLOC_NO_WATERMARKS) {
Mel Gorman183f6372012-07-31 16:44:12 -07003178 /*
3179 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
3180		 * the allocation is high priority and these types of
3181 * allocations are system rather than user orientated
3182 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003183 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
Michal Hocko33d53102016-01-14 15:19:05 -08003184 page = get_page_from_freelist(gfp_mask, order,
3185 ALLOC_NO_WATERMARKS, ac);
3186 if (page)
3187 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188 }
3189
Mel Gormand0164ad2015-11-06 16:28:21 -08003190 /* Caller is not willing to reclaim, we can't balance anything */
3191 if (!can_direct_reclaim) {
David Rientjesaed0a0e2014-01-21 15:51:12 -08003192 /*
Michal Hocko33d53102016-01-14 15:19:05 -08003193		 * All existing users of __GFP_NOFAIL are blockable, so warn
3194 * of any new users that actually allow this type of allocation
3195 * to fail.
David Rientjesaed0a0e2014-01-21 15:51:12 -08003196 */
3197 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198 goto nopage;
David Rientjesaed0a0e2014-01-21 15:51:12 -08003199 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200
Peter Zijlstra341ce062009-06-16 15:32:02 -07003201 /* Avoid recursion of direct reclaim */
Michal Hocko33d53102016-01-14 15:19:05 -08003202 if (current->flags & PF_MEMALLOC) {
3203 /*
3204 * __GFP_NOFAIL request from this context is rather bizarre
3205 * because we cannot reclaim anything and only can loop waiting
3206		 * for somebody to do the work for us.
3207 */
3208 if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3209 cond_resched();
3210 goto retry;
3211 }
Peter Zijlstra341ce062009-06-16 15:32:02 -07003212 goto nopage;
Michal Hocko33d53102016-01-14 15:19:05 -08003213 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214
David Rientjes6583bb62009-07-29 15:02:06 -07003215 /* Avoid allocations with no watermarks from looping endlessly */
3216 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
3217 goto nopage;
3218
Mel Gorman77f1fe62011-01-13 15:45:57 -08003219 /*
3220 * Try direct compaction. The first pass is asynchronous. Subsequent
3221 * attempts after direct reclaim are synchronous
3222 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003223 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3224 migration_mode,
3225 &contended_compaction,
Vlastimil Babka53853e22014-10-09 15:27:02 -07003226 &deferred_compaction);
Mel Gorman56de7262010-05-24 14:32:30 -07003227 if (page)
3228 goto got_pg;
David Rientjes75f30862014-06-04 16:08:30 -07003229
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07003230 /* Checks for THP-specific high-order allocations */
Mel Gormand0164ad2015-11-06 16:28:21 -08003231 if (is_thp_gfp_mask(gfp_mask)) {
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07003232 /*
3233 * If compaction is deferred for high-order allocations, it is
3234 * because sync compaction recently failed. If this is the case
3235 * and the caller requested a THP allocation, we do not want
3236 * to heavily disrupt the system, so we fail the allocation
3237 * instead of entering direct reclaim.
3238 */
3239 if (deferred_compaction)
3240 goto nopage;
3241
3242 /*
3243 * In all zones where compaction was attempted (and not
3244 * deferred or skipped), lock contention has been detected.
3245 * For THP allocation we do not want to disrupt the others
3246 * so we fallback to base pages instead.
3247 */
3248 if (contended_compaction == COMPACT_CONTENDED_LOCK)
3249 goto nopage;
3250
3251 /*
3252 * If compaction was aborted due to need_resched(), we do not
3253 * want to further increase allocation latency, unless it is
3254 * khugepaged trying to collapse.
3255 */
3256 if (contended_compaction == COMPACT_CONTENDED_SCHED
3257 && !(current->flags & PF_KTHREAD))
3258 goto nopage;
3259 }
Mel Gorman66199712012-01-12 17:19:41 -08003260
David Rientjes8fe78042014-08-06 16:07:54 -07003261 /*
3262 * It can become very expensive to allocate transparent hugepages at
3263 * fault, so use asynchronous memory compaction for THP unless it is
3264 * khugepaged trying to collapse.
3265 */
Mel Gormand0164ad2015-11-06 16:28:21 -08003266 if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD))
David Rientjes8fe78042014-08-06 16:07:54 -07003267 migration_mode = MIGRATE_SYNC_LIGHT;
3268
Mel Gorman11e33f62009-06-16 15:31:57 -07003269 /* Try direct reclaim and then allocating */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003270 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3271 &did_some_progress);
Mel Gorman11e33f62009-06-16 15:31:57 -07003272 if (page)
3273 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274
Johannes Weiner90839052015-06-24 16:57:21 -07003275 /* Do not loop if specifically requested */
3276 if (gfp_mask & __GFP_NORETRY)
3277 goto noretry;
3278
3279 /* Keep reclaiming pages as long as there is reasonable progress */
Nishanth Aravamudana41f24e2008-04-29 00:58:25 -07003280 pages_reclaimed += did_some_progress;
Johannes Weiner90839052015-06-24 16:57:21 -07003281 if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
3282 ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
Mel Gorman11e33f62009-06-16 15:31:57 -07003283 /* Wait for some write requests to complete then retry */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003284 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
Johannes Weiner9879de72015-01-26 12:58:32 -08003285 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286 }
3287
Johannes Weiner90839052015-06-24 16:57:21 -07003288 /* Reclaim has failed us, start killing things */
3289 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3290 if (page)
3291 goto got_pg;
3292
3293 /* Retry as long as the OOM killer is making progress */
3294 if (did_some_progress)
3295 goto retry;
3296
3297noretry:
3298 /*
3299	 * High-order allocations do not necessarily loop after
3300	 * direct reclaim, and reclaim/compaction depends on compaction
3301	 * being called after reclaim, so call directly if necessary.
3302 */
3303 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
3304 ac, migration_mode,
3305 &contended_compaction,
3306 &deferred_compaction);
3307 if (page)
3308 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309nopage:
Dave Hansena238ab52011-05-24 17:12:16 -07003310 warn_alloc_failed(gfp_mask, order, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311got_pg:
Mel Gorman072bb0a2012-07-31 16:43:58 -07003312 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003313}
Mel Gorman11e33f62009-06-16 15:31:57 -07003314
3315/*
3316 * This is the 'heart' of the zoned buddy allocator.
3317 */
3318struct page *
3319__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3320 struct zonelist *zonelist, nodemask_t *nodemask)
3321{
Mel Gormand8846372014-06-04 16:10:33 -07003322 struct zoneref *preferred_zoneref;
Mel Gormancc9a6c82012-03-21 16:34:11 -07003323 struct page *page = NULL;
Mel Gormancc9a6c82012-03-21 16:34:11 -07003324 unsigned int cpuset_mems_cookie;
Johannes Weiner3a025762014-04-07 15:37:48 -07003325 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
Andrew Morton91fbdc02015-02-11 15:25:04 -08003326 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003327 struct alloc_context ac = {
3328 .high_zoneidx = gfp_zone(gfp_mask),
3329 .nodemask = nodemask,
3330 .migratetype = gfpflags_to_migratetype(gfp_mask),
3331 };
Mel Gorman11e33f62009-06-16 15:31:57 -07003332
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10003333 gfp_mask &= gfp_allowed_mask;
3334
Mel Gorman11e33f62009-06-16 15:31:57 -07003335 lockdep_trace_alloc(gfp_mask);
3336
Mel Gormand0164ad2015-11-06 16:28:21 -08003337 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
Mel Gorman11e33f62009-06-16 15:31:57 -07003338
3339 if (should_fail_alloc_page(gfp_mask, order))
3340 return NULL;
3341
3342 /*
3343 * Check the zones suitable for the gfp_mask contain at least one
3344 * valid zone. It's possible to have an empty zonelist as a result
David Rientjes4167e9b2015-04-14 15:46:55 -07003345 * of __GFP_THISNODE and a memoryless node
Mel Gorman11e33f62009-06-16 15:31:57 -07003346 */
3347 if (unlikely(!zonelist->_zonerefs->zone))
3348 return NULL;
3349
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003350 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
Vlastimil Babka21bb9bd2014-10-09 15:26:51 -07003351 alloc_flags |= ALLOC_CMA;
3352
Mel Gormancc9a6c82012-03-21 16:34:11 -07003353retry_cpuset:
Mel Gormand26914d2014-04-03 14:47:24 -07003354 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07003355
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003356 /* We set it here, as __alloc_pages_slowpath might have changed it */
3357 ac.zonelist = zonelist;
Mel Gormanc9ab0c42015-11-06 16:28:12 -08003358
3359 /* Dirty zone balancing only done in the fast path */
3360 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3361
Mel Gorman5117f452009-06-16 15:31:59 -07003362 /* The preferred zone is used for statistics later */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003363 preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
3364 ac.nodemask ? : &cpuset_current_mems_allowed,
3365 &ac.preferred_zone);
3366 if (!ac.preferred_zone)
Mel Gormancc9a6c82012-03-21 16:34:11 -07003367 goto out;
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003368 ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
Mel Gorman5117f452009-06-16 15:31:59 -07003369
3370 /* First allocation attempt */
Andrew Morton91fbdc02015-02-11 15:25:04 -08003371 alloc_mask = gfp_mask|__GFP_HARDWALL;
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003372 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
Ming Lei21caf2f2013-02-22 16:34:08 -08003373 if (unlikely(!page)) {
3374 /*
3375 * Runtime PM, block IO and its error handling path
3376 * can deadlock because I/O on the device might not
3377 * complete.
3378 */
Andrew Morton91fbdc02015-02-11 15:25:04 -08003379 alloc_mask = memalloc_noio_flags(gfp_mask);
Mel Gormanc9ab0c42015-11-06 16:28:12 -08003380 ac.spread_dirty_pages = false;
Andrew Morton91fbdc02015-02-11 15:25:04 -08003381
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003382 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
Ming Lei21caf2f2013-02-22 16:34:08 -08003383 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003384
Xishi Qiu23f086f2015-02-11 15:25:07 -08003385 if (kmemcheck_enabled && page)
3386 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
3387
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003388 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
Mel Gormancc9a6c82012-03-21 16:34:11 -07003389
3390out:
3391 /*
3392 * When updating a task's mems_allowed, it is possible to race with
3393 * parallel threads in such a way that an allocation can fail while
3394 * the mask is being updated. If a page allocation is about to fail,
3395 * check if the cpuset changed during allocation and if so, retry.
3396 */
Mel Gormand26914d2014-04-03 14:47:24 -07003397 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07003398 goto retry_cpuset;
3399
Mel Gorman11e33f62009-06-16 15:31:57 -07003400 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401}
Mel Gormand2391712009-06-16 15:31:52 -07003402EXPORT_SYMBOL(__alloc_pages_nodemask);
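
/*
 * Illustrative sketch only, not a real caller: most code reaches
 * __alloc_pages_nodemask() through wrappers such as alloc_pages(), which
 * supply the requesting node's zonelist and a NULL nodemask.  The helper
 * name below is made up for illustration.
 */
static inline int example_alloc_contig_pages(void)
{
	/* four physically contiguous pages; may sleep and enter reclaim */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return -ENOMEM;

	/* ... use page_address(page) or map the pages ... */

	__free_pages(page, 2);
	return 0;
}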
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403
3404/*
3405 * Common helper functions.
3406 */
Harvey Harrison920c7a52008-02-04 22:29:26 -08003407unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408{
Akinobu Mita945a1112009-09-21 17:01:47 -07003409 struct page *page;
3410
3411 /*
3412 * __get_free_pages() returns a 32-bit address, which cannot represent
3413 * a highmem page
3414 */
3415 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
3416
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 page = alloc_pages(gfp_mask, order);
3418 if (!page)
3419 return 0;
3420 return (unsigned long) page_address(page);
3421}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422EXPORT_SYMBOL(__get_free_pages);
3423
Harvey Harrison920c7a52008-02-04 22:29:26 -08003424unsigned long get_zeroed_page(gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425{
Akinobu Mita945a1112009-09-21 17:01:47 -07003426 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428EXPORT_SYMBOL(get_zeroed_page);
3429
Harvey Harrison920c7a52008-02-04 22:29:26 -08003430void __free_pages(struct page *page, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431{
Nick Pigginb5810032005-10-29 18:16:12 -07003432 if (put_page_testzero(page)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433 if (order == 0)
Mel Gormanb745bc82014-06-04 16:10:22 -07003434 free_hot_cold_page(page, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435 else
3436 __free_pages_ok(page, order);
3437 }
3438}
3439
3440EXPORT_SYMBOL(__free_pages);
3441
Harvey Harrison920c7a52008-02-04 22:29:26 -08003442void free_pages(unsigned long addr, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443{
3444 if (addr != 0) {
Nick Piggin725d7042006-09-25 23:30:55 -07003445 VM_BUG_ON(!virt_addr_valid((void *)addr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446 __free_pages(virt_to_page((void *)addr), order);
3447 }
3448}
3449
3450EXPORT_SYMBOL(free_pages);
3451
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003452/*
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003453 * Page Fragment:
3454 * An arbitrary-length arbitrary-offset area of memory which resides
3455 * within a 0 or higher order page. Multiple fragments within that page
3456 * are individually refcounted, in the page's reference counter.
3457 *
3458 * The page_frag functions below provide a simple allocation framework for
3459 * page fragments. This is used by the network stack and network device
3460 * drivers to provide a backing region of memory for use as either an
3461 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
3462 */
3463static struct page *__page_frag_refill(struct page_frag_cache *nc,
3464 gfp_t gfp_mask)
3465{
3466 struct page *page = NULL;
3467 gfp_t gfp = gfp_mask;
3468
3469#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3470 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
3471 __GFP_NOMEMALLOC;
3472 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
3473 PAGE_FRAG_CACHE_MAX_ORDER);
3474 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
3475#endif
3476 if (unlikely(!page))
3477 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
3478
3479 nc->va = page ? page_address(page) : NULL;
3480
3481 return page;
3482}
3483
3484void *__alloc_page_frag(struct page_frag_cache *nc,
3485 unsigned int fragsz, gfp_t gfp_mask)
3486{
3487 unsigned int size = PAGE_SIZE;
3488 struct page *page;
3489 int offset;
3490
3491 if (unlikely(!nc->va)) {
3492refill:
3493 page = __page_frag_refill(nc, gfp_mask);
3494 if (!page)
3495 return NULL;
3496
3497#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3498 /* if size can vary use size else just use PAGE_SIZE */
3499 size = nc->size;
3500#endif
3501 /* Even if we own the page, we do not use atomic_set().
3502 * This would break get_page_unless_zero() users.
3503 */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07003504 page_ref_add(page, size - 1);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003505
3506 /* reset page count bias and offset to start of new frag */
Michal Hocko2f064f32015-08-21 14:11:51 -07003507 nc->pfmemalloc = page_is_pfmemalloc(page);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003508 nc->pagecnt_bias = size;
3509 nc->offset = size;
3510 }
3511
3512 offset = nc->offset - fragsz;
3513 if (unlikely(offset < 0)) {
3514 page = virt_to_page(nc->va);
3515
Joonsoo Kimfe896d12016-03-17 14:19:26 -07003516 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003517 goto refill;
3518
3519#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3520 /* if size can vary use size else just use PAGE_SIZE */
3521 size = nc->size;
3522#endif
3523 /* OK, page count is 0, we can safely set it */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07003524 set_page_count(page, size);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003525
3526 /* reset page count bias and offset to start of new frag */
3527 nc->pagecnt_bias = size;
3528 offset = size - fragsz;
3529 }
3530
3531 nc->pagecnt_bias--;
3532 nc->offset = offset;
3533
3534 return nc->va + offset;
3535}
3536EXPORT_SYMBOL(__alloc_page_frag);
3537
3538/*
3539 * Frees a page fragment allocated out of either a compound or order 0 page.
3540 */
3541void __free_page_frag(void *addr)
3542{
3543 struct page *page = virt_to_head_page(addr);
3544
3545 if (unlikely(put_page_testzero(page)))
3546 __free_pages_ok(page, compound_order(page));
3547}
3548EXPORT_SYMBOL(__free_page_frag);
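/*
 * Illustrative sketch (the caller-owned cache and sizes are assumptions):
 * the page_frag API above is typically driven from a per-cpu or per-device
 * struct page_frag_cache, with each fragment freed independently:
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *data = __alloc_page_frag(&frag_cache, 256, GFP_ATOMIC);
 *	if (!data)
 *		return -ENOMEM;
 *	...
 *	__free_page_frag(data);
 *
 * The backing page goes back to the page allocator only once every
 * fragment carved from it has been freed and the cache has refilled with
 * a new page.
 */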
3549
3550/*
Vladimir Davydov52383432014-06-04 16:06:39 -07003551 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
Vladimir Davydova9bb7e62016-01-14 15:18:12 -08003552 * of the current memory cgroup if __GFP_ACCOUNT is set; otherwise it is
 3553 * equivalent to alloc_pages.
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003554 *
Vladimir Davydov52383432014-06-04 16:06:39 -07003555 * It should be used when the caller would like to use kmalloc, but since the
3556 * allocation is large, it has to fall back to the page allocator.
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003557 */
Vladimir Davydov52383432014-06-04 16:06:39 -07003558struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
3559{
3560 struct page *page;
Vladimir Davydov52383432014-06-04 16:06:39 -07003561
Vladimir Davydov52383432014-06-04 16:06:39 -07003562 page = alloc_pages(gfp_mask, order);
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08003563 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
3564 __free_pages(page, order);
3565 page = NULL;
3566 }
Vladimir Davydov52383432014-06-04 16:06:39 -07003567 return page;
3568}
3569
3570struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
3571{
3572 struct page *page;
Vladimir Davydov52383432014-06-04 16:06:39 -07003573
Vladimir Davydov52383432014-06-04 16:06:39 -07003574 page = alloc_pages_node(nid, gfp_mask, order);
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08003575 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
3576 __free_pages(page, order);
3577 page = NULL;
3578 }
Vladimir Davydov52383432014-06-04 16:06:39 -07003579 return page;
3580}
3581
3582/*
3583 * __free_kmem_pages and free_kmem_pages will free pages allocated with
3584 * alloc_kmem_pages.
3585 */
3586void __free_kmem_pages(struct page *page, unsigned int order)
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003587{
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08003588 memcg_kmem_uncharge(page, order);
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003589 __free_pages(page, order);
3590}
3591
Vladimir Davydov52383432014-06-04 16:06:39 -07003592void free_kmem_pages(unsigned long addr, unsigned int order)
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003593{
3594 if (addr != 0) {
3595 VM_BUG_ON(!virt_addr_valid((void *)addr));
Vladimir Davydov52383432014-06-04 16:06:39 -07003596 __free_kmem_pages(virt_to_page((void *)addr), order);
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003597 }
3598}
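/*
 * Illustrative sketch (flags and order chosen for the example only): a
 * caller wanting a large, memcg-accounted kernel buffer might do
 *
 *	struct page *page = alloc_kmem_pages(GFP_KERNEL | __GFP_ACCOUNT, 3);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_kmem_pages(page, 3);
 *
 * so the charge taken at allocation time is dropped again by the matching
 * free.
 */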
3599
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08003600static void *make_alloc_exact(unsigned long addr, unsigned int order,
3601 size_t size)
Andi Kleenee85c2e2011-05-11 15:13:34 -07003602{
3603 if (addr) {
3604 unsigned long alloc_end = addr + (PAGE_SIZE << order);
3605 unsigned long used = addr + PAGE_ALIGN(size);
3606
3607 split_page(virt_to_page((void *)addr), order);
3608 while (used < alloc_end) {
3609 free_page(used);
3610 used += PAGE_SIZE;
3611 }
3612 }
3613 return (void *)addr;
3614}
3615
Timur Tabi2be0ffe2008-07-23 21:28:11 -07003616/**
 3617 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3618 * @size: the number of bytes to allocate
3619 * @gfp_mask: GFP flags for the allocation
3620 *
3621 * This function is similar to alloc_pages(), except that it allocates the
3622 * minimum number of pages to satisfy the request. alloc_pages() can only
3623 * allocate memory in power-of-two pages.
3624 *
3625 * This function is also limited by MAX_ORDER.
3626 *
3627 * Memory allocated by this function must be released by free_pages_exact().
3628 */
3629void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3630{
3631 unsigned int order = get_order(size);
3632 unsigned long addr;
3633
3634 addr = __get_free_pages(gfp_mask, order);
Andi Kleenee85c2e2011-05-11 15:13:34 -07003635 return make_alloc_exact(addr, order, size);
Timur Tabi2be0ffe2008-07-23 21:28:11 -07003636}
3637EXPORT_SYMBOL(alloc_pages_exact);
3638
3639/**
Andi Kleenee85c2e2011-05-11 15:13:34 -07003640 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3641 * pages on a node.
Randy Dunlapb5e6ab52011-05-16 13:16:54 -07003642 * @nid: the preferred node ID where memory should be allocated
Andi Kleenee85c2e2011-05-11 15:13:34 -07003643 * @size: the number of bytes to allocate
3644 * @gfp_mask: GFP flags for the allocation
3645 *
3646 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3647 * back.
Andi Kleenee85c2e2011-05-11 15:13:34 -07003648 */
Fabian Fredericke1931812014-08-06 16:04:59 -07003649void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
Andi Kleenee85c2e2011-05-11 15:13:34 -07003650{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08003651 unsigned int order = get_order(size);
Andi Kleenee85c2e2011-05-11 15:13:34 -07003652 struct page *p = alloc_pages_node(nid, gfp_mask, order);
3653 if (!p)
3654 return NULL;
3655 return make_alloc_exact((unsigned long)page_address(p), order, size);
3656}
Andi Kleenee85c2e2011-05-11 15:13:34 -07003657
3658/**
Timur Tabi2be0ffe2008-07-23 21:28:11 -07003659 * free_pages_exact - release memory allocated via alloc_pages_exact()
3660 * @virt: the value returned by alloc_pages_exact.
3661 * @size: size of allocation, same value as passed to alloc_pages_exact().
3662 *
3663 * Release the memory allocated by a previous call to alloc_pages_exact.
3664 */
3665void free_pages_exact(void *virt, size_t size)
3666{
3667 unsigned long addr = (unsigned long)virt;
3668 unsigned long end = addr + PAGE_ALIGN(size);
3669
3670 while (addr < end) {
3671 free_page(addr);
3672 addr += PAGE_SIZE;
3673 }
3674}
3675EXPORT_SYMBOL(free_pages_exact);
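/*
 * Illustrative sketch: alloc_pages_exact() rounds the request up to whole
 * pages rather than to a power-of-two order, so a 20 KiB request on a
 * 4 KiB-page system ends up holding 5 pages instead of the 8 that
 * __get_free_pages() would keep:
 *
 *	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 20 * 1024);
 *
 * The same size must be passed on free so the right number of pages is
 * released.
 */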
3676
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08003677/**
3678 * nr_free_zone_pages - count number of pages beyond high watermark
3679 * @offset: The zone index of the highest zone
3680 *
3681 * nr_free_zone_pages() counts the number of counts pages which are beyond the
3682 * high watermark within all zones at or below a given zone index. For each
3683 * zone, the number of pages is calculated as:
Jiang Liu834405c2013-07-03 15:03:04 -07003684 * managed_pages - high_pages
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08003685 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08003686static unsigned long nr_free_zone_pages(int offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687{
Mel Gormandd1a2392008-04-28 02:12:17 -07003688 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07003689 struct zone *zone;
3690
Martin J. Blighe310fd42005-07-29 22:59:18 -07003691 /* Just pick one node, since fallback list is circular */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08003692 unsigned long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693
Mel Gorman0e884602008-04-28 02:12:14 -07003694 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695
Mel Gorman54a6eb52008-04-28 02:12:16 -07003696 for_each_zone_zonelist(zone, z, zonelist, offset) {
Jiang Liub40da042013-02-22 16:33:52 -08003697 unsigned long size = zone->managed_pages;
Mel Gorman41858962009-06-16 15:32:12 -07003698 unsigned long high = high_wmark_pages(zone);
Martin J. Blighe310fd42005-07-29 22:59:18 -07003699 if (size > high)
3700 sum += size - high;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701 }
3702
3703 return sum;
3704}
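/*
 * Worked example (numbers invented for illustration): a zone with
 * managed_pages == 262144 and a high watermark of 4096 pages contributes
 * 258048 pages to the sum; a zone already below its high watermark
 * contributes nothing.
 */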
3705
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08003706/**
3707 * nr_free_buffer_pages - count number of pages beyond high watermark
3708 *
3709 * nr_free_buffer_pages() counts the number of pages which are beyond the high
3710 * watermark within ZONE_DMA and ZONE_NORMAL.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08003712unsigned long nr_free_buffer_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713{
Al Viroaf4ca452005-10-21 02:55:38 -04003714 return nr_free_zone_pages(gfp_zone(GFP_USER));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715}
Meelap Shahc2f1a552007-07-17 04:04:39 -07003716EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08003718/**
3719 * nr_free_pagecache_pages - count number of pages beyond high watermark
3720 *
3721 * nr_free_pagecache_pages() counts the number of pages which are beyond the
3722 * high watermark within all zones.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08003724unsigned long nr_free_pagecache_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725{
Mel Gorman2a1e2742007-07-17 04:03:12 -07003726 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727}
Christoph Lameter08e0f6a2006-09-27 01:50:06 -07003728
3729static inline void show_node(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730{
Kirill A. Shutemove5adfff2012-12-11 16:00:29 -08003731 if (IS_ENABLED(CONFIG_NUMA))
Andy Whitcroft25ba77c2006-12-06 20:33:03 -08003732 printk("Node %d ", zone_to_nid(zone));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734
Igor Redkod02bd272016-03-17 14:19:05 -07003735long si_mem_available(void)
3736{
3737 long available;
3738 unsigned long pagecache;
3739 unsigned long wmark_low = 0;
3740 unsigned long pages[NR_LRU_LISTS];
3741 struct zone *zone;
3742 int lru;
3743
3744 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
3745 pages[lru] = global_page_state(NR_LRU_BASE + lru);
3746
3747 for_each_zone(zone)
3748 wmark_low += zone->watermark[WMARK_LOW];
3749
3750 /*
3751 * Estimate the amount of memory available for userspace allocations,
3752 * without causing swapping.
3753 */
3754 available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
3755
3756 /*
3757 * Not all the page cache can be freed, otherwise the system will
3758 * start swapping. Assume at least half of the page cache, or the
3759 * low watermark worth of cache, needs to stay.
3760 */
3761 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
3762 pagecache -= min(pagecache / 2, wmark_low);
3763 available += pagecache;
3764
3765 /*
3766 * Part of the reclaimable slab consists of items that are in use,
3767 * and cannot be freed. Cap this estimate at the low watermark.
3768 */
3769 available += global_page_state(NR_SLAB_RECLAIMABLE) -
3770 min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
3771
3772 if (available < 0)
3773 available = 0;
3774 return available;
3775}
3776EXPORT_SYMBOL_GPL(si_mem_available);
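/*
 * In effect the estimate above is
 *
 *	available = free - totalreserve
 *		  + pagecache - min(pagecache / 2, wmark_low)
 *		  + slab_reclaimable - min(slab_reclaimable / 2, wmark_low)
 *
 * clamped at zero; this is the figure exposed as MemAvailable in
 * /proc/meminfo.
 */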
3777
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778void si_meminfo(struct sysinfo *val)
3779{
3780 val->totalram = totalram_pages;
Rafael Aquinicc7452b2014-08-06 16:06:38 -07003781 val->sharedram = global_page_state(NR_SHMEM);
Christoph Lameterd23ad422007-02-10 01:43:02 -08003782 val->freeram = global_page_state(NR_FREE_PAGES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783 val->bufferram = nr_blockdev_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784 val->totalhigh = totalhigh_pages;
3785 val->freehigh = nr_free_highpages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786 val->mem_unit = PAGE_SIZE;
3787}
3788
3789EXPORT_SYMBOL(si_meminfo);
3790
3791#ifdef CONFIG_NUMA
3792void si_meminfo_node(struct sysinfo *val, int nid)
3793{
Jiang Liucdd91a72013-07-03 15:03:27 -07003794 int zone_type; /* needs to be signed */
3795 unsigned long managed_pages = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796 pg_data_t *pgdat = NODE_DATA(nid);
3797
Jiang Liucdd91a72013-07-03 15:03:27 -07003798 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
3799 managed_pages += pgdat->node_zones[zone_type].managed_pages;
3800 val->totalram = managed_pages;
Rafael Aquinicc7452b2014-08-06 16:06:38 -07003801 val->sharedram = node_page_state(nid, NR_SHMEM);
Christoph Lameterd23ad422007-02-10 01:43:02 -08003802 val->freeram = node_page_state(nid, NR_FREE_PAGES);
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07003803#ifdef CONFIG_HIGHMEM
Jiang Liub40da042013-02-22 16:33:52 -08003804 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
Christoph Lameterd23ad422007-02-10 01:43:02 -08003805 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
3806 NR_FREE_PAGES);
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07003807#else
3808 val->totalhigh = 0;
3809 val->freehigh = 0;
3810#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811 val->mem_unit = PAGE_SIZE;
3812}
3813#endif
3814
David Rientjesddd588b2011-03-22 16:30:46 -07003815/*
David Rientjes7bf02ea2011-05-24 17:11:16 -07003816 * Determine whether the node should be displayed or not, depending on whether
3817 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
David Rientjesddd588b2011-03-22 16:30:46 -07003818 */
David Rientjes7bf02ea2011-05-24 17:11:16 -07003819bool skip_free_areas_node(unsigned int flags, int nid)
David Rientjesddd588b2011-03-22 16:30:46 -07003820{
3821 bool ret = false;
Mel Gormancc9a6c82012-03-21 16:34:11 -07003822 unsigned int cpuset_mems_cookie;
David Rientjesddd588b2011-03-22 16:30:46 -07003823
3824 if (!(flags & SHOW_MEM_FILTER_NODES))
3825 goto out;
3826
Mel Gormancc9a6c82012-03-21 16:34:11 -07003827 do {
Mel Gormand26914d2014-04-03 14:47:24 -07003828 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07003829 ret = !node_isset(nid, cpuset_current_mems_allowed);
Mel Gormand26914d2014-04-03 14:47:24 -07003830 } while (read_mems_allowed_retry(cpuset_mems_cookie));
David Rientjesddd588b2011-03-22 16:30:46 -07003831out:
3832 return ret;
3833}
3834
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835#define K(x) ((x) << (PAGE_SHIFT-10))
3836
Rabin Vincent377e4f12012-12-11 16:00:24 -08003837static void show_migration_types(unsigned char type)
3838{
3839 static const char types[MIGRATE_TYPES] = {
3840 [MIGRATE_UNMOVABLE] = 'U',
Rabin Vincent377e4f12012-12-11 16:00:24 -08003841 [MIGRATE_MOVABLE] = 'M',
Vlastimil Babka475a2f92015-12-11 13:40:29 -08003842 [MIGRATE_RECLAIMABLE] = 'E',
3843 [MIGRATE_HIGHATOMIC] = 'H',
Rabin Vincent377e4f12012-12-11 16:00:24 -08003844#ifdef CONFIG_CMA
3845 [MIGRATE_CMA] = 'C',
3846#endif
Minchan Kim194159f2013-02-22 16:33:58 -08003847#ifdef CONFIG_MEMORY_ISOLATION
Rabin Vincent377e4f12012-12-11 16:00:24 -08003848 [MIGRATE_ISOLATE] = 'I',
Minchan Kim194159f2013-02-22 16:33:58 -08003849#endif
Rabin Vincent377e4f12012-12-11 16:00:24 -08003850 };
3851 char tmp[MIGRATE_TYPES + 1];
3852 char *p = tmp;
3853 int i;
3854
3855 for (i = 0; i < MIGRATE_TYPES; i++) {
3856 if (type & (1 << i))
3857 *p++ = types[i];
3858 }
3859
3860 *p = '\0';
3861 printk("(%s) ", tmp);
3862}
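/*
 * Example output: a free list whose pages span the unmovable, movable and
 * reclaimable types is annotated "(UME) " in the per-order dump below.
 */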
3863
Linus Torvalds1da177e2005-04-16 15:20:36 -07003864/*
3865 * Show free area list (used inside shift_scroll-lock stuff)
3866 * We also calculate the percentage fragmentation. We do this by counting the
3867 * memory on each free list with the exception of the first item on the list.
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003868 *
3869 * Bits in @filter:
3870 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
3871 * cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872 */
David Rientjes7bf02ea2011-05-24 17:11:16 -07003873void show_free_areas(unsigned int filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874{
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003875 unsigned long free_pcp = 0;
Jes Sorensenc7241912006-09-27 01:50:05 -07003876 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877 struct zone *zone;
3878
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07003879 for_each_populated_zone(zone) {
David Rientjes7bf02ea2011-05-24 17:11:16 -07003880 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07003881 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003882
Konstantin Khlebnikov761b0672015-04-14 15:45:32 -07003883 for_each_online_cpu(cpu)
3884 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885 }
3886
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07003887 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
3888 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003889 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
3890 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07003891 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003892 " free:%lu free_pcp:%lu free_cma:%lu\n",
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003893 global_page_state(NR_ACTIVE_ANON),
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003894 global_page_state(NR_INACTIVE_ANON),
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07003895 global_page_state(NR_ISOLATED_ANON),
3896 global_page_state(NR_ACTIVE_FILE),
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003897 global_page_state(NR_INACTIVE_FILE),
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07003898 global_page_state(NR_ISOLATED_FILE),
Lee Schermerhorn7b854122008-10-18 20:26:40 -07003899 global_page_state(NR_UNEVICTABLE),
Christoph Lameterb1e7a8f2006-06-30 01:55:39 -07003900 global_page_state(NR_FILE_DIRTY),
Christoph Lameterce866b32006-06-30 01:55:40 -07003901 global_page_state(NR_WRITEBACK),
Christoph Lameterfd39fc82006-06-30 01:55:40 -07003902 global_page_state(NR_UNSTABLE_NFS),
KOSAKI Motohiro3701b032009-09-21 17:01:29 -07003903 global_page_state(NR_SLAB_RECLAIMABLE),
3904 global_page_state(NR_SLAB_UNRECLAIMABLE),
Christoph Lameter65ba55f2006-06-30 01:55:34 -07003905 global_page_state(NR_FILE_MAPPED),
KOSAKI Motohiro4b021082009-09-21 17:01:33 -07003906 global_page_state(NR_SHMEM),
Andrew Mortona25700a2007-02-08 14:20:40 -08003907 global_page_state(NR_PAGETABLE),
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07003908 global_page_state(NR_BOUNCE),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003909 global_page_state(NR_FREE_PAGES),
3910 free_pcp,
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07003911 global_page_state(NR_FREE_CMA_PAGES));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07003913 for_each_populated_zone(zone) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003914 int i;
3915
David Rientjes7bf02ea2011-05-24 17:11:16 -07003916 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07003917 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003918
3919 free_pcp = 0;
3920 for_each_online_cpu(cpu)
3921 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
3922
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923 show_node(zone);
3924 printk("%s"
3925 " free:%lukB"
3926 " min:%lukB"
3927 " low:%lukB"
3928 " high:%lukB"
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003929 " active_anon:%lukB"
3930 " inactive_anon:%lukB"
3931 " active_file:%lukB"
3932 " inactive_file:%lukB"
Lee Schermerhorn7b854122008-10-18 20:26:40 -07003933 " unevictable:%lukB"
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07003934 " isolated(anon):%lukB"
3935 " isolated(file):%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936 " present:%lukB"
Jiang Liu9feedc92012-12-12 13:52:12 -08003937 " managed:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003938 " mlocked:%lukB"
3939 " dirty:%lukB"
3940 " writeback:%lukB"
3941 " mapped:%lukB"
KOSAKI Motohiro4b021082009-09-21 17:01:33 -07003942 " shmem:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003943 " slab_reclaimable:%lukB"
3944 " slab_unreclaimable:%lukB"
KOSAKI Motohiroc6a7f572009-09-21 17:01:32 -07003945 " kernel_stack:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003946 " pagetables:%lukB"
3947 " unstable:%lukB"
3948 " bounce:%lukB"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003949 " free_pcp:%lukB"
3950 " local_pcp:%ukB"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07003951 " free_cma:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003952 " writeback_tmp:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953 " pages_scanned:%lu"
3954 " all_unreclaimable? %s"
3955 "\n",
3956 zone->name,
Mel Gorman88f5acf2011-01-13 15:45:41 -08003957 K(zone_page_state(zone, NR_FREE_PAGES)),
Mel Gorman41858962009-06-16 15:32:12 -07003958 K(min_wmark_pages(zone)),
3959 K(low_wmark_pages(zone)),
3960 K(high_wmark_pages(zone)),
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003961 K(zone_page_state(zone, NR_ACTIVE_ANON)),
3962 K(zone_page_state(zone, NR_INACTIVE_ANON)),
3963 K(zone_page_state(zone, NR_ACTIVE_FILE)),
3964 K(zone_page_state(zone, NR_INACTIVE_FILE)),
Lee Schermerhorn7b854122008-10-18 20:26:40 -07003965 K(zone_page_state(zone, NR_UNEVICTABLE)),
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07003966 K(zone_page_state(zone, NR_ISOLATED_ANON)),
3967 K(zone_page_state(zone, NR_ISOLATED_FILE)),
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968 K(zone->present_pages),
Jiang Liu9feedc92012-12-12 13:52:12 -08003969 K(zone->managed_pages),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003970 K(zone_page_state(zone, NR_MLOCK)),
3971 K(zone_page_state(zone, NR_FILE_DIRTY)),
3972 K(zone_page_state(zone, NR_WRITEBACK)),
3973 K(zone_page_state(zone, NR_FILE_MAPPED)),
KOSAKI Motohiro4b021082009-09-21 17:01:33 -07003974 K(zone_page_state(zone, NR_SHMEM)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003975 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3976 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
KOSAKI Motohiroc6a7f572009-09-21 17:01:32 -07003977 zone_page_state(zone, NR_KERNEL_STACK) *
3978 THREAD_SIZE / 1024,
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003979 K(zone_page_state(zone, NR_PAGETABLE)),
3980 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3981 K(zone_page_state(zone, NR_BOUNCE)),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003982 K(free_pcp),
3983 K(this_cpu_read(zone->pageset->pcp.count)),
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07003984 K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003985 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
Mel Gorman0d5d8232014-08-06 16:07:16 -07003986 K(zone_page_state(zone, NR_PAGES_SCANNED)),
Lisa Du6e543d52013-09-11 14:22:36 -07003987 (!zone_reclaimable(zone) ? "yes" : "no")
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988 );
3989 printk("lowmem_reserve[]:");
3990 for (i = 0; i < MAX_NR_ZONES; i++)
Mel Gorman3484b2d2014-08-06 16:07:14 -07003991 printk(" %ld", zone->lowmem_reserve[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992 printk("\n");
3993 }
3994
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07003995 for_each_populated_zone(zone) {
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08003996 unsigned int order;
3997 unsigned long nr[MAX_ORDER], flags, total = 0;
Rabin Vincent377e4f12012-12-11 16:00:24 -08003998 unsigned char types[MAX_ORDER];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003999
David Rientjes7bf02ea2011-05-24 17:11:16 -07004000 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07004001 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004002 show_node(zone);
4003 printk("%s: ", zone->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004
4005 spin_lock_irqsave(&zone->lock, flags);
4006 for (order = 0; order < MAX_ORDER; order++) {
Rabin Vincent377e4f12012-12-11 16:00:24 -08004007 struct free_area *area = &zone->free_area[order];
4008 int type;
4009
4010 nr[order] = area->nr_free;
Kirill Korotaev8f9de512006-06-23 02:03:50 -07004011 total += nr[order] << order;
Rabin Vincent377e4f12012-12-11 16:00:24 -08004012
4013 types[order] = 0;
4014 for (type = 0; type < MIGRATE_TYPES; type++) {
4015 if (!list_empty(&area->free_list[type]))
4016 types[order] |= 1 << type;
4017 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018 }
4019 spin_unlock_irqrestore(&zone->lock, flags);
Rabin Vincent377e4f12012-12-11 16:00:24 -08004020 for (order = 0; order < MAX_ORDER; order++) {
Kirill Korotaev8f9de512006-06-23 02:03:50 -07004021 printk("%lu*%lukB ", nr[order], K(1UL) << order);
Rabin Vincent377e4f12012-12-11 16:00:24 -08004022 if (nr[order])
4023 show_migration_types(types[order]);
4024 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025 printk("= %lukB\n", K(total));
4026 }
4027
David Rientjes949f7ec2013-04-29 15:07:48 -07004028 hugetlb_show_meminfo();
4029
Larry Woodmane6f36022008-02-04 22:29:30 -08004030 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
4031
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032 show_swap_cache_info();
4033}
4034
Mel Gorman19770b32008-04-28 02:12:18 -07004035static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4036{
4037 zoneref->zone = zone;
4038 zoneref->zone_idx = zone_idx(zone);
4039}
4040
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041/*
4042 * Builds allocation fallback zone lists.
Christoph Lameter1a932052006-01-06 00:11:16 -08004043 *
4044 * Add all populated zones of a node to the zonelist.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004045 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004046static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004047 int nr_zones)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004048{
Christoph Lameter1a932052006-01-06 00:11:16 -08004049 struct zone *zone;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004050 enum zone_type zone_type = MAX_NR_ZONES;
Christoph Lameter02a68a52006-01-06 00:11:18 -08004051
4052 do {
Christoph Lameter2f6726e2006-09-25 23:31:18 -07004053 zone_type--;
Christoph Lameter070f8032006-01-06 00:11:19 -08004054 zone = pgdat->node_zones + zone_type;
Christoph Lameter1a932052006-01-06 00:11:16 -08004055 if (populated_zone(zone)) {
Mel Gormandd1a2392008-04-28 02:12:17 -07004056 zoneref_set_zone(zone,
4057 &zonelist->_zonerefs[nr_zones++]);
Christoph Lameter070f8032006-01-06 00:11:19 -08004058 check_highest_zone(zone_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059 }
Christoph Lameter2f6726e2006-09-25 23:31:18 -07004060 } while (zone_type);
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004061
Christoph Lameter070f8032006-01-06 00:11:19 -08004062 return nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004063}
4064
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004065
4066/*
4067 * zonelist_order:
4068 * 0 = automatic detection of better ordering.
4069 * 1 = order by ([node] distance, -zonetype)
4070 * 2 = order by (-zonetype, [node] distance)
4071 *
4072 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
4073 * the same zonelist. So only NUMA can configure this param.
4074 */
4075#define ZONELIST_ORDER_DEFAULT 0
4076#define ZONELIST_ORDER_NODE 1
4077#define ZONELIST_ORDER_ZONE 2
4078
4079/* zonelist order in the kernel.
4080 * set_zonelist_order() will set this to NODE or ZONE.
4081 */
4082static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
4083static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
4084
4085
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086#ifdef CONFIG_NUMA
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004087/* The value user specified ....changed by config */
4088static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4089/* string for sysctl */
4090#define NUMA_ZONELIST_ORDER_LEN 16
4091char numa_zonelist_order[16] = "default";
4092
4093/*
 4094 * Interface for configuring zonelist ordering.
 4095 * command line option "numa_zonelist_order"
 4096 * = "[dD]efault" - default, automatic configuration.
 4097 * = "[nN]ode" - order by node locality, then by zone within node
 4098 * = "[zZ]one" - order by zone, then by locality within zone
4099 */
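/*
 * Illustrative usage (values only as examples): the ordering can be chosen
 * at boot with "numa_zonelist_order=zone" on the command line, or changed
 * at run time via the sysctl, e.g.
 *
 *	echo Node > /proc/sys/vm/numa_zonelist_order
 *
 * Both paths end up in __parse_numa_zonelist_order() below.
 */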
4100
4101static int __parse_numa_zonelist_order(char *s)
4102{
4103 if (*s == 'd' || *s == 'D') {
4104 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4105 } else if (*s == 'n' || *s == 'N') {
4106 user_zonelist_order = ZONELIST_ORDER_NODE;
4107 } else if (*s == 'z' || *s == 'Z') {
4108 user_zonelist_order = ZONELIST_ORDER_ZONE;
4109 } else {
Joe Perches11705322016-03-17 14:19:50 -07004110 pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004111 return -EINVAL;
4112 }
4113 return 0;
4114}
4115
4116static __init int setup_numa_zonelist_order(char *s)
4117{
Volodymyr G. Lukiianykecb256f2011-01-13 15:46:26 -08004118 int ret;
4119
4120 if (!s)
4121 return 0;
4122
4123 ret = __parse_numa_zonelist_order(s);
4124 if (ret == 0)
4125 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
4126
4127 return ret;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004128}
4129early_param("numa_zonelist_order", setup_numa_zonelist_order);
4130
4131/*
4132 * sysctl handler for numa_zonelist_order
4133 */
Joe Perchescccad5b2014-06-06 14:38:09 -07004134int numa_zonelist_order_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07004135 void __user *buffer, size_t *length,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004136 loff_t *ppos)
4137{
4138 char saved_string[NUMA_ZONELIST_ORDER_LEN];
4139 int ret;
Andi Kleen443c6f12009-12-23 21:00:47 +01004140 static DEFINE_MUTEX(zl_order_mutex);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004141
Andi Kleen443c6f12009-12-23 21:00:47 +01004142 mutex_lock(&zl_order_mutex);
Chen Gangdacbde02013-07-03 15:02:35 -07004143 if (write) {
4144 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4145 ret = -EINVAL;
4146 goto out;
4147 }
4148 strcpy(saved_string, (char *)table->data);
4149 }
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07004150 ret = proc_dostring(table, write, buffer, length, ppos);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004151 if (ret)
Andi Kleen443c6f12009-12-23 21:00:47 +01004152 goto out;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004153 if (write) {
4154 int oldval = user_zonelist_order;
Chen Gangdacbde02013-07-03 15:02:35 -07004155
4156 ret = __parse_numa_zonelist_order((char *)table->data);
4157 if (ret) {
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004158 /*
4159 * bogus value. restore saved string
4160 */
Chen Gangdacbde02013-07-03 15:02:35 -07004161 strncpy((char *)table->data, saved_string,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004162 NUMA_ZONELIST_ORDER_LEN);
4163 user_zonelist_order = oldval;
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004164 } else if (oldval != user_zonelist_order) {
4165 mutex_lock(&zonelists_mutex);
Jiang Liu9adb62a2012-07-31 16:43:28 -07004166 build_all_zonelists(NULL, NULL);
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004167 mutex_unlock(&zonelists_mutex);
4168 }
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004169 }
Andi Kleen443c6f12009-12-23 21:00:47 +01004170out:
4171 mutex_unlock(&zl_order_mutex);
4172 return ret;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004173}
4174
4175
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004176#define MAX_NODE_LOAD (nr_online_nodes)
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004177static int node_load[MAX_NUMNODES];
4178
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179/**
Pavel Pisa4dc3b162005-05-01 08:59:25 -07004180 * find_next_best_node - find the next node that should appear in a given node's fallback list
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181 * @node: node whose fallback list we're appending
4182 * @used_node_mask: nodemask_t of already used nodes
4183 *
4184 * We use a number of factors to determine which is the next node that should
4185 * appear on a given node's fallback list. The node should not have appeared
4186 * already in @node's fallback list, and it should be the next closest node
4187 * according to the distance array (which contains arbitrary distance values
4188 * from each node to each node in the system), and should also prefer nodes
4189 * with no CPUs, since presumably they'll have very little allocation pressure
4190 * on them otherwise.
4191 * It returns -1 if no node is found.
4192 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004193static int find_next_best_node(int node, nodemask_t *used_node_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194{
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01004195 int n, val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 int min_val = INT_MAX;
David Rientjes00ef2d22013-02-22 16:35:36 -08004197 int best_node = NUMA_NO_NODE;
Rusty Russella70f7302009-03-13 14:49:46 +10304198 const struct cpumask *tmp = cpumask_of_node(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004199
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01004200 /* Use the local node if we haven't already */
4201 if (!node_isset(node, *used_node_mask)) {
4202 node_set(node, *used_node_mask);
4203 return node;
4204 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08004206 for_each_node_state(n, N_MEMORY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207
4208 /* Don't want a node to appear more than once */
4209 if (node_isset(n, *used_node_mask))
4210 continue;
4211
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212 /* Use the distance array to find the distance */
4213 val = node_distance(node, n);
4214
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01004215 /* Penalize nodes under us ("prefer the next node") */
4216 val += (n < node);
4217
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218 /* Give preference to headless and unused nodes */
Rusty Russella70f7302009-03-13 14:49:46 +10304219 tmp = cpumask_of_node(n);
4220 if (!cpumask_empty(tmp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221 val += PENALTY_FOR_NODE_WITH_CPUS;
4222
4223 /* Slight preference for less loaded node */
4224 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4225 val += node_load[n];
4226
4227 if (val < min_val) {
4228 min_val = val;
4229 best_node = n;
4230 }
4231 }
4232
4233 if (best_node >= 0)
4234 node_set(best_node, *used_node_mask);
4235
4236 return best_node;
4237}
4238
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004239
4240/*
4241 * Build zonelists ordered by node and zones within node.
4242 * This results in maximum locality--normal zone overflows into local
4243 * DMA zone, if any--but risks exhausting DMA zone.
4244 */
4245static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004247 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248 struct zonelist *zonelist;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004249
Mel Gorman54a6eb52008-04-28 02:12:16 -07004250 zonelist = &pgdat->node_zonelists[0];
Mel Gormandd1a2392008-04-28 02:12:17 -07004251 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
Mel Gorman54a6eb52008-04-28 02:12:16 -07004252 ;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004253 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Mel Gormandd1a2392008-04-28 02:12:17 -07004254 zonelist->_zonerefs[j].zone = NULL;
4255 zonelist->_zonerefs[j].zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004256}
4257
4258/*
Christoph Lameter523b9452007-10-16 01:25:37 -07004259 * Build gfp_thisnode zonelists
4260 */
4261static void build_thisnode_zonelists(pg_data_t *pgdat)
4262{
Christoph Lameter523b9452007-10-16 01:25:37 -07004263 int j;
4264 struct zonelist *zonelist;
4265
Mel Gorman54a6eb52008-04-28 02:12:16 -07004266 zonelist = &pgdat->node_zonelists[1];
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004267 j = build_zonelists_node(pgdat, zonelist, 0);
Mel Gormandd1a2392008-04-28 02:12:17 -07004268 zonelist->_zonerefs[j].zone = NULL;
4269 zonelist->_zonerefs[j].zone_idx = 0;
Christoph Lameter523b9452007-10-16 01:25:37 -07004270}
4271
4272/*
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004273 * Build zonelists ordered by zone and nodes within zones.
4274 * This results in conserving DMA zone[s] until all Normal memory is
4275 * exhausted, but results in overflowing to remote node while memory
4276 * may still exist in local DMA zone.
4277 */
4278static int node_order[MAX_NUMNODES];
4279
4280static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4281{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004282 int pos, j, node;
4283 int zone_type; /* needs to be signed */
4284 struct zone *z;
4285 struct zonelist *zonelist;
4286
Mel Gorman54a6eb52008-04-28 02:12:16 -07004287 zonelist = &pgdat->node_zonelists[0];
4288 pos = 0;
4289 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4290 for (j = 0; j < nr_nodes; j++) {
4291 node = node_order[j];
4292 z = &NODE_DATA(node)->node_zones[zone_type];
4293 if (populated_zone(z)) {
Mel Gormandd1a2392008-04-28 02:12:17 -07004294 zoneref_set_zone(z,
4295 &zonelist->_zonerefs[pos++]);
Mel Gorman54a6eb52008-04-28 02:12:16 -07004296 check_highest_zone(zone_type);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004297 }
4298 }
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004299 }
Mel Gormandd1a2392008-04-28 02:12:17 -07004300 zonelist->_zonerefs[pos].zone = NULL;
4301 zonelist->_zonerefs[pos].zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004302}
4303
Mel Gorman31939132014-10-09 15:28:30 -07004304#if defined(CONFIG_64BIT)
4305/*
4306 * Devices that require DMA32/DMA are relatively rare and do not justify a
4307 * penalty to every machine in case the specialised case applies. Default
4308 * to Node-ordering on 64-bit NUMA machines
4309 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004310static int default_zonelist_order(void)
4311{
Mel Gorman31939132014-10-09 15:28:30 -07004312 return ZONELIST_ORDER_NODE;
4313}
4314#else
4315/*
4316 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
4317 * by the kernel. If processes running on node 0 deplete the low memory zone
 4318 * then reclaim will occur more frequently, increasing stalls and potentially
 4319 * making it easier to hit OOM if a large percentage of the zone is under writeback or
4320 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
4321 * Hence, default to zone ordering on 32-bit.
4322 */
4323static int default_zonelist_order(void)
4324{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004325 return ZONELIST_ORDER_ZONE;
4326}
Mel Gorman31939132014-10-09 15:28:30 -07004327#endif /* CONFIG_64BIT */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004328
4329static void set_zonelist_order(void)
4330{
4331 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
4332 current_zonelist_order = default_zonelist_order();
4333 else
4334 current_zonelist_order = user_zonelist_order;
4335}
4336
4337static void build_zonelists(pg_data_t *pgdat)
4338{
Yaowei Baic00eb152016-01-14 15:19:00 -08004339 int i, node, load;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 nodemask_t used_mask;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004341 int local_node, prev_node;
4342 struct zonelist *zonelist;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004343 unsigned int order = current_zonelist_order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004344
4345 /* initialize zonelists */
Christoph Lameter523b9452007-10-16 01:25:37 -07004346 for (i = 0; i < MAX_ZONELISTS; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347 zonelist = pgdat->node_zonelists + i;
Mel Gormandd1a2392008-04-28 02:12:17 -07004348 zonelist->_zonerefs[0].zone = NULL;
4349 zonelist->_zonerefs[0].zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 }
4351
4352 /* NUMA-aware ordering of nodes */
4353 local_node = pgdat->node_id;
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004354 load = nr_online_nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 prev_node = local_node;
4356 nodes_clear(used_mask);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004357
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004358 memset(node_order, 0, sizeof(node_order));
Yaowei Baic00eb152016-01-14 15:19:00 -08004359 i = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004360
Linus Torvalds1da177e2005-04-16 15:20:36 -07004361 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4362 /*
4363 * We don't want to pressure a particular node.
4364 * So adding penalty to the first node in same
4365 * distance group to make it round-robin.
4366 */
David Rientjes957f8222012-10-08 16:33:24 -07004367 if (node_distance(local_node, node) !=
4368 node_distance(local_node, prev_node))
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004369 node_load[node] = load;
4370
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 prev_node = node;
4372 load--;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004373 if (order == ZONELIST_ORDER_NODE)
4374 build_zonelists_in_node_order(pgdat, node);
4375 else
Yaowei Baic00eb152016-01-14 15:19:00 -08004376 node_order[i++] = node; /* remember order */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004377 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004379 if (order == ZONELIST_ORDER_ZONE) {
4380 /* calculate node order -- i.e., DMA last! */
Yaowei Baic00eb152016-01-14 15:19:00 -08004381 build_zonelists_in_zone_order(pgdat, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004382 }
Christoph Lameter523b9452007-10-16 01:25:37 -07004383
4384 build_thisnode_zonelists(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385}
4386
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004387#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4388/*
4389 * Return node id of node used for "local" allocations.
4390 * I.e., first node id of first zone in arg node's generic zonelist.
4391 * Used for initializing percpu 'numa_mem', which is used primarily
4392 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
4393 */
4394int local_memory_node(int node)
4395{
4396 struct zone *zone;
4397
4398 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
4399 gfp_zone(GFP_KERNEL),
4400 NULL,
4401 &zone);
4402 return zone->node;
4403}
4404#endif
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004405
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406#else /* CONFIG_NUMA */
4407
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004408static void set_zonelist_order(void)
4409{
4410 current_zonelist_order = ZONELIST_ORDER_ZONE;
4411}
4412
4413static void build_zonelists(pg_data_t *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414{
Christoph Lameter19655d32006-09-25 23:31:19 -07004415 int node, local_node;
Mel Gorman54a6eb52008-04-28 02:12:16 -07004416 enum zone_type j;
4417 struct zonelist *zonelist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418
4419 local_node = pgdat->node_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420
Mel Gorman54a6eb52008-04-28 02:12:16 -07004421 zonelist = &pgdat->node_zonelists[0];
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004422 j = build_zonelists_node(pgdat, zonelist, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004423
Mel Gorman54a6eb52008-04-28 02:12:16 -07004424 /*
4425 * Now we build the zonelist so that it contains the zones
4426 * of all the other nodes.
4427 * We don't want to pressure a particular node, so when
4428 * building the zones for node N, we make sure that the
4429 * zones coming right after the local ones are those from
4430 * node N+1 (modulo N)
4431 */
4432 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
4433 if (!node_online(node))
4434 continue;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004435 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07004437 for (node = 0; node < local_node; node++) {
4438 if (!node_online(node))
4439 continue;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004440 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Mel Gorman54a6eb52008-04-28 02:12:16 -07004441 }
4442
Mel Gormandd1a2392008-04-28 02:12:17 -07004443 zonelist->_zonerefs[j].zone = NULL;
4444 zonelist->_zonerefs[j].zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445}
4446
4447#endif /* CONFIG_NUMA */
4448
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004449/*
4450 * Boot pageset table. One per cpu which is going to be used for all
4451 * zones and all nodes. The parameters will be set in such a way
4452 * that an item put on a list will immediately be handed over to
4453 * the buddy list. This is safe since pageset manipulation is done
4454 * with interrupts disabled.
4455 *
4456 * The boot_pagesets must be kept even after bootup is complete for
4457 * unused processors and/or zones. They do play a role for bootstrapping
4458 * hotplugged processors.
4459 *
4460 * zoneinfo_show() and maybe other functions do
4461 * not check if the processor is online before following the pageset pointer.
4462 * Other parts of the kernel may not check if the zone is available.
4463 */
4464static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
4465static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
Haicheng Li1f522502010-05-24 14:32:51 -07004466static void setup_zone_pageset(struct zone *zone);
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004467
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004468/*
4469 * Global mutex to protect against size modification of zonelists
4470 * as well as to serialize pageset setup for the new populated zone.
4471 */
4472DEFINE_MUTEX(zonelists_mutex);
4473
Rusty Russell9b1a4d32008-07-28 12:16:30 -05004474/* The return type is int only to satisfy stop_machine() */
Jiang Liu4ed7e022012-07-31 16:43:35 -07004475static int __build_all_zonelists(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476{
Yasunori Goto68113782006-06-23 02:03:11 -07004477 int nid;
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004478 int cpu;
Jiang Liu9adb62a2012-07-31 16:43:28 -07004479 pg_data_t *self = data;
Paul Jackson9276b1bc2006-12-06 20:31:48 -08004480
Bo Liu7f9cfb32009-08-18 14:11:19 -07004481#ifdef CONFIG_NUMA
4482 memset(node_load, 0, sizeof(node_load));
4483#endif
Jiang Liu9adb62a2012-07-31 16:43:28 -07004484
4485 if (self && !node_online(self->node_id)) {
4486 build_zonelists(self);
Jiang Liu9adb62a2012-07-31 16:43:28 -07004487 }
4488
Paul Jackson9276b1bc2006-12-06 20:31:48 -08004489 for_each_online_node(nid) {
Christoph Lameter7ea15302007-10-16 01:25:29 -07004490 pg_data_t *pgdat = NODE_DATA(nid);
4491
4492 build_zonelists(pgdat);
Paul Jackson9276b1bc2006-12-06 20:31:48 -08004493 }
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004494
4495 /*
4496 * Initialize the boot_pagesets that are going to be used
4497 * for bootstrapping processors. The real pagesets for
4498 * each zone will be allocated later when the per cpu
4499 * allocator is available.
4500 *
4501 * boot_pagesets are used also for bootstrapping offline
4502 * cpus if the system is already booted because the pagesets
4503 * are needed to initialize allocators on a specific cpu too.
4504 * F.e. the percpu allocator needs the page allocator which
4505 * needs the percpu allocator in order to allocate its pagesets
4506 * (a chicken-egg dilemma).
4507 */
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004508 for_each_possible_cpu(cpu) {
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004509 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
4510
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004511#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4512 /*
4513 * We now know the "local memory node" for each node--
4514 * i.e., the node of the first zone in the generic zonelist.
4515 * Set up numa_mem percpu variable for on-line cpus. During
4516 * boot, only the boot cpu should be on-line; we'll init the
4517 * secondary cpus' numa_mem as they come on-line. During
4518 * node/memory hotplug, we'll fixup all on-line cpus.
4519 */
4520 if (cpu_online(cpu))
4521 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
4522#endif
4523 }
4524
Yasunori Goto68113782006-06-23 02:03:11 -07004525 return 0;
4526}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004527
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08004528static noinline void __init
4529build_all_zonelists_init(void)
4530{
4531 __build_all_zonelists(NULL);
4532 mminit_verify_zonelist();
4533 cpuset_init_current_mems_allowed();
4534}
4535
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004536/*
 4537 * Called with zonelists_mutex always held,
 4538 * unless system_state == SYSTEM_BOOTING.
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08004539 *
4540 * __ref due to (1) call of __meminit annotated setup_zone_pageset
4541 * [we're only called with non-NULL zone through __meminit paths] and
4542 * (2) call of __init annotated helper build_all_zonelists_init
4543 * [protected by SYSTEM_BOOTING].
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004544 */
Jiang Liu9adb62a2012-07-31 16:43:28 -07004545void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
Yasunori Goto68113782006-06-23 02:03:11 -07004546{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004547 set_zonelist_order();
4548
Yasunori Goto68113782006-06-23 02:03:11 -07004549 if (system_state == SYSTEM_BOOTING) {
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08004550 build_all_zonelists_init();
Yasunori Goto68113782006-06-23 02:03:11 -07004551 } else {
KAMEZAWA Hiroyukie9959f02010-11-24 12:57:09 -08004552#ifdef CONFIG_MEMORY_HOTPLUG
Jiang Liu9adb62a2012-07-31 16:43:28 -07004553 if (zone)
4554 setup_zone_pageset(zone);
KAMEZAWA Hiroyukie9959f02010-11-24 12:57:09 -08004555#endif
Cody P Schaferdd1895e2013-07-03 15:01:36 -07004556 /* we have to stop all cpus to guarantee there is no user
 4557 * of zonelist */
Jiang Liu9adb62a2012-07-31 16:43:28 -07004558 stop_machine(__build_all_zonelists, pgdat, NULL);
Yasunori Goto68113782006-06-23 02:03:11 -07004559 /* cpuset refresh routine should be here */
4560 }
Andrew Mortonbd1e22b2006-06-23 02:03:47 -07004561 vm_total_pages = nr_free_pagecache_pages();
Mel Gorman9ef9acb2007-10-16 01:25:54 -07004562 /*
4563 * Disable grouping by mobility if the number of pages in the
4564 * system is too low to allow the mechanism to work. It would be
4565 * more accurate, but expensive to check per-zone. This check is
4566 * made on memory-hotadd so a system can start with mobility
4567 * disabled and enable it later
4568 */
Mel Gormand9c23402007-10-16 01:26:01 -07004569 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
Mel Gorman9ef9acb2007-10-16 01:25:54 -07004570 page_group_by_mobility_disabled = 1;
4571 else
4572 page_group_by_mobility_disabled = 0;
4573
Joe Perches756a0252016-03-17 14:19:47 -07004574 pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
4575 nr_online_nodes,
4576 zonelist_order_name[current_zonelist_order],
4577 page_group_by_mobility_disabled ? "off" : "on",
4578 vm_total_pages);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004579#ifdef CONFIG_NUMA
Anton Blanchardf88dfff2014-12-10 15:42:53 -08004580 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004581#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582}
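/*
 * The pr_info() above yields boot messages along the lines of (numbers are
 * illustrative only; the second line appears on NUMA builds):
 *
 *	Built 2 zonelists in Node order, mobility grouping on. Total pages: 2041191
 *	Policy zone: Normal
 */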
4583
4584/*
4585 * Helper functions to size the waitqueue hash table.
4586 * Essentially these want to choose hash table sizes sufficiently
4587 * large so that collisions trying to wait on pages are rare.
4588 * But in fact, the number of active page waitqueues on typical
4589 * systems is ridiculously low, less than 200. So this is even
4590 * conservative, even though it seems large.
4591 *
4592 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
4593 * waitqueues, i.e. the size of the waitq table given the number of pages.
4594 */
4595#define PAGES_PER_WAITQUEUE 256
4596
Yasunori Gotocca448f2006-06-23 02:03:10 -07004597#ifndef CONFIG_MEMORY_HOTPLUG
Yasunori Goto02b694d2006-06-23 02:03:08 -07004598static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599{
4600 unsigned long size = 1;
4601
4602 pages /= PAGES_PER_WAITQUEUE;
4603
4604 while (size < pages)
4605 size <<= 1;
4606
4607 /*
4608 * Once we have dozens or even hundreds of threads sleeping
4609 * on IO we've got bigger problems than wait queue collision.
4610 * Limit the size of the wait table to a reasonable size.
4611 */
4612 size = min(size, 4096UL);
4613
4614 return max(size, 4UL);
4615}
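/*
 * Worked example (numbers for illustration only): a zone holding 1 GiB of
 * 4 KiB pages has 262144 pages; 262144 / 256 = 1024, already a power of
 * two, so the table gets 1024 entries.  Tiny zones are rounded up to at
 * least 4 entries and huge ones are capped at 4096.
 */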
Yasunori Gotocca448f2006-06-23 02:03:10 -07004616#else
4617/*
4618 * A zone's size might be changed by hot-add, so it is not possible to determine
4619 * a suitable size for its wait_table. So we use the maximum size now.
4620 *
4621 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
4622 *
4623 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
4624 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
4625 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
4626 *
4627 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
4628 * or more by the traditional way. (See above). It equals:
4629 *
4630 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
4631 * ia64(16K page size) : = ( 8G + 4M)byte.
4632 * powerpc (64K page size) : = (32G +16M)byte.
4633 */
4634static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4635{
4636 return 4096UL;
4637}
4638#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004639
4640/*
4641 * This is an integer logarithm so that shifts can be used later
4642 * to extract the more random high bits from the multiplicative
4643 * hash function before the remainder is taken.
4644 */
4645static inline unsigned long wait_table_bits(unsigned long size)
4646{
4647 return ffz(~size);
4648}
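/*
 * E.g. wait_table_bits(4096) == 12: the table size is always a power of
 * two, so ffz(~size) is simply log2(size).
 */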
4649
Mel Gorman56fd56b2007-10-16 01:25:58 -07004650/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651 * Initially all pages are reserved - free ones are freed
4652 * up by free_all_bootmem() once the early boot process is
4653 * done. Non-atomic initialization, single-pass.
4654 */
Matt Tolentinoc09b4242006-01-17 07:03:44 +01004655void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
Dave Hansena2f3aa022007-01-10 23:15:30 -08004656 unsigned long start_pfn, enum memmap_context context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657{
Dan Williams4b94ffd2016-01-15 16:56:22 -08004658 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
Andy Whitcroft29751f62005-06-23 00:08:00 -07004659 unsigned long end_pfn = start_pfn + size;
Dan Williams4b94ffd2016-01-15 16:56:22 -08004660 pg_data_t *pgdat = NODE_DATA(nid);
Andy Whitcroft29751f62005-06-23 00:08:00 -07004661 unsigned long pfn;
Mel Gorman3a80a7f2015-06-30 14:57:02 -07004662 unsigned long nr_initialised = 0;
Taku Izumi342332e2016-03-15 14:55:22 -07004663#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4664 struct memblock_region *r = NULL, *tmp;
4665#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666
Hugh Dickins22b31ee2009-01-06 14:40:09 -08004667 if (highest_memmap_pfn < end_pfn - 1)
4668 highest_memmap_pfn = end_pfn - 1;
4669
Dan Williams4b94ffd2016-01-15 16:56:22 -08004670 /*
4671 * Honor reservation requested by the driver for this ZONE_DEVICE
4672 * memory
4673 */
4674 if (altmap && start_pfn == altmap->base_pfn)
4675 start_pfn += altmap->reserve;
4676
Greg Ungerercbe8dd42006-01-12 01:05:24 -08004677 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
Dave Hansena2f3aa022007-01-10 23:15:30 -08004678 /*
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07004679 * There can be holes in boot-time mem_map[]s handed to this
4680 * function. They do not exist on hotplugged memory.
Dave Hansena2f3aa022007-01-10 23:15:30 -08004681 */
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07004682 if (context != MEMMAP_EARLY)
4683 goto not_early;
4684
4685 if (!early_pfn_valid(pfn))
4686 continue;
4687 if (!early_pfn_in_nid(pfn, nid))
4688 continue;
4689 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
4690 break;
Taku Izumi342332e2016-03-15 14:55:22 -07004691
4692#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07004693 /*
4694 * If mirrored_kernelcore is not set and ZONE_MOVABLE exists, the
4695 * range from zone_movable_pfn[nid] to the end of each node should
4696 * be ZONE_MOVABLE rather than ZONE_NORMAL. Skip it.
4697 */
4698 if (!mirrored_kernelcore && zone_movable_pfn[nid])
4699 if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
4700 continue;
Taku Izumi342332e2016-03-15 14:55:22 -07004701
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07004702 /*
4703 * Check the memblock attribute set by firmware, which can affect
4704 * the kernel memory layout. If zone==ZONE_MOVABLE but the memory
4705 * is mirrored, this is an overlapping memmap init. Skip it.
4706 */
4707 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
4708 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
4709 for_each_memblock(memory, tmp)
4710 if (pfn < memblock_region_memory_end_pfn(tmp))
4711 break;
4712 r = tmp;
Taku Izumi342332e2016-03-15 14:55:22 -07004713 }
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07004714 if (pfn >= memblock_region_memory_base_pfn(r) &&
4715 memblock_is_mirror(r)) {
4716 /* already initialized as NORMAL */
4717 pfn = memblock_region_memory_end_pfn(r);
4718 continue;
4719 }
Dave Hansena2f3aa022007-01-10 23:15:30 -08004720 }
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07004721#endif
Mel Gormanac5d2532015-06-30 14:57:20 -07004722
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07004723not_early:
Mel Gormanac5d2532015-06-30 14:57:20 -07004724 /*
4725 * Mark the block movable so that blocks are reserved for
4726 * movable at startup. This will force kernel allocations
4727 * to reserve their blocks rather than leaking throughout
4728 * the address space during boot when many long-lived
Mel Gorman974a7862015-11-06 16:28:34 -08004729 * kernel allocations are made.
Mel Gormanac5d2532015-06-30 14:57:20 -07004730 *
4731 * The bitmap is created for the zone's valid pfn range, but the
4732 * memmap can be created for invalid pages (for alignment).
4733 * Check here so that set_pageblock_migratetype() is not called
4734 * against a pfn outside the zone.
4735 */
4736 if (!(pfn & (pageblock_nr_pages - 1))) {
4737 struct page *page = pfn_to_page(pfn);
4738
4739 __init_single_page(page, pfn, zone, nid);
4740 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4741 } else {
4742 __init_single_pfn(pfn, zone, nid);
4743 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744 }
4745}
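/*
 * Illustrative note: with order-9 pageblocks (512 pages, i.e. 2MiB with
 * 4KiB pages), only pfns aligned to a 512-page block take the
 * set_pageblock_migratetype() branch above; the other 511 pfns in each
 * block go through the cheaper __init_single_pfn() path.
 */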
4746
Andi Kleen1e548de2008-02-04 22:29:26 -08004747static void __meminit zone_init_free_lists(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748{
Mel Gorman7aeb09f2014-06-04 16:10:21 -07004749 unsigned int order, t;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07004750 for_each_migratetype_order(order, t) {
4751 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752 zone->free_area[order].nr_free = 0;
4753 }
4754}
4755
4756#ifndef __HAVE_ARCH_MEMMAP_INIT
4757#define memmap_init(size, nid, zone, start_pfn) \
Dave Hansena2f3aa022007-01-10 23:15:30 -08004758 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004759#endif
4760
David Rientjes7cd2b0a2014-06-23 13:22:04 -07004761static int zone_batchsize(struct zone *zone)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004762{
David Howells3a6be872009-05-06 16:03:03 -07004763#ifdef CONFIG_MMU
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004764 int batch;
4765
4766 /*
4767 * The per-cpu-pages pools are set to around 1000th of the
Seth, Rohitba56e912005-10-29 18:15:47 -07004768 * size of the zone. But no more than 1/2 of a meg.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004769 *
4770 * OK, so we don't know how big the cache is. So guess.
4771 */
Jiang Liub40da042013-02-22 16:33:52 -08004772 batch = zone->managed_pages / 1024;
Seth, Rohitba56e912005-10-29 18:15:47 -07004773 if (batch * PAGE_SIZE > 512 * 1024)
4774 batch = (512 * 1024) / PAGE_SIZE;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004775 batch /= 4; /* We effectively *= 4 below */
4776 if (batch < 1)
4777 batch = 1;
4778
4779 /*
Nick Piggin0ceaacc2005-12-04 13:55:25 +11004780 * Clamp the batch to a 2^n - 1 value. Having a power
4781 * of 2 value was found to be more likely to have
4782 * suboptimal cache aliasing properties in some cases.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004783 *
Nick Piggin0ceaacc2005-12-04 13:55:25 +11004784 * For example if 2 tasks are alternately allocating
4785 * batches of pages, one task can end up with a lot
4786 * of pages of one half of the possible page colors
4787 * and the other with pages of the other colors.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004788 */
David Howells91552032009-05-06 16:03:02 -07004789 batch = rounddown_pow_of_two(batch + batch/2) - 1;
Seth, Rohitba56e912005-10-29 18:15:47 -07004790
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004791 return batch;
David Howells3a6be872009-05-06 16:03:03 -07004792
4793#else
4794 /* The deferral and batching of frees should be suppressed under NOMMU
4795 * conditions.
4796 *
4797 * The problem is that NOMMU needs to be able to allocate large chunks
4798 * of contiguous memory as there's no hardware page translation to
4799 * assemble apparent contiguous memory from discontiguous pages.
4800 *
4801 * Queueing large contiguous runs of pages for batching, however,
4802 * causes the pages to actually be freed in smaller chunks. As there
4803 * can be a significant delay between the individual batches being
4804 * recycled, this leads to the once large chunks of space being
4805 * fragmented and becoming unavailable for high-order allocations.
4806 */
4807 return 0;
4808#endif
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004809}
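/*
 * Worked example (illustrative, 4KiB pages, CONFIG_MMU): a 1GiB zone has
 * managed_pages = 262144, giving an initial estimate of 256 pages. That
 * exceeds 512KiB, so it is clamped to 128, divided by 4 to get 32, and
 * rounddown_pow_of_two(32 + 16) - 1 yields a batch of 31. Any zone of
 * 512MiB or more ends up with the same value.
 */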
4810
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07004811/*
4812 * pcp->high and pcp->batch values are related and dependent on one another:
4813 * ->batch must never be higher than ->high.
4814 * The following function updates them in a safe manner without read side
4815 * locking.
4816 *
4817 * Any new users of pcp->batch and pcp->high should ensure they can cope with
4818 * those fields changing asynchronously (according to the above rule).
4819 *
4820 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
4821 * outside of boot time (or some other assurance that no concurrent updaters
4822 * exist).
4823 */
4824static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
4825 unsigned long batch)
4826{
4827 /* start with a fail safe value for batch */
4828 pcp->batch = 1;
4829 smp_wmb();
4830
4831 /* Update high, then batch, in order */
4832 pcp->high = high;
4833 smp_wmb();
4834
4835 pcp->batch = batch;
4836}
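/*
 * One way to read the ordering above (illustrative): a concurrent free
 * path may observe the fields at any point. Dropping ->batch to the
 * fail-safe value of 1 before ->high is touched means a reader can never
 * pair a stale, large ->batch with a smaller new ->high, so it never
 * drains more pages in one go than the new limits allow.
 */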
4837
Cody P Schafer36640332013-07-03 15:01:40 -07004838/* a companion to pageset_set_high() */
Cody P Schafer4008bab2013-07-03 15:01:28 -07004839static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
4840{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07004841 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
Cody P Schafer4008bab2013-07-03 15:01:28 -07004842}
4843
Cody P Schafer88c90db2013-07-03 15:01:35 -07004844static void pageset_init(struct per_cpu_pageset *p)
Christoph Lameter2caaad42005-06-21 17:15:00 -07004845{
4846 struct per_cpu_pages *pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07004847 int migratetype;
Christoph Lameter2caaad42005-06-21 17:15:00 -07004848
Magnus Damm1c6fe942005-10-26 01:58:59 -07004849 memset(p, 0, sizeof(*p));
4850
Christoph Lameter3dfa5722008-02-04 22:29:19 -08004851 pcp = &p->pcp;
Christoph Lameter2caaad42005-06-21 17:15:00 -07004852 pcp->count = 0;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07004853 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4854 INIT_LIST_HEAD(&pcp->lists[migratetype]);
Christoph Lameter2caaad42005-06-21 17:15:00 -07004855}
4856
Cody P Schafer88c90db2013-07-03 15:01:35 -07004857static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
4858{
4859 pageset_init(p);
4860 pageset_set_batch(p, batch);
4861}
4862
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08004863/*
Cody P Schafer36640332013-07-03 15:01:40 -07004864 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08004865 * to the value high for the pageset p.
4866 */
Cody P Schafer36640332013-07-03 15:01:40 -07004867static void pageset_set_high(struct per_cpu_pageset *p,
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08004868 unsigned long high)
4869{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07004870 unsigned long batch = max(1UL, high / 4);
4871 if ((high / 4) > (PAGE_SHIFT * 8))
4872 batch = PAGE_SHIFT * 8;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08004873
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07004874 pageset_update(&p->pcp, high, batch);
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08004875}
4876
David Rientjes7cd2b0a2014-06-23 13:22:04 -07004877static void pageset_set_high_and_batch(struct zone *zone,
4878 struct per_cpu_pageset *pcp)
Cody P Schafer56cef2b2013-07-03 15:01:38 -07004879{
Cody P Schafer56cef2b2013-07-03 15:01:38 -07004880 if (percpu_pagelist_fraction)
Cody P Schafer36640332013-07-03 15:01:40 -07004881 pageset_set_high(pcp,
Cody P Schafer56cef2b2013-07-03 15:01:38 -07004882 (zone->managed_pages /
4883 percpu_pagelist_fraction));
4884 else
4885 pageset_set_batch(pcp, zone_batchsize(zone));
4886}
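/*
 * Worked example (illustrative, 4KiB pages): with percpu_pagelist_fraction
 * set to 8 on a zone of 262144 managed pages, pageset_set_high() receives
 * high = 32768; high / 4 = 8192 exceeds PAGE_SHIFT * 8 = 96, so batch is
 * clamped to 96. With the fraction unset, zone_batchsize() supplies the
 * batch and high defaults to 6 * batch.
 */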
4887
Cody P Schafer169f6c12013-07-03 15:01:41 -07004888static void __meminit zone_pageset_init(struct zone *zone, int cpu)
4889{
4890 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4891
4892 pageset_init(pcp);
4893 pageset_set_high_and_batch(zone, pcp);
4894}
4895
Jiang Liu4ed7e022012-07-31 16:43:35 -07004896static void __meminit setup_zone_pageset(struct zone *zone)
Wu Fengguang319774e2010-05-24 14:32:49 -07004897{
4898 int cpu;
Wu Fengguang319774e2010-05-24 14:32:49 -07004899 zone->pageset = alloc_percpu(struct per_cpu_pageset);
Cody P Schafer56cef2b2013-07-03 15:01:38 -07004900 for_each_possible_cpu(cpu)
4901 zone_pageset_init(zone, cpu);
Wu Fengguang319774e2010-05-24 14:32:49 -07004902}
4903
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004904/*
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004905 * Allocate per cpu pagesets and initialize them.
4906 * Before this call only boot pagesets were available.
Christoph Lameter2caaad42005-06-21 17:15:00 -07004907 */
Al Viro78d99552005-12-15 09:18:25 +00004908void __init setup_per_cpu_pageset(void)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004909{
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004910 struct zone *zone;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004911
Wu Fengguang319774e2010-05-24 14:32:49 -07004912 for_each_populated_zone(zone)
4913 setup_zone_pageset(zone);
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004914}
4915
Sam Ravnborg577a32f2007-05-17 23:29:25 +02004916static noinline __init_refok
Yasunori Gotocca448f2006-06-23 02:03:10 -07004917int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
Dave Hansened8ece22005-10-29 18:16:50 -07004918{
4919 int i;
Yasunori Gotocca448f2006-06-23 02:03:10 -07004920 size_t alloc_size;
Dave Hansened8ece22005-10-29 18:16:50 -07004921
4922 /*
4923 * The per-page waitqueue mechanism uses hashed waitqueues
4924 * per zone.
4925 */
Yasunori Goto02b694d2006-06-23 02:03:08 -07004926 zone->wait_table_hash_nr_entries =
4927 wait_table_hash_nr_entries(zone_size_pages);
4928 zone->wait_table_bits =
4929 wait_table_bits(zone->wait_table_hash_nr_entries);
Yasunori Gotocca448f2006-06-23 02:03:10 -07004930 alloc_size = zone->wait_table_hash_nr_entries
4931 * sizeof(wait_queue_head_t);
4932
Heiko Carstenscd94b9d2008-05-23 13:04:52 -07004933 if (!slab_is_available()) {
Yasunori Gotocca448f2006-06-23 02:03:10 -07004934 zone->wait_table = (wait_queue_head_t *)
Santosh Shilimkar67828322014-01-21 15:50:25 -08004935 memblock_virt_alloc_node_nopanic(
4936 alloc_size, zone->zone_pgdat->node_id);
Yasunori Gotocca448f2006-06-23 02:03:10 -07004937 } else {
4938 /*
4939 * This case means that a zone whose size was 0 gets new memory
4940 * via memory hot-add.
4941 * It may also be that a whole new node was hot-added. In that
4942 * case vmalloc() cannot yet allocate from the new node's memory,
4943 * so this wait_table ends up on an existing node even though it
4944 * serves the new one.
4945 * Using the new node's own memory here would require further
4946 * work.
4947 */
Jesper Juhl8691f3a2007-10-16 01:24:49 -07004948 zone->wait_table = vmalloc(alloc_size);
Yasunori Gotocca448f2006-06-23 02:03:10 -07004949 }
4950 if (!zone->wait_table)
4951 return -ENOMEM;
Dave Hansened8ece22005-10-29 18:16:50 -07004952
Pintu Kumarb8af2942013-09-11 14:20:34 -07004953 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
Dave Hansened8ece22005-10-29 18:16:50 -07004954 init_waitqueue_head(zone->wait_table + i);
Yasunori Gotocca448f2006-06-23 02:03:10 -07004955
4956 return 0;
Dave Hansened8ece22005-10-29 18:16:50 -07004957}
4958
Matt Tolentinoc09b4242006-01-17 07:03:44 +01004959static __meminit void zone_pcp_init(struct zone *zone)
Dave Hansened8ece22005-10-29 18:16:50 -07004960{
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004961 /*
4962 * per cpu subsystem is not up at this point. The following code
4963 * relies on the ability of the linker to provide the
4964 * offset of a (static) per cpu variable into the per cpu area.
4965 */
4966 zone->pageset = &boot_pageset;
Dave Hansened8ece22005-10-29 18:16:50 -07004967
Xishi Qiub38a8722013-11-12 15:07:20 -08004968 if (populated_zone(zone))
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004969 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
4970 zone->name, zone->present_pages,
4971 zone_batchsize(zone));
Dave Hansened8ece22005-10-29 18:16:50 -07004972}
4973
Jiang Liu4ed7e022012-07-31 16:43:35 -07004974int __meminit init_currently_empty_zone(struct zone *zone,
Yasunori Goto718127c2006-06-23 02:03:10 -07004975 unsigned long zone_start_pfn,
Yaowei Baib171e402015-11-05 18:47:06 -08004976 unsigned long size)
Dave Hansened8ece22005-10-29 18:16:50 -07004977{
4978 struct pglist_data *pgdat = zone->zone_pgdat;
Yasunori Gotocca448f2006-06-23 02:03:10 -07004979 int ret;
4980 ret = zone_wait_table_init(zone, size);
4981 if (ret)
4982 return ret;
Dave Hansened8ece22005-10-29 18:16:50 -07004983 pgdat->nr_zones = zone_idx(zone) + 1;
4984
Dave Hansened8ece22005-10-29 18:16:50 -07004985 zone->zone_start_pfn = zone_start_pfn;
4986
Mel Gorman708614e2008-07-23 21:26:51 -07004987 mminit_dprintk(MMINIT_TRACE, "memmap_init",
4988 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4989 pgdat->node_id,
4990 (unsigned long)zone_idx(zone),
4991 zone_start_pfn, (zone_start_pfn + size));
4992
Andi Kleen1e548de2008-02-04 22:29:26 -08004993 zone_init_free_lists(zone);
Yasunori Goto718127c2006-06-23 02:03:10 -07004994
4995 return 0;
Dave Hansened8ece22005-10-29 18:16:50 -07004996}
4997
Tejun Heo0ee332c2011-12-08 10:22:09 -08004998#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Mel Gormanc7132162006-09-27 01:49:43 -07004999#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
Mel Gorman8a942fd2015-06-30 14:56:55 -07005000
Mel Gormanc7132162006-09-27 01:49:43 -07005001/*
5002 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
Mel Gormanc7132162006-09-27 01:49:43 -07005003 */
Mel Gorman8a942fd2015-06-30 14:56:55 -07005004int __meminit __early_pfn_to_nid(unsigned long pfn,
5005 struct mminit_pfnnid_cache *state)
Mel Gormanc7132162006-09-27 01:49:43 -07005006{
Tejun Heoc13291a2011-07-12 10:46:30 +02005007 unsigned long start_pfn, end_pfn;
Yinghai Lue76b63f2013-09-11 14:22:17 -07005008 int nid;
Russ Anderson7c243c72013-04-29 15:07:59 -07005009
Mel Gorman8a942fd2015-06-30 14:56:55 -07005010 if (state->last_start <= pfn && pfn < state->last_end)
5011 return state->last_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005012
Yinghai Lue76b63f2013-09-11 14:22:17 -07005013 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5014 if (nid != -1) {
Mel Gorman8a942fd2015-06-30 14:56:55 -07005015 state->last_start = start_pfn;
5016 state->last_end = end_pfn;
5017 state->last_nid = nid;
Yinghai Lue76b63f2013-09-11 14:22:17 -07005018 }
5019
5020 return nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005021}
5022#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
5023
Mel Gormanc7132162006-09-27 01:49:43 -07005024/**
Santosh Shilimkar67828322014-01-21 15:50:25 -08005025 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005026 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
Santosh Shilimkar67828322014-01-21 15:50:25 -08005027 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
Mel Gormanc7132162006-09-27 01:49:43 -07005028 *
Zhang Zhen7d018172014-06-04 16:10:53 -07005029 * If an architecture guarantees that all ranges registered contain no holes
5030 * and may be freed, this function may be used instead of calling
5031 * memblock_free_early_nid() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07005032 */
Tejun Heoc13291a2011-07-12 10:46:30 +02005033void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07005034{
Tejun Heoc13291a2011-07-12 10:46:30 +02005035 unsigned long start_pfn, end_pfn;
5036 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005037
Tejun Heoc13291a2011-07-12 10:46:30 +02005038 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5039 start_pfn = min(start_pfn, max_low_pfn);
5040 end_pfn = min(end_pfn, max_low_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005041
Tejun Heoc13291a2011-07-12 10:46:30 +02005042 if (start_pfn < end_pfn)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005043 memblock_free_early_nid(PFN_PHYS(start_pfn),
5044 (end_pfn - start_pfn) << PAGE_SHIFT,
5045 this_nid);
Mel Gormanc7132162006-09-27 01:49:43 -07005046 }
5047}
5048
5049/**
5050 * sparse_memory_present_with_active_regions - Call memory_present for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005051 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
Mel Gormanc7132162006-09-27 01:49:43 -07005052 *
Zhang Zhen7d018172014-06-04 16:10:53 -07005053 * If an architecture guarantees that all ranges registered contain no holes and may
5054 * be freed, this function may be used instead of calling memory_present() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07005055 */
5056void __init sparse_memory_present_with_active_regions(int nid)
5057{
Tejun Heoc13291a2011-07-12 10:46:30 +02005058 unsigned long start_pfn, end_pfn;
5059 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005060
Tejun Heoc13291a2011-07-12 10:46:30 +02005061 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5062 memory_present(this_nid, start_pfn, end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005063}
5064
5065/**
5066 * get_pfn_range_for_nid - Return the start and end page frames for a node
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005067 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
5068 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
5069 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
Mel Gormanc7132162006-09-27 01:49:43 -07005070 *
5071 * It returns the start and end page frame of a node based on information
Zhang Zhen7d018172014-06-04 16:10:53 -07005072 * provided by memblock_set_node(). If called for a node
Mel Gormanc7132162006-09-27 01:49:43 -07005073 * with no available memory, a warning is printed and the start and end
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005074 * PFNs will be 0.
Mel Gormanc7132162006-09-27 01:49:43 -07005075 */
Yasunori Gotoa3142c82007-05-08 00:23:07 -07005076void __meminit get_pfn_range_for_nid(unsigned int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005077 unsigned long *start_pfn, unsigned long *end_pfn)
5078{
Tejun Heoc13291a2011-07-12 10:46:30 +02005079 unsigned long this_start_pfn, this_end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005080 int i;
Tejun Heoc13291a2011-07-12 10:46:30 +02005081
Mel Gormanc7132162006-09-27 01:49:43 -07005082 *start_pfn = -1UL;
5083 *end_pfn = 0;
5084
Tejun Heoc13291a2011-07-12 10:46:30 +02005085 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5086 *start_pfn = min(*start_pfn, this_start_pfn);
5087 *end_pfn = max(*end_pfn, this_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005088 }
5089
Christoph Lameter633c0662007-10-16 01:25:37 -07005090 if (*start_pfn == -1UL)
Mel Gormanc7132162006-09-27 01:49:43 -07005091 *start_pfn = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07005092}
5093
5094/*
Mel Gorman2a1e2742007-07-17 04:03:12 -07005095 * This finds a zone that can be used for ZONE_MOVABLE pages. The
5096 * assumption is made that zones within a node are ordered by monotonically
5097 * increasing memory addresses so that the "highest" populated zone is used
5098 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07005099static void __init find_usable_zone_for_movable(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07005100{
5101 int zone_index;
5102 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5103 if (zone_index == ZONE_MOVABLE)
5104 continue;
5105
5106 if (arch_zone_highest_possible_pfn[zone_index] >
5107 arch_zone_lowest_possible_pfn[zone_index])
5108 break;
5109 }
5110
5111 VM_BUG_ON(zone_index == -1);
5112 movable_zone = zone_index;
5113}
5114
5115/*
5116 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005117 * because it is sized independently of the architecture. Unlike the other zones,
Mel Gorman2a1e2742007-07-17 04:03:12 -07005118 * the starting point for ZONE_MOVABLE is not fixed. It may be different
5119 * in each node depending on the size of each node and how evenly kernelcore
5120 * is distributed. This helper function adjusts the zone ranges
5121 * provided by the architecture for a given node by using the end of the
5122 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
5123 * zones within a node are ordered by monotonically increasing memory addresses
5124 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07005125static void __meminit adjust_zone_range_for_zone_movable(int nid,
Mel Gorman2a1e2742007-07-17 04:03:12 -07005126 unsigned long zone_type,
5127 unsigned long node_start_pfn,
5128 unsigned long node_end_pfn,
5129 unsigned long *zone_start_pfn,
5130 unsigned long *zone_end_pfn)
5131{
5132 /* Only adjust if ZONE_MOVABLE is on this node */
5133 if (zone_movable_pfn[nid]) {
5134 /* Size ZONE_MOVABLE */
5135 if (zone_type == ZONE_MOVABLE) {
5136 *zone_start_pfn = zone_movable_pfn[nid];
5137 *zone_end_pfn = min(node_end_pfn,
5138 arch_zone_highest_possible_pfn[movable_zone]);
5139
Mel Gorman2a1e2742007-07-17 04:03:12 -07005140 /* Check if this whole range is within ZONE_MOVABLE */
5141 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5142 *zone_start_pfn = *zone_end_pfn;
5143 }
5144}
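/*
 * Illustrative example with made-up pfns: if zone_movable_pfn[nid] is
 * 0x60000 and the node ends at 0x80000, a ZONE_MOVABLE request is sized
 * as [0x60000, min(0x80000, arch limit)), while a kernel zone that would
 * start at or above 0x60000 is collapsed to an empty range.
 */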
5145
5146/*
Mel Gormanc7132162006-09-27 01:49:43 -07005147 * Return the number of pages a zone spans in a node, including holes
5148 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5149 */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005150static unsigned long __meminit zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005151 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005152 unsigned long node_start_pfn,
5153 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005154 unsigned long *zone_start_pfn,
5155 unsigned long *zone_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005156 unsigned long *ignored)
5157{
Xishi Qiub5685e92015-09-08 15:04:16 -07005158 /* When hot-adding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07005159 if (!node_start_pfn && !node_end_pfn)
5160 return 0;
5161
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005162 /* Get the start and end of the zone */
Taku Izumid91749c2016-03-15 14:55:18 -07005163 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5164 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman2a1e2742007-07-17 04:03:12 -07005165 adjust_zone_range_for_zone_movable(nid, zone_type,
5166 node_start_pfn, node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005167 zone_start_pfn, zone_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005168
5169 /* Check that this node has pages within the zone's required range */
Taku Izumid91749c2016-03-15 14:55:18 -07005170 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07005171 return 0;
5172
5173 /* Move the zone boundaries inside the node if necessary */
Taku Izumid91749c2016-03-15 14:55:18 -07005174 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5175 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005176
5177 /* Return the spanned pages */
Taku Izumid91749c2016-03-15 14:55:18 -07005178 return *zone_end_pfn - *zone_start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005179}
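/*
 * Illustrative example with made-up pfns: if the architecture places the
 * zone at [0x100000, 0x400000) and the node spans [0x200000, 0x300000),
 * the boundaries are clamped to the node and 0x100000 spanned pages are
 * reported; a node lying entirely outside the zone reports 0.
 */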
5180
5181/*
5182 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005183 * then all holes in the requested range will be accounted for.
Mel Gormanc7132162006-09-27 01:49:43 -07005184 */
Yinghai Lu32996252009-12-15 17:59:02 -08005185unsigned long __meminit __absent_pages_in_range(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005186 unsigned long range_start_pfn,
5187 unsigned long range_end_pfn)
5188{
Tejun Heo96e907d2011-07-12 10:46:29 +02005189 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5190 unsigned long start_pfn, end_pfn;
5191 int i;
Mel Gormanc7132162006-09-27 01:49:43 -07005192
Tejun Heo96e907d2011-07-12 10:46:29 +02005193 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5194 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5195 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5196 nr_absent -= end_pfn - start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005197 }
Tejun Heo96e907d2011-07-12 10:46:29 +02005198 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07005199}
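/*
 * Illustrative example: for a request covering pfns [0, 1000) with
 * registered memblock ranges [0, 200) and [500, 1000), nr_absent starts
 * at 1000 and the loop subtracts 200 and 500, leaving 300 pages of holes.
 */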
5200
5201/**
5202 * absent_pages_in_range - Return number of page frames in holes within a range
5203 * @start_pfn: The start PFN to start searching for holes
5204 * @end_pfn: The end PFN to stop searching for holes
5205 *
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005206 * It returns the number of page frames in memory holes within a range.
Mel Gormanc7132162006-09-27 01:49:43 -07005207 */
5208unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5209 unsigned long end_pfn)
5210{
5211 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5212}
5213
5214/* Return the number of page frames in holes in a zone on a node */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005215static unsigned long __meminit zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005216 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005217 unsigned long node_start_pfn,
5218 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005219 unsigned long *ignored)
5220{
Tejun Heo96e907d2011-07-12 10:46:29 +02005221 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5222 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman9c7cd682006-09-27 01:49:58 -07005223 unsigned long zone_start_pfn, zone_end_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07005224 unsigned long nr_absent;
Mel Gorman9c7cd682006-09-27 01:49:58 -07005225
Xishi Qiub5685e92015-09-08 15:04:16 -07005226 /* When hot-adding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07005227 if (!node_start_pfn && !node_end_pfn)
5228 return 0;
5229
Tejun Heo96e907d2011-07-12 10:46:29 +02005230 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5231 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
Mel Gorman9c7cd682006-09-27 01:49:58 -07005232
Mel Gorman2a1e2742007-07-17 04:03:12 -07005233 adjust_zone_range_for_zone_movable(nid, zone_type,
5234 node_start_pfn, node_end_pfn,
5235 &zone_start_pfn, &zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07005236 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5237
5238 /*
5239 * ZONE_MOVABLE handling.
5240 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
5241 * and vice versa.
5242 */
5243 if (zone_movable_pfn[nid]) {
5244 if (mirrored_kernelcore) {
5245 unsigned long start_pfn, end_pfn;
5246 struct memblock_region *r;
5247
5248 for_each_memblock(memory, r) {
5249 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5250 zone_start_pfn, zone_end_pfn);
5251 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5252 zone_start_pfn, zone_end_pfn);
5253
5254 if (zone_type == ZONE_MOVABLE &&
5255 memblock_is_mirror(r))
5256 nr_absent += end_pfn - start_pfn;
5257
5258 if (zone_type == ZONE_NORMAL &&
5259 !memblock_is_mirror(r))
5260 nr_absent += end_pfn - start_pfn;
5261 }
5262 } else {
5263 if (zone_type == ZONE_NORMAL)
5264 nr_absent += node_end_pfn - zone_movable_pfn[nid];
5265 }
5266 }
5267
5268 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07005269}
Mel Gorman0e0b8642006-09-27 01:49:56 -07005270
Tejun Heo0ee332c2011-12-08 10:22:09 -08005271#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005272static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005273 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005274 unsigned long node_start_pfn,
5275 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005276 unsigned long *zone_start_pfn,
5277 unsigned long *zone_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005278 unsigned long *zones_size)
5279{
Taku Izumid91749c2016-03-15 14:55:18 -07005280 unsigned int zone;
5281
5282 *zone_start_pfn = node_start_pfn;
5283 for (zone = 0; zone < zone_type; zone++)
5284 *zone_start_pfn += zones_size[zone];
5285
5286 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5287
Mel Gormanc7132162006-09-27 01:49:43 -07005288 return zones_size[zone_type];
5289}
5290
Paul Mundt6ea6e682007-07-15 23:38:20 -07005291static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005292 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005293 unsigned long node_start_pfn,
5294 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005295 unsigned long *zholes_size)
5296{
5297 if (!zholes_size)
5298 return 0;
5299
5300 return zholes_size[zone_type];
5301}
Yinghai Lu20e69262013-03-01 14:51:27 -08005302
Tejun Heo0ee332c2011-12-08 10:22:09 -08005303#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07005304
Yasunori Gotoa3142c82007-05-08 00:23:07 -07005305static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005306 unsigned long node_start_pfn,
5307 unsigned long node_end_pfn,
5308 unsigned long *zones_size,
5309 unsigned long *zholes_size)
Mel Gormanc7132162006-09-27 01:49:43 -07005310{
Gu Zhengfebd5942015-06-24 16:57:02 -07005311 unsigned long realtotalpages = 0, totalpages = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07005312 enum zone_type i;
5313
Gu Zhengfebd5942015-06-24 16:57:02 -07005314 for (i = 0; i < MAX_NR_ZONES; i++) {
5315 struct zone *zone = pgdat->node_zones + i;
Taku Izumid91749c2016-03-15 14:55:18 -07005316 unsigned long zone_start_pfn, zone_end_pfn;
Gu Zhengfebd5942015-06-24 16:57:02 -07005317 unsigned long size, real_size;
Mel Gormanc7132162006-09-27 01:49:43 -07005318
Gu Zhengfebd5942015-06-24 16:57:02 -07005319 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5320 node_start_pfn,
5321 node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005322 &zone_start_pfn,
5323 &zone_end_pfn,
Gu Zhengfebd5942015-06-24 16:57:02 -07005324 zones_size);
5325 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005326 node_start_pfn, node_end_pfn,
5327 zholes_size);
Taku Izumid91749c2016-03-15 14:55:18 -07005328 if (size)
5329 zone->zone_start_pfn = zone_start_pfn;
5330 else
5331 zone->zone_start_pfn = 0;
Gu Zhengfebd5942015-06-24 16:57:02 -07005332 zone->spanned_pages = size;
5333 zone->present_pages = real_size;
5334
5335 totalpages += size;
5336 realtotalpages += real_size;
5337 }
5338
5339 pgdat->node_spanned_pages = totalpages;
Mel Gormanc7132162006-09-27 01:49:43 -07005340 pgdat->node_present_pages = realtotalpages;
5341 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5342 realtotalpages);
5343}
5344
Mel Gorman835c1342007-10-16 01:25:47 -07005345#ifndef CONFIG_SPARSEMEM
5346/*
5347 * Calculate the size of the zone->blockflags rounded to an unsigned long
Mel Gormand9c23402007-10-16 01:26:01 -07005348 * Start by making sure zonesize is a multiple of pageblock_nr_pages by rounding
5349 * up, then use one NR_PAGEBLOCK_BITS worth of bits per pageblock; finally
Mel Gorman835c1342007-10-16 01:25:47 -07005350 * round the resulting bit count up to the nearest long, then return it in
5351 * bytes.
5352 */
Linus Torvalds7c455122013-02-18 09:58:02 -08005353static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07005354{
5355 unsigned long usemapsize;
5356
Linus Torvalds7c455122013-02-18 09:58:02 -08005357 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
Mel Gormand9c23402007-10-16 01:26:01 -07005358 usemapsize = roundup(zonesize, pageblock_nr_pages);
5359 usemapsize = usemapsize >> pageblock_order;
Mel Gorman835c1342007-10-16 01:25:47 -07005360 usemapsize *= NR_PAGEBLOCK_BITS;
5361 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5362
5363 return usemapsize / 8;
5364}
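/*
 * Worked example (illustrative, 4KiB pages, pageblock_nr_pages == 512):
 * an aligned 1GiB zone has 262144 pages, i.e. 512 pageblocks. With
 * NR_PAGEBLOCK_BITS == 4 that is 2048 bits, already a multiple of an
 * unsigned long, so usemap_size() returns 256 bytes.
 */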
5365
5366static void __init setup_usemap(struct pglist_data *pgdat,
Linus Torvalds7c455122013-02-18 09:58:02 -08005367 struct zone *zone,
5368 unsigned long zone_start_pfn,
5369 unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07005370{
Linus Torvalds7c455122013-02-18 09:58:02 -08005371 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
Mel Gorman835c1342007-10-16 01:25:47 -07005372 zone->pageblock_flags = NULL;
Julia Lawall58a01a42009-01-06 14:39:28 -08005373 if (usemapsize)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005374 zone->pageblock_flags =
5375 memblock_virt_alloc_node_nopanic(usemapsize,
5376 pgdat->node_id);
Mel Gorman835c1342007-10-16 01:25:47 -07005377}
5378#else
Linus Torvalds7c455122013-02-18 09:58:02 -08005379static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5380 unsigned long zone_start_pfn, unsigned long zonesize) {}
Mel Gorman835c1342007-10-16 01:25:47 -07005381#endif /* CONFIG_SPARSEMEM */
5382
Mel Gormand9c23402007-10-16 01:26:01 -07005383#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
Mel Gormanba72cb82007-11-28 16:21:13 -08005384
Mel Gormand9c23402007-10-16 01:26:01 -07005385/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
Chen Gang15ca2202013-09-11 14:20:27 -07005386void __paginginit set_pageblock_order(void)
Mel Gormand9c23402007-10-16 01:26:01 -07005387{
Andrew Morton955c1cd2012-05-29 15:06:31 -07005388 unsigned int order;
5389
Mel Gormand9c23402007-10-16 01:26:01 -07005390 /* Check that pageblock_nr_pages has not already been setup */
5391 if (pageblock_order)
5392 return;
5393
Andrew Morton955c1cd2012-05-29 15:06:31 -07005394 if (HPAGE_SHIFT > PAGE_SHIFT)
5395 order = HUGETLB_PAGE_ORDER;
5396 else
5397 order = MAX_ORDER - 1;
5398
Mel Gormand9c23402007-10-16 01:26:01 -07005399 /*
5400 * Assume the largest contiguous order of interest is a huge page.
Andrew Morton955c1cd2012-05-29 15:06:31 -07005401 * This value may be variable depending on boot parameters on IA64 and
5402 * powerpc.
Mel Gormand9c23402007-10-16 01:26:01 -07005403 */
5404 pageblock_order = order;
5405}
5406#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5407
Mel Gormanba72cb82007-11-28 16:21:13 -08005408/*
5409 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
Andrew Morton955c1cd2012-05-29 15:06:31 -07005410 * is unused as pageblock_order is set at compile-time. See
5411 * include/linux/pageblock-flags.h for the values of pageblock_order based on
5412 * the kernel config
Mel Gormanba72cb82007-11-28 16:21:13 -08005413 */
Chen Gang15ca2202013-09-11 14:20:27 -07005414void __paginginit set_pageblock_order(void)
Mel Gormanba72cb82007-11-28 16:21:13 -08005415{
Mel Gormanba72cb82007-11-28 16:21:13 -08005416}
Mel Gormand9c23402007-10-16 01:26:01 -07005417
5418#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5419
Jiang Liu01cefae2012-12-12 13:52:19 -08005420static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5421 unsigned long present_pages)
5422{
5423 unsigned long pages = spanned_pages;
5424
5425 /*
5426 * Provide a more accurate estimation if there are holes within
5427 * the zone and SPARSEMEM is in use. If there are holes within the
5428 * zone, each populated memory region may cost us one or two extra
5429 * memmap pages due to alignment, because the memmap pages for each
5430 * populated region may not be naturally aligned on a page boundary.
5431 * So the (present_pages >> 4) heuristic is a tradeoff for that.
5432 */
5433 if (spanned_pages > present_pages + (present_pages >> 4) &&
5434 IS_ENABLED(CONFIG_SPARSEMEM))
5435 pages = present_pages;
5436
5437 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5438}
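/*
 * Worked example (illustrative, assuming a 64-byte struct page and 4KiB
 * pages): a zone spanning 1048576 pfns with only 524288 present exceeds
 * present_pages + (present_pages >> 4), so the present count is used:
 * 524288 * 64 bytes works out to 8192 memmap pages.
 */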
5439
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440/*
5441 * Set up the zone data structures:
5442 * - mark all pages reserved
5443 * - mark all memory queues empty
5444 * - clear the memory bitmaps
Minchan Kim6527af52012-07-31 16:46:16 -07005445 *
5446 * NOTE: pgdat should get zeroed by caller.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447 */
Wei Yang7f3eb552015-09-08 14:59:50 -07005448static void __paginginit free_area_init_core(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005449{
Christoph Lameter2f1b6242006-09-25 23:31:13 -07005450 enum zone_type j;
Dave Hansened8ece22005-10-29 18:16:50 -07005451 int nid = pgdat->node_id;
Yasunori Goto718127c2006-06-23 02:03:10 -07005452 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005453
Dave Hansen208d54e2005-10-29 18:16:52 -07005454 pgdat_resize_init(pgdat);
Andrea Arcangeli8177a422012-03-23 20:56:34 +01005455#ifdef CONFIG_NUMA_BALANCING
5456 spin_lock_init(&pgdat->numabalancing_migrate_lock);
5457 pgdat->numabalancing_migrate_nr_pages = 0;
5458 pgdat->numabalancing_migrate_next_window = jiffies;
5459#endif
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08005460#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5461 spin_lock_init(&pgdat->split_queue_lock);
5462 INIT_LIST_HEAD(&pgdat->split_queue);
5463 pgdat->split_queue_len = 0;
5464#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465 init_waitqueue_head(&pgdat->kswapd_wait);
Mel Gorman55150612012-07-31 16:44:35 -07005466 init_waitqueue_head(&pgdat->pfmemalloc_wait);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07005467#ifdef CONFIG_COMPACTION
5468 init_waitqueue_head(&pgdat->kcompactd_wait);
5469#endif
Joonsoo Kimeefa864b2014-12-12 16:55:46 -08005470 pgdat_page_ext_init(pgdat);
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01005471
Linus Torvalds1da177e2005-04-16 15:20:36 -07005472 for (j = 0; j < MAX_NR_ZONES; j++) {
5473 struct zone *zone = pgdat->node_zones + j;
Jiang Liu9feedc92012-12-12 13:52:12 -08005474 unsigned long size, realsize, freesize, memmap_pages;
Taku Izumid91749c2016-03-15 14:55:18 -07005475 unsigned long zone_start_pfn = zone->zone_start_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005476
Gu Zhengfebd5942015-06-24 16:57:02 -07005477 size = zone->spanned_pages;
5478 realsize = freesize = zone->present_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005479
Mel Gorman0e0b8642006-09-27 01:49:56 -07005480 /*
Jiang Liu9feedc92012-12-12 13:52:12 -08005481 * Adjust freesize so that it accounts for how much memory
Mel Gorman0e0b8642006-09-27 01:49:56 -07005482 * is used by this zone for memmap. This affects the watermark
5483 * and per-cpu initialisations
5484 */
Jiang Liu01cefae2012-12-12 13:52:19 -08005485 memmap_pages = calc_memmap_size(size, realsize);
Zhong Hongboba914f42014-12-12 16:56:21 -08005486 if (!is_highmem_idx(j)) {
5487 if (freesize >= memmap_pages) {
5488 freesize -= memmap_pages;
5489 if (memmap_pages)
5490 printk(KERN_DEBUG
5491 " %s zone: %lu pages used for memmap\n",
5492 zone_names[j], memmap_pages);
5493 } else
Joe Perches11705322016-03-17 14:19:50 -07005494 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
Zhong Hongboba914f42014-12-12 16:56:21 -08005495 zone_names[j], memmap_pages, freesize);
5496 }
Mel Gorman0e0b8642006-09-27 01:49:56 -07005497
Christoph Lameter62672762007-02-10 01:43:07 -08005498 /* Account for reserved pages */
Jiang Liu9feedc92012-12-12 13:52:12 -08005499 if (j == 0 && freesize > dma_reserve) {
5500 freesize -= dma_reserve;
Yinghai Lud903ef92008-10-18 20:27:06 -07005501 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
Christoph Lameter62672762007-02-10 01:43:07 -08005502 zone_names[0], dma_reserve);
Mel Gorman0e0b8642006-09-27 01:49:56 -07005503 }
5504
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07005505 if (!is_highmem_idx(j))
Jiang Liu9feedc92012-12-12 13:52:12 -08005506 nr_kernel_pages += freesize;
Jiang Liu01cefae2012-12-12 13:52:19 -08005507 /* Charge for highmem memmap if there are enough kernel pages */
5508 else if (nr_kernel_pages > memmap_pages * 2)
5509 nr_kernel_pages -= memmap_pages;
Jiang Liu9feedc92012-12-12 13:52:12 -08005510 nr_all_pages += freesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005511
Jiang Liu9feedc92012-12-12 13:52:12 -08005512 /*
5513 * Set an approximate value for lowmem here, it will be adjusted
5514 * when the bootmem allocator frees pages into the buddy system.
5515 * And all highmem pages will be managed by the buddy system.
5516 */
5517 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
Christoph Lameter96146342006-07-03 00:24:13 -07005518#ifdef CONFIG_NUMA
Christoph Lameterd5f541e2006-09-27 01:50:08 -07005519 zone->node = nid;
Jiang Liu9feedc92012-12-12 13:52:12 -08005520 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
Christoph Lameter96146342006-07-03 00:24:13 -07005521 / 100;
Jiang Liu9feedc92012-12-12 13:52:12 -08005522 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
Christoph Lameter96146342006-07-03 00:24:13 -07005523#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005524 zone->name = zone_names[j];
5525 spin_lock_init(&zone->lock);
5526 spin_lock_init(&zone->lru_lock);
Dave Hansenbdc8cb92005-10-29 18:16:53 -07005527 zone_seqlock_init(zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005528 zone->zone_pgdat = pgdat;
Dave Hansened8ece22005-10-29 18:16:50 -07005529 zone_pcp_init(zone);
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07005530
5531 /* For bootup, initialized properly in watermark setup */
5532 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
5533
Hugh Dickinsbea8c152012-11-16 14:14:54 -08005534 lruvec_init(&zone->lruvec);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005535 if (!size)
5536 continue;
5537
Andrew Morton955c1cd2012-05-29 15:06:31 -07005538 set_pageblock_order();
Linus Torvalds7c455122013-02-18 09:58:02 -08005539 setup_usemap(pgdat, zone, zone_start_pfn, size);
Yaowei Baib171e402015-11-05 18:47:06 -08005540 ret = init_currently_empty_zone(zone, zone_start_pfn, size);
Yasunori Goto718127c2006-06-23 02:03:10 -07005541 BUG_ON(ret);
Heiko Carstens76cdd582008-05-14 16:05:52 -07005542 memmap_init(size, nid, j, zone_start_pfn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005543 }
5544}
5545
Sam Ravnborg577a32f2007-05-17 23:29:25 +02005546static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005547{
Tony Luckb0aeba72015-11-10 10:09:47 -08005548 unsigned long __maybe_unused start = 0;
Laura Abbotta1c34a32015-11-05 18:48:46 -08005549 unsigned long __maybe_unused offset = 0;
5550
Linus Torvalds1da177e2005-04-16 15:20:36 -07005551 /* Skip empty nodes */
5552 if (!pgdat->node_spanned_pages)
5553 return;
5554
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005555#ifdef CONFIG_FLAT_NODE_MEM_MAP
Tony Luckb0aeba72015-11-10 10:09:47 -08005556 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
5557 offset = pgdat->node_start_pfn - start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005558 /* ia64 gets its own node_mem_map, before this, without bootmem */
5559 if (!pgdat->node_mem_map) {
Tony Luckb0aeba72015-11-10 10:09:47 -08005560 unsigned long size, end;
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005561 struct page *map;
5562
Bob Piccoe984bb42006-05-20 15:00:31 -07005563 /*
5564 * The zone's endpoints aren't required to be MAX_ORDER
5565 * aligned, but the node_mem_map endpoints must be, in order
5566 * for the buddy allocator to function correctly.
5567 */
Cody P Schafer108bcc92013-02-22 16:35:23 -08005568 end = pgdat_end_pfn(pgdat);
Bob Piccoe984bb42006-05-20 15:00:31 -07005569 end = ALIGN(end, MAX_ORDER_NR_PAGES);
5570 size = (end - start) * sizeof(struct page);
Dave Hansen6f167ec2005-06-23 00:07:39 -07005571 map = alloc_remap(pgdat->node_id, size);
5572 if (!map)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005573 map = memblock_virt_alloc_node_nopanic(size,
5574 pgdat->node_id);
Laura Abbotta1c34a32015-11-05 18:48:46 -08005575 pgdat->node_mem_map = map + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005576 }
Roman Zippel12d810c2007-05-31 00:40:54 -07005577#ifndef CONFIG_NEED_MULTIPLE_NODES
Linus Torvalds1da177e2005-04-16 15:20:36 -07005578 /*
5579 * With no DISCONTIG, the global mem_map is just set as node 0's
5580 */
Mel Gormanc7132162006-09-27 01:49:43 -07005581 if (pgdat == NODE_DATA(0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005582 mem_map = NODE_DATA(0)->node_mem_map;
Laura Abbotta1c34a32015-11-05 18:48:46 -08005583#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
Mel Gormanc7132162006-09-27 01:49:43 -07005584 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
Laura Abbotta1c34a32015-11-05 18:48:46 -08005585 mem_map -= offset;
Tejun Heo0ee332c2011-12-08 10:22:09 -08005586#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07005587 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005588#endif
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005589#endif /* CONFIG_FLAT_NODE_MEM_MAP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005590}
5591
Johannes Weiner9109fb72008-07-23 21:27:20 -07005592void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5593 unsigned long node_start_pfn, unsigned long *zholes_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005594{
Johannes Weiner9109fb72008-07-23 21:27:20 -07005595 pg_data_t *pgdat = NODE_DATA(nid);
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005596 unsigned long start_pfn = 0;
5597 unsigned long end_pfn = 0;
Johannes Weiner9109fb72008-07-23 21:27:20 -07005598
Minchan Kim88fdf752012-07-31 16:46:14 -07005599 /* pg_data_t should be reset to zero when it's allocated */
Linus Torvalds8783b6e2012-08-02 10:37:03 -07005600 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
Minchan Kim88fdf752012-07-31 16:46:14 -07005601
Mel Gorman3a80a7f2015-06-30 14:57:02 -07005602 reset_deferred_meminit(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005603 pgdat->node_id = nid;
5604 pgdat->node_start_pfn = node_start_pfn;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005605#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5606 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
Juergen Gross8d29e182015-02-11 15:26:01 -08005607 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
Zhen Lei4ada0c52015-09-08 15:04:19 -07005608 (u64)start_pfn << PAGE_SHIFT,
5609 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
Taku Izumid91749c2016-03-15 14:55:18 -07005610#else
5611 start_pfn = node_start_pfn;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005612#endif
5613 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5614 zones_size, zholes_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005615
5616 alloc_node_mem_map(pgdat);
Yinghai Lue8c27ac2008-06-01 13:15:22 -07005617#ifdef CONFIG_FLAT_NODE_MEM_MAP
5618 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5619 nid, (unsigned long)pgdat,
5620 (unsigned long)pgdat->node_mem_map);
5621#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005622
Wei Yang7f3eb552015-09-08 14:59:50 -07005623 free_area_init_core(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005624}
5625
Tejun Heo0ee332c2011-12-08 10:22:09 -08005626#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Miklos Szeredi418508c2007-05-23 13:57:55 -07005627
5628#if MAX_NUMNODES > 1
5629/*
5630 * Figure out the number of possible node ids.
5631 */
Cody P Schaferf9872ca2013-04-29 15:08:01 -07005632void __init setup_nr_node_ids(void)
Miklos Szeredi418508c2007-05-23 13:57:55 -07005633{
Wei Yang904a9552015-09-08 14:59:48 -07005634 unsigned int highest;
Miklos Szeredi418508c2007-05-23 13:57:55 -07005635
Wei Yang904a9552015-09-08 14:59:48 -07005636 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
Miklos Szeredi418508c2007-05-23 13:57:55 -07005637 nr_node_ids = highest + 1;
5638}
Miklos Szeredi418508c2007-05-23 13:57:55 -07005639#endif
5640
Mel Gormanc7132162006-09-27 01:49:43 -07005641/**
Tejun Heo1e019792011-07-12 09:45:34 +02005642 * node_map_pfn_alignment - determine the maximum internode alignment
5643 *
5644 * This function should be called after node map is populated and sorted.
5645 * It calculates the maximum power of two alignment which can distinguish
5646 * all the nodes.
5647 *
5648 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
5649 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
5650 * nodes are shifted by 256MiB, 256MiB is returned instead. Note that if only the last node is
5651 * shifted, 1GiB is enough and this function will indicate so.
5652 *
5653 * This is used to test whether pfn -> nid mapping of the chosen memory
5654 * model has fine enough granularity to avoid incorrect mapping for the
5655 * populated node map.
5656 *
5657 * Returns the determined alignment in pfn's. 0 if there is no alignment
5658 * requirement (single node).
5659 */
5660unsigned long __init node_map_pfn_alignment(void)
5661{
5662 unsigned long accl_mask = 0, last_end = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02005663 unsigned long start, end, mask;
Tejun Heo1e019792011-07-12 09:45:34 +02005664 int last_nid = -1;
Tejun Heoc13291a2011-07-12 10:46:30 +02005665 int i, nid;
Tejun Heo1e019792011-07-12 09:45:34 +02005666
Tejun Heoc13291a2011-07-12 10:46:30 +02005667 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
Tejun Heo1e019792011-07-12 09:45:34 +02005668 if (!start || last_nid < 0 || last_nid == nid) {
5669 last_nid = nid;
5670 last_end = end;
5671 continue;
5672 }
5673
5674 /*
5675 * Start with a mask granular enough to pin-point to the
5676 * start pfn and tick off bits one-by-one until it becomes
5677 * too coarse to separate the current node from the last.
5678 */
5679 mask = ~((1 << __ffs(start)) - 1);
5680 while (mask && last_end <= (start & (mask << 1)))
5681 mask <<= 1;
5682
5683 /* accumulate all internode masks */
5684 accl_mask |= mask;
5685 }
5686
5687 /* convert mask to number of pages */
5688 return ~accl_mask + 1;
5689}
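/*
 * Illustrative example (4KiB pages): node 0 covering [0, 1GiB) and node 1
 * starting at 1.25GiB (pfn 0x50000) produce a return value of 0x40000
 * pfns, i.e. the 1GiB alignment that the comment above notes is enough
 * when only the last node is shifted.
 */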
5690
Mel Gormana6af2bc2007-02-10 01:42:57 -08005691/* Find the lowest pfn for a node */
Adrian Bunkb69a7282008-07-23 21:28:12 -07005692static unsigned long __init find_min_pfn_for_node(int nid)
Mel Gormanc7132162006-09-27 01:49:43 -07005693{
Mel Gormana6af2bc2007-02-10 01:42:57 -08005694 unsigned long min_pfn = ULONG_MAX;
Tejun Heoc13291a2011-07-12 10:46:30 +02005695 unsigned long start_pfn;
5696 int i;
Mel Gorman1abbfb42006-11-23 12:01:41 +00005697
Tejun Heoc13291a2011-07-12 10:46:30 +02005698 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
5699 min_pfn = min(min_pfn, start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005700
Mel Gormana6af2bc2007-02-10 01:42:57 -08005701 if (min_pfn == ULONG_MAX) {
Joe Perches11705322016-03-17 14:19:50 -07005702 pr_warn("Could not find start_pfn for node %d\n", nid);
Mel Gormana6af2bc2007-02-10 01:42:57 -08005703 return 0;
5704 }
5705
5706 return min_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005707}
5708
5709/**
5710 * find_min_pfn_with_active_regions - Find the minimum PFN registered
5711 *
5712 * It returns the minimum PFN based on information provided via
Zhang Zhen7d018172014-06-04 16:10:53 -07005713 * memblock_set_node().
Mel Gormanc7132162006-09-27 01:49:43 -07005714 */
5715unsigned long __init find_min_pfn_with_active_regions(void)
5716{
5717 return find_min_pfn_for_node(MAX_NUMNODES);
5718}
5719
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005720/*
5721 * early_calculate_totalpages()
5722 * Sum pages in active regions for movable zone.
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005723 * Populate N_MEMORY for calculating usable_nodes.
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005724 */
Adrian Bunk484f51f2007-10-16 01:26:03 -07005725static unsigned long __init early_calculate_totalpages(void)
Mel Gorman7e63efef2007-07-17 04:03:15 -07005726{
Mel Gorman7e63efef2007-07-17 04:03:15 -07005727 unsigned long totalpages = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02005728 unsigned long start_pfn, end_pfn;
5729 int i, nid;
Mel Gorman7e63efef2007-07-17 04:03:15 -07005730
Tejun Heoc13291a2011-07-12 10:46:30 +02005731 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
5732 unsigned long pages = end_pfn - start_pfn;
5733
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005734 totalpages += pages;
5735 if (pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005736 node_set_state(nid, N_MEMORY);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005737 }
Pintu Kumarb8af2942013-09-11 14:20:34 -07005738 return totalpages;
Mel Gorman7e63efef2007-07-17 04:03:15 -07005739}
5740
Mel Gorman2a1e2742007-07-17 04:03:12 -07005741/*
5742 * Find the PFN at which the Movable zone begins in each node. Kernel memory
5743 * is spread evenly between nodes as long as the nodes have enough
5744 * memory. When they don't, some nodes will have more kernelcore than
5745 * others
5746 */
Kautuk Consulb224ef82012-03-21 16:34:15 -07005747static void __init find_zone_movable_pfns_for_nodes(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07005748{
5749 int i, nid;
5750 unsigned long usable_startpfn;
5751 unsigned long kernelcore_node, kernelcore_remaining;
Yinghai Lu66918dc2009-06-30 11:41:37 -07005752 /* save the state before borrowing the nodemask */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005753 nodemask_t saved_node_state = node_states[N_MEMORY];
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005754 unsigned long totalpages = early_calculate_totalpages();
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005755 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
Emil Medve136199f2014-04-07 15:37:52 -07005756 struct memblock_region *r;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005757
5758 /* Need to find movable_zone earlier when movable_node is specified. */
5759 find_usable_zone_for_movable();
Mel Gorman2a1e2742007-07-17 04:03:12 -07005760
Mel Gorman7e63efef2007-07-17 04:03:15 -07005761 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005762 * If movable_node is specified, ignore kernelcore and movablecore
5763 * options.
5764 */
5765 if (movable_node_is_enabled()) {
Emil Medve136199f2014-04-07 15:37:52 -07005766 for_each_memblock(memory, r) {
5767 if (!memblock_is_hotpluggable(r))
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005768 continue;
5769
Emil Medve136199f2014-04-07 15:37:52 -07005770 nid = r->nid;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005771
Emil Medve136199f2014-04-07 15:37:52 -07005772 usable_startpfn = PFN_DOWN(r->base);
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005773 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5774 min(usable_startpfn, zone_movable_pfn[nid]) :
5775 usable_startpfn;
5776 }
5777
5778 goto out2;
5779 }
5780
5781 /*
Taku Izumi342332e2016-03-15 14:55:22 -07005782 * If kernelcore=mirror is specified, ignore movablecore option
5783 */
5784 if (mirrored_kernelcore) {
5785 bool mem_below_4gb_not_mirrored = false;
5786
5787 for_each_memblock(memory, r) {
5788 if (memblock_is_mirror(r))
5789 continue;
5790
5791 nid = r->nid;
5792
5793 usable_startpfn = memblock_region_memory_base_pfn(r);
5794
5795 if (usable_startpfn < 0x100000) {
5796 mem_below_4gb_not_mirrored = true;
5797 continue;
5798 }
5799
5800 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5801 min(usable_startpfn, zone_movable_pfn[nid]) :
5802 usable_startpfn;
5803 }
5804
5805 if (mem_below_4gb_not_mirrored)
 5806			pr_warn("This configuration results in unmirrored kernel memory.\n");
5807
5808 goto out2;
5809 }
5810
5811 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005812	 * If movablecore=nn[KMG] was specified, calculate the corresponding
Mel Gorman7e63efef2007-07-17 04:03:15 -07005813	 * size of kernelcore so that memory usable for
5814 * any allocation type is evenly spread. If both kernelcore
5815 * and movablecore are specified, then the value of kernelcore
5816 * will be used for required_kernelcore if it's greater than
5817 * what movablecore would have allowed.
5818 */
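	/*
	 * Worked example (illustrative only): with totalpages equivalent to
	 * 8G and movablecore=2G on the command line, required_movablecore is
	 * rounded up to a MAX_ORDER_NR_PAGES boundary, corepages becomes
	 * roughly 8G - 2G = 6G, and required_kernelcore therefore ends up as
	 * at least 6G worth of pages.
	 */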
5819 if (required_movablecore) {
Mel Gorman7e63efef2007-07-17 04:03:15 -07005820 unsigned long corepages;
5821
5822 /*
5823 * Round-up so that ZONE_MOVABLE is at least as large as what
5824 * was requested by the user
5825 */
5826 required_movablecore =
5827 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
Xishi Qiu9fd745d2015-11-05 18:48:11 -08005828 required_movablecore = min(totalpages, required_movablecore);
Mel Gorman7e63efef2007-07-17 04:03:15 -07005829 corepages = totalpages - required_movablecore;
5830
5831 required_kernelcore = max(required_kernelcore, corepages);
5832 }
5833
Xishi Qiubde304b2015-11-05 18:48:56 -08005834 /*
5835 * If kernelcore was not specified or kernelcore size is larger
5836 * than totalpages, there is no ZONE_MOVABLE.
5837 */
5838 if (!required_kernelcore || required_kernelcore >= totalpages)
Yinghai Lu66918dc2009-06-30 11:41:37 -07005839 goto out;
Mel Gorman2a1e2742007-07-17 04:03:12 -07005840
5841 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
Mel Gorman2a1e2742007-07-17 04:03:12 -07005842 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
5843
5844restart:
5845 /* Spread kernelcore memory as evenly as possible throughout nodes */
5846 kernelcore_node = required_kernelcore / usable_nodes;
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005847 for_each_node_state(nid, N_MEMORY) {
Tejun Heoc13291a2011-07-12 10:46:30 +02005848 unsigned long start_pfn, end_pfn;
5849
Mel Gorman2a1e2742007-07-17 04:03:12 -07005850 /*
5851 * Recalculate kernelcore_node if the division per node
5852 * now exceeds what is necessary to satisfy the requested
5853 * amount of memory for the kernel
5854 */
5855 if (required_kernelcore < kernelcore_node)
5856 kernelcore_node = required_kernelcore / usable_nodes;
5857
5858 /*
5859 * As the map is walked, we track how much memory is usable
5860 * by the kernel using kernelcore_remaining. When it is
5861 * 0, the rest of the node is usable by ZONE_MOVABLE
5862 */
5863 kernelcore_remaining = kernelcore_node;
5864
5865 /* Go through each range of PFNs within this node */
Tejun Heoc13291a2011-07-12 10:46:30 +02005866 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07005867 unsigned long size_pages;
5868
Tejun Heoc13291a2011-07-12 10:46:30 +02005869 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
Mel Gorman2a1e2742007-07-17 04:03:12 -07005870 if (start_pfn >= end_pfn)
5871 continue;
5872
5873 /* Account for what is only usable for kernelcore */
5874 if (start_pfn < usable_startpfn) {
5875 unsigned long kernel_pages;
5876 kernel_pages = min(end_pfn, usable_startpfn)
5877 - start_pfn;
5878
5879 kernelcore_remaining -= min(kernel_pages,
5880 kernelcore_remaining);
5881 required_kernelcore -= min(kernel_pages,
5882 required_kernelcore);
5883
5884 /* Continue if range is now fully accounted */
5885 if (end_pfn <= usable_startpfn) {
5886
5887 /*
5888 * Push zone_movable_pfn to the end so
5889 * that if we have to rebalance
5890 * kernelcore across nodes, we will
5891 * not double account here
5892 */
5893 zone_movable_pfn[nid] = end_pfn;
5894 continue;
5895 }
5896 start_pfn = usable_startpfn;
5897 }
5898
5899 /*
5900 * The usable PFN range for ZONE_MOVABLE is from
5901 * start_pfn->end_pfn. Calculate size_pages as the
5902 * number of pages used as kernelcore
5903 */
5904 size_pages = end_pfn - start_pfn;
5905 if (size_pages > kernelcore_remaining)
5906 size_pages = kernelcore_remaining;
5907 zone_movable_pfn[nid] = start_pfn + size_pages;
5908
5909 /*
5910 * Some kernelcore has been met, update counts and
5911 * break if the kernelcore for this node has been
Pintu Kumarb8af2942013-09-11 14:20:34 -07005912 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07005913 */
5914 required_kernelcore -= min(required_kernelcore,
5915 size_pages);
5916 kernelcore_remaining -= size_pages;
5917 if (!kernelcore_remaining)
5918 break;
5919 }
5920 }
5921
5922 /*
5923 * If there is still required_kernelcore, we do another pass with one
5924 * less node in the count. This will push zone_movable_pfn[nid] further
5925 * along on the nodes that still have memory until kernelcore is
Pintu Kumarb8af2942013-09-11 14:20:34 -07005926 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07005927 */
5928 usable_nodes--;
5929 if (usable_nodes && required_kernelcore > usable_nodes)
5930 goto restart;
5931
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005932out2:
Mel Gorman2a1e2742007-07-17 04:03:12 -07005933 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5934 for (nid = 0; nid < MAX_NUMNODES; nid++)
5935 zone_movable_pfn[nid] =
5936 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
Yinghai Lu66918dc2009-06-30 11:41:37 -07005937
Yinghai Lu20e69262013-03-01 14:51:27 -08005938out:
Yinghai Lu66918dc2009-06-30 11:41:37 -07005939 /* restore the node_state */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005940 node_states[N_MEMORY] = saved_node_state;
Mel Gorman2a1e2742007-07-17 04:03:12 -07005941}
5942
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005943/* Any regular or high memory on that node? */
5944static void check_for_memory(pg_data_t *pgdat, int nid)
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005945{
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005946 enum zone_type zone_type;
5947
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005948 if (N_MEMORY == N_NORMAL_MEMORY)
5949 return;
5950
5951 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005952 struct zone *zone = &pgdat->node_zones[zone_type];
Xishi Qiub38a8722013-11-12 15:07:20 -08005953 if (populated_zone(zone)) {
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005954 node_set_state(nid, N_HIGH_MEMORY);
5955 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5956 zone_type <= ZONE_NORMAL)
5957 node_set_state(nid, N_NORMAL_MEMORY);
Bob Liud0048b02012-01-12 17:19:07 -08005958 break;
5959 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005960 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005961}
5962
Mel Gormanc7132162006-09-27 01:49:43 -07005963/**
5964 * free_area_init_nodes - Initialise all pg_data_t and zone data
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005965 * @max_zone_pfn: an array of max PFNs for each zone
Mel Gormanc7132162006-09-27 01:49:43 -07005966 *
5967 * This will call free_area_init_node() for each active node in the system.
Zhang Zhen7d018172014-06-04 16:10:53 -07005968 * Using the page ranges provided by memblock_set_node(), the size of each
Mel Gormanc7132162006-09-27 01:49:43 -07005969 * zone in each node and their holes is calculated. If the maximum PFNs
 5970 * of two adjacent zones match, the higher zone is assumed to be empty.
 5971 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 5972 * that ZONE_DMA32 has no pages. It is also assumed that a zone
5973 * starts where the previous one ended. For example, ZONE_DMA32 starts
5974 * at arch_max_dma_pfn.
5975 */
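/*
 * Example (illustrative, not from the original source): on a hypothetical
 * x86-64 style layout, max_zone_pfn might look like
 *
 *	max_zone_pfn[ZONE_DMA]    = 0x1000	(16MB)
 *	max_zone_pfn[ZONE_DMA32]  = 0x100000	(4GB)
 *	max_zone_pfn[ZONE_NORMAL] = 0x440000	(~17GB)
 *
 * The code below then derives arch_zone_lowest_possible_pfn[ZONE_DMA32] =
 * 0x1000 and arch_zone_lowest_possible_pfn[ZONE_NORMAL] = 0x100000, i.e.
 * each zone starts where the previous one ends.
 */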
5976void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5977{
Tejun Heoc13291a2011-07-12 10:46:30 +02005978 unsigned long start_pfn, end_pfn;
5979 int i, nid;
Mel Gormana6af2bc2007-02-10 01:42:57 -08005980
Mel Gormanc7132162006-09-27 01:49:43 -07005981 /* Record where the zone boundaries are */
5982 memset(arch_zone_lowest_possible_pfn, 0,
5983 sizeof(arch_zone_lowest_possible_pfn));
5984 memset(arch_zone_highest_possible_pfn, 0,
5985 sizeof(arch_zone_highest_possible_pfn));
5986 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5987 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5988 for (i = 1; i < MAX_NR_ZONES; i++) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07005989 if (i == ZONE_MOVABLE)
5990 continue;
Mel Gormanc7132162006-09-27 01:49:43 -07005991 arch_zone_lowest_possible_pfn[i] =
5992 arch_zone_highest_possible_pfn[i-1];
5993 arch_zone_highest_possible_pfn[i] =
5994 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5995 }
Mel Gorman2a1e2742007-07-17 04:03:12 -07005996 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5997 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5998
5999 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
6000 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
Kautuk Consulb224ef82012-03-21 16:34:15 -07006001 find_zone_movable_pfns_for_nodes();
Mel Gormanc7132162006-09-27 01:49:43 -07006002
Mel Gormanc7132162006-09-27 01:49:43 -07006003 /* Print out the zone ranges */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006004 pr_info("Zone ranges:\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07006005 for (i = 0; i < MAX_NR_ZONES; i++) {
6006 if (i == ZONE_MOVABLE)
6007 continue;
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006008 pr_info(" %-8s ", zone_names[i]);
David Rientjes72f0ba02010-03-05 13:42:14 -08006009 if (arch_zone_lowest_possible_pfn[i] ==
6010 arch_zone_highest_possible_pfn[i])
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006011 pr_cont("empty\n");
David Rientjes72f0ba02010-03-05 13:42:14 -08006012 else
Juergen Gross8d29e182015-02-11 15:26:01 -08006013 pr_cont("[mem %#018Lx-%#018Lx]\n",
6014 (u64)arch_zone_lowest_possible_pfn[i]
6015 << PAGE_SHIFT,
6016 ((u64)arch_zone_highest_possible_pfn[i]
Bjorn Helgaasa62e2f42012-05-29 15:06:30 -07006017 << PAGE_SHIFT) - 1);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006018 }
6019
6020 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006021 pr_info("Movable zone start for each node\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07006022 for (i = 0; i < MAX_NUMNODES; i++) {
6023 if (zone_movable_pfn[i])
Juergen Gross8d29e182015-02-11 15:26:01 -08006024 pr_info(" Node %d: %#018Lx\n", i,
6025 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006026 }
Mel Gormanc7132162006-09-27 01:49:43 -07006027
Wanpeng Lif2d52fe2012-10-08 16:32:24 -07006028 /* Print out the early node map */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006029 pr_info("Early memory node ranges\n");
Tejun Heoc13291a2011-07-12 10:46:30 +02006030 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
Juergen Gross8d29e182015-02-11 15:26:01 -08006031 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6032 (u64)start_pfn << PAGE_SHIFT,
6033 ((u64)end_pfn << PAGE_SHIFT) - 1);
Mel Gormanc7132162006-09-27 01:49:43 -07006034
6035 /* Initialise every node */
Mel Gorman708614e2008-07-23 21:26:51 -07006036 mminit_verify_pageflags_layout();
Christoph Lameter8ef82862007-02-20 13:57:52 -08006037 setup_nr_node_ids();
Mel Gormanc7132162006-09-27 01:49:43 -07006038 for_each_online_node(nid) {
6039 pg_data_t *pgdat = NODE_DATA(nid);
Johannes Weiner9109fb72008-07-23 21:27:20 -07006040 free_area_init_node(nid, NULL,
Mel Gormanc7132162006-09-27 01:49:43 -07006041 find_min_pfn_for_node(nid), NULL);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006042
6043 /* Any memory on that node */
6044 if (pgdat->node_present_pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006045 node_set_state(nid, N_MEMORY);
6046 check_for_memory(pgdat, nid);
Mel Gormanc7132162006-09-27 01:49:43 -07006047 }
6048}
Mel Gorman2a1e2742007-07-17 04:03:12 -07006049
Mel Gorman7e63efef2007-07-17 04:03:15 -07006050static int __init cmdline_parse_core(char *p, unsigned long *core)
Mel Gorman2a1e2742007-07-17 04:03:12 -07006051{
6052 unsigned long long coremem;
6053 if (!p)
6054 return -EINVAL;
6055
6056 coremem = memparse(p, &p);
Mel Gorman7e63efef2007-07-17 04:03:15 -07006057 *core = coremem >> PAGE_SHIFT;
Mel Gorman2a1e2742007-07-17 04:03:12 -07006058
Mel Gorman7e63efef2007-07-17 04:03:15 -07006059 /* Paranoid check that UL is enough for the coremem value */
Mel Gorman2a1e2742007-07-17 04:03:12 -07006060 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6061
6062 return 0;
6063}
Mel Gormaned7ed362007-07-17 04:03:14 -07006064
Mel Gorman7e63efef2007-07-17 04:03:15 -07006065/*
6066 * kernelcore=size sets the amount of memory for use for allocations that
6067 * cannot be reclaimed or migrated.
6068 */
6069static int __init cmdline_parse_kernelcore(char *p)
6070{
Taku Izumi342332e2016-03-15 14:55:22 -07006071 /* parse kernelcore=mirror */
6072 if (parse_option_str(p, "mirror")) {
6073 mirrored_kernelcore = true;
6074 return 0;
6075 }
6076
Mel Gorman7e63efef2007-07-17 04:03:15 -07006077 return cmdline_parse_core(p, &required_kernelcore);
6078}
6079
6080/*
6081 * movablecore=size sets the amount of memory for use for allocations that
6082 * can be reclaimed or migrated.
6083 */
6084static int __init cmdline_parse_movablecore(char *p)
6085{
6086 return cmdline_parse_core(p, &required_movablecore);
6087}
6088
Mel Gormaned7ed362007-07-17 04:03:14 -07006089early_param("kernelcore", cmdline_parse_kernelcore);
Mel Gorman7e63efef2007-07-17 04:03:15 -07006090early_param("movablecore", cmdline_parse_movablecore);
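/*
 * Example boot command lines (illustrative): "kernelcore=512M",
 * "movablecore=2G", or "kernelcore=mirror".  The first two are parsed by
 * cmdline_parse_core() via memparse(), while the last one only sets
 * mirrored_kernelcore (see cmdline_parse_kernelcore() above).
 */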
Mel Gormaned7ed362007-07-17 04:03:14 -07006091
Tejun Heo0ee332c2011-12-08 10:22:09 -08006092#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07006093
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006094void adjust_managed_page_count(struct page *page, long count)
6095{
6096 spin_lock(&managed_page_count_lock);
6097 page_zone(page)->managed_pages += count;
6098 totalram_pages += count;
Jiang Liu3dcc0572013-07-03 15:03:21 -07006099#ifdef CONFIG_HIGHMEM
6100 if (PageHighMem(page))
6101 totalhigh_pages += count;
6102#endif
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006103 spin_unlock(&managed_page_count_lock);
6104}
Jiang Liu3dcc0572013-07-03 15:03:21 -07006105EXPORT_SYMBOL(adjust_managed_page_count);
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006106
Jiang Liu11199692013-07-03 15:02:48 -07006107unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
Jiang Liu69afade2013-04-29 15:06:21 -07006108{
Jiang Liu11199692013-07-03 15:02:48 -07006109 void *pos;
6110 unsigned long pages = 0;
Jiang Liu69afade2013-04-29 15:06:21 -07006111
Jiang Liu11199692013-07-03 15:02:48 -07006112 start = (void *)PAGE_ALIGN((unsigned long)start);
6113 end = (void *)((unsigned long)end & PAGE_MASK);
6114 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
Jiang Liudbe67df2013-07-03 15:02:51 -07006115 if ((unsigned int)poison <= 0xFF)
Jiang Liu11199692013-07-03 15:02:48 -07006116 memset(pos, poison, PAGE_SIZE);
6117 free_reserved_page(virt_to_page(pos));
Jiang Liu69afade2013-04-29 15:06:21 -07006118 }
6119
6120 if (pages && s)
Jiang Liu11199692013-07-03 15:02:48 -07006121 pr_info("Freeing %s memory: %ldK (%p - %p)\n",
Jiang Liu69afade2013-04-29 15:06:21 -07006122 s, pages << (PAGE_SHIFT - 10), start, end);
6123
6124 return pages;
6125}
Jiang Liu11199692013-07-03 15:02:48 -07006126EXPORT_SYMBOL(free_reserved_area);
Jiang Liu69afade2013-04-29 15:06:21 -07006127
Jiang Liucfa11e02013-04-29 15:07:00 -07006128#ifdef CONFIG_HIGHMEM
6129void free_highmem_page(struct page *page)
6130{
6131 __free_reserved_page(page);
6132 totalram_pages++;
Jiang Liu7b4b2a02013-07-03 15:03:11 -07006133 page_zone(page)->managed_pages++;
Jiang Liucfa11e02013-04-29 15:07:00 -07006134 totalhigh_pages++;
6135}
6136#endif
6137
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006138
6139void __init mem_init_print_info(const char *str)
6140{
6141 unsigned long physpages, codesize, datasize, rosize, bss_size;
6142 unsigned long init_code_size, init_data_size;
6143
6144 physpages = get_num_physpages();
6145 codesize = _etext - _stext;
6146 datasize = _edata - _sdata;
6147 rosize = __end_rodata - __start_rodata;
6148 bss_size = __bss_stop - __bss_start;
6149 init_data_size = __init_end - __init_begin;
6150 init_code_size = _einittext - _sinittext;
6151
6152 /*
6153 * Detect special cases and adjust section sizes accordingly:
6154 * 1) .init.* may be embedded into .data sections
6155 * 2) .init.text.* may be out of [__init_begin, __init_end],
6156 * please refer to arch/tile/kernel/vmlinux.lds.S.
6157 * 3) .rodata.* may be embedded into .text or .data sections.
6158 */
6159#define adj_init_size(start, end, size, pos, adj) \
Pintu Kumarb8af2942013-09-11 14:20:34 -07006160 do { \
6161 if (start <= pos && pos < end && size > adj) \
6162 size -= adj; \
6163 } while (0)
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006164
6165 adj_init_size(__init_begin, __init_end, init_data_size,
6166 _sinittext, init_code_size);
6167 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6168 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6169 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6170 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6171
6172#undef adj_init_size
6173
Joe Perches756a0252016-03-17 14:19:47 -07006174 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006175#ifdef CONFIG_HIGHMEM
Joe Perches756a0252016-03-17 14:19:47 -07006176 ", %luK highmem"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006177#endif
Joe Perches756a0252016-03-17 14:19:47 -07006178 "%s%s)\n",
6179 nr_free_pages() << (PAGE_SHIFT - 10),
6180 physpages << (PAGE_SHIFT - 10),
6181 codesize >> 10, datasize >> 10, rosize >> 10,
6182 (init_data_size + init_code_size) >> 10, bss_size >> 10,
6183 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
6184 totalcma_pages << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006185#ifdef CONFIG_HIGHMEM
Joe Perches756a0252016-03-17 14:19:47 -07006186 totalhigh_pages << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006187#endif
Joe Perches756a0252016-03-17 14:19:47 -07006188 str ? ", " : "", str ? str : "");
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006189}
6190
Mel Gorman0e0b8642006-09-27 01:49:56 -07006191/**
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006192 * set_dma_reserve - set the specified number of pages reserved in the first zone
6193 * @new_dma_reserve: The number of pages to mark reserved
Mel Gorman0e0b8642006-09-27 01:49:56 -07006194 *
Yaowei Bai013110a2015-09-08 15:04:10 -07006195 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
Mel Gorman0e0b8642006-09-27 01:49:56 -07006196 * In the DMA zone, a significant percentage may be consumed by the kernel image
6197 * and other unfreeable allocations which can skew the watermarks badly. This
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006198 * function may optionally be used to account for unfreeable pages in the
6199 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6200 * smaller per-cpu batchsize.
Mel Gorman0e0b8642006-09-27 01:49:56 -07006201 */
6202void __init set_dma_reserve(unsigned long new_dma_reserve)
6203{
6204 dma_reserve = new_dma_reserve;
6205}
6206
Linus Torvalds1da177e2005-04-16 15:20:36 -07006207void __init free_area_init(unsigned long *zones_size)
6208{
Johannes Weiner9109fb72008-07-23 21:27:20 -07006209 free_area_init_node(0, zones_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006210 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6211}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006212
Linus Torvalds1da177e2005-04-16 15:20:36 -07006213static int page_alloc_cpu_notify(struct notifier_block *self,
6214 unsigned long action, void *hcpu)
6215{
6216 int cpu = (unsigned long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006217
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006218 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
Konstantin Khlebnikovf0cb3c72012-03-21 16:34:06 -07006219 lru_add_drain_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08006220 drain_pages(cpu);
6221
6222 /*
6223 * Spill the event counters of the dead processor
6224 * into the current processors event counters.
6225 * This artificially elevates the count of the current
6226 * processor.
6227 */
Christoph Lameterf8891e52006-06-30 01:55:45 -07006228 vm_events_fold_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08006229
6230 /*
6231 * Zero the differential counters of the dead processor
6232 * so that the vm statistics are consistent.
6233 *
6234 * This is only okay since the processor is dead and cannot
6235 * race with what we are doing.
6236 */
Christoph Lameter2bb921e2013-09-11 14:21:30 -07006237 cpu_vm_stats_fold(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006238 }
6239 return NOTIFY_OK;
6240}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006241
6242void __init page_alloc_init(void)
6243{
6244 hotcpu_notifier(page_alloc_cpu_notify, 0);
6245}
6246
6247/*
Yaowei Bai34b10062015-09-08 15:04:13 -07006248 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006249 * or min_free_kbytes changes.
6250 */
6251static void calculate_totalreserve_pages(void)
6252{
6253 struct pglist_data *pgdat;
6254 unsigned long reserve_pages = 0;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006255 enum zone_type i, j;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006256
6257 for_each_online_pgdat(pgdat) {
6258 for (i = 0; i < MAX_NR_ZONES; i++) {
6259 struct zone *zone = pgdat->node_zones + i;
Mel Gorman3484b2d2014-08-06 16:07:14 -07006260 long max = 0;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006261
6262 /* Find valid and maximum lowmem_reserve in the zone */
6263 for (j = i; j < MAX_NR_ZONES; j++) {
6264 if (zone->lowmem_reserve[j] > max)
6265 max = zone->lowmem_reserve[j];
6266 }
6267
Mel Gorman41858962009-06-16 15:32:12 -07006268 /* we treat the high watermark as reserved pages. */
6269 max += high_wmark_pages(zone);
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006270
Jiang Liub40da042013-02-22 16:33:52 -08006271 if (max > zone->managed_pages)
6272 max = zone->managed_pages;
Johannes Weinera8d01432016-01-14 15:20:15 -08006273
6274 zone->totalreserve_pages = max;
6275
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006276 reserve_pages += max;
6277 }
6278 }
6279 totalreserve_pages = reserve_pages;
6280}
6281
6282/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07006283 * setup_per_zone_lowmem_reserve - called whenever
Yaowei Bai34b10062015-09-08 15:04:13 -07006284 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
Linus Torvalds1da177e2005-04-16 15:20:36 -07006285 * has a correct pages reserved value, so an adequate number of
6286 * pages are left in the zone after a successful __alloc_pages().
6287 */
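/*
 * Worked example (illustrative, assuming the default lowmem_reserve ratio of
 * 256 for the DMA zone): with roughly 200704 managed pages (~784MB) in the
 * zones above it, the DMA zone gets lowmem_reserve[ZONE_NORMAL] =
 * 200704 / 256 = 784 pages, i.e. about 3MB of DMA memory is kept out of
 * reach of allocations that could have been satisfied from higher zones.
 */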
6288static void setup_per_zone_lowmem_reserve(void)
6289{
6290 struct pglist_data *pgdat;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006291 enum zone_type j, idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006292
KAMEZAWA Hiroyukiec936fc2006-03-27 01:15:59 -08006293 for_each_online_pgdat(pgdat) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006294 for (j = 0; j < MAX_NR_ZONES; j++) {
6295 struct zone *zone = pgdat->node_zones + j;
Jiang Liub40da042013-02-22 16:33:52 -08006296 unsigned long managed_pages = zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006297
6298 zone->lowmem_reserve[j] = 0;
6299
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006300 idx = j;
6301 while (idx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006302 struct zone *lower_zone;
6303
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006304 idx--;
6305
Linus Torvalds1da177e2005-04-16 15:20:36 -07006306 if (sysctl_lowmem_reserve_ratio[idx] < 1)
6307 sysctl_lowmem_reserve_ratio[idx] = 1;
6308
6309 lower_zone = pgdat->node_zones + idx;
Jiang Liub40da042013-02-22 16:33:52 -08006310 lower_zone->lowmem_reserve[j] = managed_pages /
Linus Torvalds1da177e2005-04-16 15:20:36 -07006311 sysctl_lowmem_reserve_ratio[idx];
Jiang Liub40da042013-02-22 16:33:52 -08006312 managed_pages += lower_zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006313 }
6314 }
6315 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006316
6317 /* update totalreserve_pages */
6318 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006319}
6320
Mel Gormancfd3da12011-04-25 21:36:42 +00006321static void __setup_per_zone_wmarks(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006322{
6323 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6324 unsigned long lowmem_pages = 0;
6325 struct zone *zone;
6326 unsigned long flags;
6327
6328 /* Calculate total number of !ZONE_HIGHMEM pages */
6329 for_each_zone(zone) {
6330 if (!is_highmem(zone))
Jiang Liub40da042013-02-22 16:33:52 -08006331 lowmem_pages += zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006332 }
6333
6334 for_each_zone(zone) {
Andrew Mortonac924c62006-05-15 09:43:59 -07006335 u64 tmp;
6336
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07006337 spin_lock_irqsave(&zone->lock, flags);
Jiang Liub40da042013-02-22 16:33:52 -08006338 tmp = (u64)pages_min * zone->managed_pages;
Andrew Mortonac924c62006-05-15 09:43:59 -07006339 do_div(tmp, lowmem_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006340 if (is_highmem(zone)) {
6341 /*
Nick Piggin669ed172005-11-13 16:06:45 -08006342 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6343 * need highmem pages, so cap pages_min to a small
6344 * value here.
6345 *
Mel Gorman41858962009-06-16 15:32:12 -07006346 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
Yaowei Bai42ff2702015-04-14 15:47:14 -07006347			 * deltas control async page reclaim, and so should
Nick Piggin669ed172005-11-13 16:06:45 -08006348 * not be capped for highmem.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006349 */
Andrew Morton90ae8d62013-02-22 16:32:22 -08006350 unsigned long min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006351
Jiang Liub40da042013-02-22 16:33:52 -08006352 min_pages = zone->managed_pages / 1024;
Andrew Morton90ae8d62013-02-22 16:32:22 -08006353 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
Mel Gorman41858962009-06-16 15:32:12 -07006354 zone->watermark[WMARK_MIN] = min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006355 } else {
Nick Piggin669ed172005-11-13 16:06:45 -08006356 /*
6357 * If it's a lowmem zone, reserve a number of pages
Linus Torvalds1da177e2005-04-16 15:20:36 -07006358 * proportionate to the zone's size.
6359 */
Mel Gorman41858962009-06-16 15:32:12 -07006360 zone->watermark[WMARK_MIN] = tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006361 }
6362
Johannes Weiner795ae7a2016-03-17 14:19:14 -07006363 /*
6364 * Set the kswapd watermarks distance according to the
6365 * scale factor in proportion to available memory, but
6366 * ensure a minimum size on small systems.
6367 */
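		/*
		 * Worked example (illustrative): for a zone with 1,000,000
		 * managed pages and watermark_scale_factor = 10 (0.1%), the
		 * proportional term is mult_frac(1000000, 10, 10000) = 1000
		 * pages; if min/4 is larger, that larger value wins, and then
		 * low = min + tmp and high = min + 2 * tmp below.
		 */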
6368 tmp = max_t(u64, tmp >> 2,
6369 mult_frac(zone->managed_pages,
6370 watermark_scale_factor, 10000));
6371
6372 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
6373 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
Marek Szyprowski49f223a2012-01-25 12:49:24 +01006374
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07006375 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
Johannes Weinerabe5f972014-10-02 16:21:10 -07006376 high_wmark_pages(zone) - low_wmark_pages(zone) -
6377 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07006378
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07006379 spin_unlock_irqrestore(&zone->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006380 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006381
6382 /* update totalreserve_pages */
6383 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006384}
6385
Mel Gormancfd3da12011-04-25 21:36:42 +00006386/**
6387 * setup_per_zone_wmarks - called when min_free_kbytes changes
6388 * or when memory is hot-{added|removed}
6389 *
6390 * Ensures that the watermark[min,low,high] values for each zone are set
6391 * correctly with respect to min_free_kbytes.
6392 */
6393void setup_per_zone_wmarks(void)
6394{
6395 mutex_lock(&zonelists_mutex);
6396 __setup_per_zone_wmarks();
6397 mutex_unlock(&zonelists_mutex);
6398}
6399
Randy Dunlap55a44622009-09-21 17:01:20 -07006400/*
Rik van Riel556adec2008-10-18 20:26:34 -07006401 * The inactive anon list should be small enough that the VM never has to
6402 * do too much work, but large enough that each inactive page has a chance
6403 * to be referenced again before it is swapped out.
6404 *
6405 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
6406 * INACTIVE_ANON pages on this zone's LRU, maintained by the
6407 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
6408 * the anonymous pages are kept on the inactive list.
6409 *
6410 * total target max
6411 * memory ratio inactive anon
6412 * -------------------------------------
6413 * 10MB 1 5MB
6414 * 100MB 1 50MB
6415 * 1GB 3 250MB
6416 * 10GB 10 0.9GB
6417 * 100GB 31 3GB
6418 * 1TB 101 10GB
6419 * 10TB 320 32GB
6420 */
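/*
 * The table above follows from ratio = int_sqrt(10 * gb) in
 * calculate_zone_inactive_ratio() below: e.g. a 1GB zone gives
 * int_sqrt(10) = 3 and a 100GB zone gives int_sqrt(1000) = 31.
 */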
KOSAKI Motohiro1b79acc2011-05-24 17:11:32 -07006421static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
Minchan Kim96cb4df2009-06-16 15:32:49 -07006422{
6423 unsigned int gb, ratio;
6424
6425 /* Zone size in gigabytes */
Jiang Liub40da042013-02-22 16:33:52 -08006426 gb = zone->managed_pages >> (30 - PAGE_SHIFT);
Minchan Kim96cb4df2009-06-16 15:32:49 -07006427 if (gb)
6428 ratio = int_sqrt(10 * gb);
6429 else
6430 ratio = 1;
6431
6432 zone->inactive_ratio = ratio;
6433}
6434
KOSAKI Motohiro839a4fc2011-05-24 17:11:31 -07006435static void __meminit setup_per_zone_inactive_ratio(void)
Rik van Riel556adec2008-10-18 20:26:34 -07006436{
6437 struct zone *zone;
6438
Minchan Kim96cb4df2009-06-16 15:32:49 -07006439 for_each_zone(zone)
6440 calculate_zone_inactive_ratio(zone);
Rik van Riel556adec2008-10-18 20:26:34 -07006441}
6442
Linus Torvalds1da177e2005-04-16 15:20:36 -07006443/*
6444 * Initialise min_free_kbytes.
6445 *
6446 * For small machines we want it small (128k min). For large machines
6447 * we want it large (64MB max). But it is not linear, because network
6448 * bandwidth does not increase linearly with machine size. We use
6449 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07006450 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006451 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6452 *
6453 * which yields
6454 *
6455 * 16MB: 512k
6456 * 32MB: 724k
6457 * 64MB: 1024k
6458 * 128MB: 1448k
6459 * 256MB: 2048k
6460 * 512MB: 2896k
6461 * 1024MB: 4096k
6462 * 2048MB: 5792k
6463 * 4096MB: 8192k
6464 * 8192MB: 11584k
6465 * 16384MB: 16384k
6466 */
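/*
 * Worked example (illustrative): a machine with ~4GB of lowmem has
 * lowmem_kbytes ~= 4194304, so int_sqrt(4194304 * 16) = int_sqrt(67108864)
 * = 8192, matching the 4096MB row in the table above.
 */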
KOSAKI Motohiro1b79acc2011-05-24 17:11:32 -07006467int __meminit init_per_zone_wmark_min(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006468{
6469 unsigned long lowmem_kbytes;
Michal Hocko5f127332013-07-08 16:00:40 -07006470 int new_min_free_kbytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006471
6472 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
Michal Hocko5f127332013-07-08 16:00:40 -07006473 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006474
Michal Hocko5f127332013-07-08 16:00:40 -07006475 if (new_min_free_kbytes > user_min_free_kbytes) {
6476 min_free_kbytes = new_min_free_kbytes;
6477 if (min_free_kbytes < 128)
6478 min_free_kbytes = 128;
6479 if (min_free_kbytes > 65536)
6480 min_free_kbytes = 65536;
6481 } else {
6482 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6483 new_min_free_kbytes, user_min_free_kbytes);
6484 }
Minchan Kimbc75d332009-06-16 15:32:48 -07006485 setup_per_zone_wmarks();
KOSAKI Motohiroa6cccdc2011-05-24 17:11:33 -07006486 refresh_zone_stat_thresholds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006487 setup_per_zone_lowmem_reserve();
Rik van Riel556adec2008-10-18 20:26:34 -07006488 setup_per_zone_inactive_ratio();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006489 return 0;
6490}
Jason Baronbc22af742016-05-05 16:22:12 -07006491core_initcall(init_per_zone_wmark_min)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006492
6493/*
Pintu Kumarb8af2942013-09-11 14:20:34 -07006494 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
Linus Torvalds1da177e2005-04-16 15:20:36 -07006495 * that we can call two helper functions whenever min_free_kbytes
6496 * changes.
6497 */
Joe Perchescccad5b2014-06-06 14:38:09 -07006498int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006499 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006500{
Han Pingtianda8c7572014-01-23 15:53:17 -08006501 int rc;
6502
6503 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6504 if (rc)
6505 return rc;
6506
Michal Hocko5f127332013-07-08 16:00:40 -07006507 if (write) {
6508 user_min_free_kbytes = min_free_kbytes;
Minchan Kimbc75d332009-06-16 15:32:48 -07006509 setup_per_zone_wmarks();
Michal Hocko5f127332013-07-08 16:00:40 -07006510 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006511 return 0;
6512}
6513
Johannes Weiner795ae7a2016-03-17 14:19:14 -07006514int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
6515 void __user *buffer, size_t *length, loff_t *ppos)
6516{
6517 int rc;
6518
6519 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6520 if (rc)
6521 return rc;
6522
6523 if (write)
6524 setup_per_zone_wmarks();
6525
6526 return 0;
6527}
6528
Christoph Lameter96146342006-07-03 00:24:13 -07006529#ifdef CONFIG_NUMA
Joe Perchescccad5b2014-06-06 14:38:09 -07006530int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006531 void __user *buffer, size_t *length, loff_t *ppos)
Christoph Lameter96146342006-07-03 00:24:13 -07006532{
6533 struct zone *zone;
6534 int rc;
6535
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006536 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
Christoph Lameter96146342006-07-03 00:24:13 -07006537 if (rc)
6538 return rc;
6539
6540 for_each_zone(zone)
Jiang Liub40da042013-02-22 16:33:52 -08006541 zone->min_unmapped_pages = (zone->managed_pages *
Christoph Lameter96146342006-07-03 00:24:13 -07006542 sysctl_min_unmapped_ratio) / 100;
6543 return 0;
6544}
Christoph Lameter0ff38492006-09-25 23:31:52 -07006545
Joe Perchescccad5b2014-06-06 14:38:09 -07006546int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006547 void __user *buffer, size_t *length, loff_t *ppos)
Christoph Lameter0ff38492006-09-25 23:31:52 -07006548{
6549 struct zone *zone;
6550 int rc;
6551
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006552 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
Christoph Lameter0ff38492006-09-25 23:31:52 -07006553 if (rc)
6554 return rc;
6555
6556 for_each_zone(zone)
Jiang Liub40da042013-02-22 16:33:52 -08006557 zone->min_slab_pages = (zone->managed_pages *
Christoph Lameter0ff38492006-09-25 23:31:52 -07006558 sysctl_min_slab_ratio) / 100;
6559 return 0;
6560}
Christoph Lameter96146342006-07-03 00:24:13 -07006561#endif
6562
Linus Torvalds1da177e2005-04-16 15:20:36 -07006563/*
6564 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6565 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6566 * whenever sysctl_lowmem_reserve_ratio changes.
6567 *
6568 * The reserve ratio obviously has absolutely no relation with the
Mel Gorman41858962009-06-16 15:32:12 -07006569 * minimum watermarks. The lowmem reserve ratio can only make sense
Linus Torvalds1da177e2005-04-16 15:20:36 -07006570 * if in function of the boot time zone sizes.
6571 */
Joe Perchescccad5b2014-06-06 14:38:09 -07006572int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006573 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006574{
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006575 proc_dointvec_minmax(table, write, buffer, length, ppos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006576 setup_per_zone_lowmem_reserve();
6577 return 0;
6578}
6579
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006580/*
6581 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
Pintu Kumarb8af2942013-09-11 14:20:34 -07006582 * cpu. It is the fraction of total pages in each zone that a hot per cpu
 6583 * pagelist can have before it gets flushed back to the buddy allocator.
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006584 */
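/*
 * Example (illustrative): writing 8 to this sysctl caps each per-cpu
 * pagelist of a zone with 1,048,576 managed pages at roughly
 * 1048576 / 8 = 131072 pages before it spills back to the buddy lists;
 * writing 0 restores the default batch-derived sizing.
 */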
Joe Perchescccad5b2014-06-06 14:38:09 -07006585int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006586 void __user *buffer, size_t *length, loff_t *ppos)
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006587{
6588 struct zone *zone;
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006589 int old_percpu_pagelist_fraction;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006590 int ret;
6591
Cody P Schaferc8e251f2013-07-03 15:01:29 -07006592 mutex_lock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006593 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
6594
6595 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6596 if (!write || ret < 0)
6597 goto out;
6598
6599 /* Sanity checking to avoid pcp imbalance */
6600 if (percpu_pagelist_fraction &&
6601 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
6602 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
6603 ret = -EINVAL;
6604 goto out;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006605 }
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006606
6607 /* No change? */
6608 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
6609 goto out;
6610
6611 for_each_populated_zone(zone) {
6612 unsigned int cpu;
6613
6614 for_each_possible_cpu(cpu)
6615 pageset_set_high_and_batch(zone,
6616 per_cpu_ptr(zone->pageset, cpu));
6617 }
6618out:
Cody P Schaferc8e251f2013-07-03 15:01:29 -07006619 mutex_unlock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006620 return ret;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006621}
6622
Rasmus Villemoesa9919c72015-06-24 16:56:28 -07006623#ifdef CONFIG_NUMA
David S. Millerf034b5d2006-08-24 03:08:07 -07006624int hashdist = HASHDIST_DEFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006625
Linus Torvalds1da177e2005-04-16 15:20:36 -07006626static int __init set_hashdist(char *str)
6627{
6628 if (!str)
6629 return 0;
6630 hashdist = simple_strtoul(str, &str, 0);
6631 return 1;
6632}
6633__setup("hashdist=", set_hashdist);
6634#endif
6635
6636/*
6637 * allocate a large system hash table from bootmem
6638 * - it is assumed that the hash table must contain an exact power-of-2
6639 * quantity of entries
6640 * - limit is the number of hash buckets, not the total allocation size
6641 */
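/*
 * Hypothetical usage sketch (the names below are examples, not real kernel
 * symbols): an early-boot caller sizing a cache-style table might do
 *
 *	example_table = alloc_large_system_hash("Example cache",
 *						sizeof(struct hlist_head),
 *						0, 14, HASH_EARLY,
 *						&example_shift, &example_mask,
 *						0, 0);
 *
 * i.e. let numentries be derived from the memory size, use one bucket per
 * 2^14 bytes of low memory, and receive the resulting shift/mask back.
 */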
6642void *__init alloc_large_system_hash(const char *tablename,
6643 unsigned long bucketsize,
6644 unsigned long numentries,
6645 int scale,
6646 int flags,
6647 unsigned int *_hash_shift,
6648 unsigned int *_hash_mask,
Tim Bird31fe62b2012-05-23 13:33:35 +00006649 unsigned long low_limit,
6650 unsigned long high_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006651{
Tim Bird31fe62b2012-05-23 13:33:35 +00006652 unsigned long long max = high_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006653 unsigned long log2qty, size;
6654 void *table = NULL;
6655
6656 /* allow the kernel cmdline to have a say */
6657 if (!numentries) {
6658 /* round applicable memory size up to nearest megabyte */
Andrew Morton04903662006-12-06 20:37:33 -08006659 numentries = nr_kernel_pages;
Jerry Zhoua7e83312013-09-11 14:20:26 -07006660
6661 /* It isn't necessary when PAGE_SIZE >= 1MB */
6662 if (PAGE_SHIFT < 20)
6663 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006664
6665 /* limit to 1 bucket per 2^scale bytes of low memory */
6666 if (scale > PAGE_SHIFT)
6667 numentries >>= (scale - PAGE_SHIFT);
6668 else
6669 numentries <<= (PAGE_SHIFT - scale);
Paul Mundt9ab37b82007-01-05 16:36:30 -08006670
6671 /* Make sure we've got at least a 0-order allocation.. */
Jan Beulich2c85f512009-09-21 17:03:07 -07006672 if (unlikely(flags & HASH_SMALL)) {
6673 /* Makes no sense without HASH_EARLY */
6674 WARN_ON(!(flags & HASH_EARLY));
6675 if (!(numentries >> *_hash_shift)) {
6676 numentries = 1UL << *_hash_shift;
6677 BUG_ON(!numentries);
6678 }
6679 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
Paul Mundt9ab37b82007-01-05 16:36:30 -08006680 numentries = PAGE_SIZE / bucketsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006681 }
John Hawkes6e692ed2006-03-25 03:08:02 -08006682 numentries = roundup_pow_of_two(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006683
6684 /* limit allocation size to 1/16 total memory by default */
6685 if (max == 0) {
6686 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
6687 do_div(max, bucketsize);
6688 }
Dimitri Sivanich074b8512012-02-08 12:39:07 -08006689 max = min(max, 0x80000000ULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006690
Tim Bird31fe62b2012-05-23 13:33:35 +00006691 if (numentries < low_limit)
6692 numentries = low_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006693 if (numentries > max)
6694 numentries = max;
6695
David Howellsf0d1b0b2006-12-08 02:37:49 -08006696 log2qty = ilog2(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006697
6698 do {
6699 size = bucketsize << log2qty;
6700 if (flags & HASH_EARLY)
Santosh Shilimkar67828322014-01-21 15:50:25 -08006701 table = memblock_virt_alloc_nopanic(size, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006702 else if (hashdist)
6703 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
6704 else {
Eric Dumazet1037b832007-07-15 23:38:05 -07006705 /*
 6706			 * If bucketsize is not a power of two, we may free
Mel Gormana1dd2682009-06-16 15:32:19 -07006707			 * some pages at the end of the hash table, which
 6708			 * alloc_pages_exact() does automatically.
Eric Dumazet1037b832007-07-15 23:38:05 -07006709 */
Catalin Marinas264ef8a2009-07-07 10:33:01 +01006710 if (get_order(size) < MAX_ORDER) {
Mel Gormana1dd2682009-06-16 15:32:19 -07006711 table = alloc_pages_exact(size, GFP_ATOMIC);
Catalin Marinas264ef8a2009-07-07 10:33:01 +01006712 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
6713 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006714 }
6715 } while (!table && size > PAGE_SIZE && --log2qty);
6716
6717 if (!table)
6718 panic("Failed to allocate %s hash table\n", tablename);
6719
Joe Perches11705322016-03-17 14:19:50 -07006720 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
6721 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006722
6723 if (_hash_shift)
6724 *_hash_shift = log2qty;
6725 if (_hash_mask)
6726 *_hash_mask = (1 << log2qty) - 1;
6727
6728 return table;
6729}
KAMEZAWA Hiroyukia117e662006-03-27 01:15:25 -08006730
Mel Gorman835c1342007-10-16 01:25:47 -07006731/* Return a pointer to the bitmap storing bits affecting a block of pages */
6732static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
6733 unsigned long pfn)
6734{
6735#ifdef CONFIG_SPARSEMEM
6736 return __pfn_to_section(pfn)->pageblock_flags;
6737#else
6738 return zone->pageblock_flags;
6739#endif /* CONFIG_SPARSEMEM */
6740}
Andrew Morton6220ec72006-10-19 23:29:05 -07006741
Mel Gorman835c1342007-10-16 01:25:47 -07006742static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6743{
6744#ifdef CONFIG_SPARSEMEM
6745 pfn &= (PAGES_PER_SECTION-1);
Mel Gormand9c23402007-10-16 01:26:01 -07006746 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
Mel Gorman835c1342007-10-16 01:25:47 -07006747#else
Laura Abbottc060f942013-01-11 14:31:51 -08006748 pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
Mel Gormand9c23402007-10-16 01:26:01 -07006749 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
Mel Gorman835c1342007-10-16 01:25:47 -07006750#endif /* CONFIG_SPARSEMEM */
6751}
6752
6753/**
Randy Dunlap1aab4d72014-07-27 14:15:33 -07006754 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
Mel Gorman835c1342007-10-16 01:25:47 -07006755 * @page: The page within the block of interest
Randy Dunlap1aab4d72014-07-27 14:15:33 -07006756 * @pfn: The target page frame number
6757 * @end_bitidx: The last bit of interest to retrieve
6758 * @mask: mask of bits that the caller is interested in
6759 *
6760 * Return: pageblock_bits flags
Mel Gorman835c1342007-10-16 01:25:47 -07006761 */
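/*
 * Illustrative call (a sketch of how migratetype lookups use this helper;
 * the exact wrapper lives in a header, so treat the constants as assumed):
 *
 *	mt = get_pfnblock_flags_mask(page, page_to_pfn(page),
 *				     PB_migrate_end, MIGRATETYPE_MASK);
 *
 * returns the MIGRATE_* type stored in the pageblock bitmap for @page.
 */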
Mel Gormandc4b0ca2014-06-04 16:10:17 -07006762unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
Mel Gormane58469b2014-06-04 16:10:16 -07006763 unsigned long end_bitidx,
6764 unsigned long mask)
Mel Gorman835c1342007-10-16 01:25:47 -07006765{
6766 struct zone *zone;
6767 unsigned long *bitmap;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07006768 unsigned long bitidx, word_bitidx;
Mel Gormane58469b2014-06-04 16:10:16 -07006769 unsigned long word;
Mel Gorman835c1342007-10-16 01:25:47 -07006770
6771 zone = page_zone(page);
Mel Gorman835c1342007-10-16 01:25:47 -07006772 bitmap = get_pageblock_bitmap(zone, pfn);
6773 bitidx = pfn_to_bitidx(zone, pfn);
Mel Gormane58469b2014-06-04 16:10:16 -07006774 word_bitidx = bitidx / BITS_PER_LONG;
6775 bitidx &= (BITS_PER_LONG-1);
Mel Gorman835c1342007-10-16 01:25:47 -07006776
Mel Gormane58469b2014-06-04 16:10:16 -07006777 word = bitmap[word_bitidx];
6778 bitidx += end_bitidx;
6779 return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
Mel Gorman835c1342007-10-16 01:25:47 -07006780}
6781
6782/**
Mel Gormandc4b0ca2014-06-04 16:10:17 -07006783 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
Mel Gorman835c1342007-10-16 01:25:47 -07006784 * @page: The page within the block of interest
Mel Gorman835c1342007-10-16 01:25:47 -07006785 * @flags: The flags to set
Randy Dunlap1aab4d72014-07-27 14:15:33 -07006786 * @pfn: The target page frame number
6787 * @end_bitidx: The last bit of interest
6788 * @mask: mask of bits that the caller is interested in
Mel Gorman835c1342007-10-16 01:25:47 -07006789 */
Mel Gormandc4b0ca2014-06-04 16:10:17 -07006790void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6791 unsigned long pfn,
Mel Gormane58469b2014-06-04 16:10:16 -07006792 unsigned long end_bitidx,
6793 unsigned long mask)
Mel Gorman835c1342007-10-16 01:25:47 -07006794{
6795 struct zone *zone;
6796 unsigned long *bitmap;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07006797 unsigned long bitidx, word_bitidx;
Mel Gormane58469b2014-06-04 16:10:16 -07006798 unsigned long old_word, word;
6799
6800 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
Mel Gorman835c1342007-10-16 01:25:47 -07006801
6802 zone = page_zone(page);
Mel Gorman835c1342007-10-16 01:25:47 -07006803 bitmap = get_pageblock_bitmap(zone, pfn);
6804 bitidx = pfn_to_bitidx(zone, pfn);
Mel Gormane58469b2014-06-04 16:10:16 -07006805 word_bitidx = bitidx / BITS_PER_LONG;
6806 bitidx &= (BITS_PER_LONG-1);
6807
Sasha Levin309381fea2014-01-23 15:52:54 -08006808 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
Mel Gorman835c1342007-10-16 01:25:47 -07006809
Mel Gormane58469b2014-06-04 16:10:16 -07006810 bitidx += end_bitidx;
6811 mask <<= (BITS_PER_LONG - bitidx - 1);
6812 flags <<= (BITS_PER_LONG - bitidx - 1);
6813
Jason Low4db0c3c2015-04-15 16:14:08 -07006814 word = READ_ONCE(bitmap[word_bitidx]);
Mel Gormane58469b2014-06-04 16:10:16 -07006815 for (;;) {
6816 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
6817 if (word == old_word)
6818 break;
6819 word = old_word;
6820 }
Mel Gorman835c1342007-10-16 01:25:47 -07006821}
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07006822
6823/*
Minchan Kim80934512012-07-31 16:43:01 -07006824 * This function checks whether the pageblock contains unmovable pages or not.
 6825 * If @count is not zero, it is okay for the range to include up to @count unmovable pages.
 6826 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07006827 * The PageLRU check without isolation or lru_lock could race, so a
Minchan Kim80934512012-07-31 16:43:01 -07006828 * MIGRATE_MOVABLE block might include unmovable pages. That means you can't
 6829 * expect this function to be exact.
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07006830 */
Wen Congyangb023f462012-12-11 16:00:45 -08006831bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
6832 bool skip_hwpoisoned_pages)
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006833{
6834 unsigned long pfn, iter, found;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01006835 int mt;
6836
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006837 /*
 6838	 * To avoid noisy data, lru_add_drain_all() should be called first.
Minchan Kim80934512012-07-31 16:43:01 -07006839	 * If the zone is ZONE_MOVABLE, it never contains unmovable pages.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006840 */
6841 if (zone_idx(zone) == ZONE_MOVABLE)
Minchan Kim80934512012-07-31 16:43:01 -07006842 return false;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01006843 mt = get_pageblock_migratetype(page);
6844 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
Minchan Kim80934512012-07-31 16:43:01 -07006845 return false;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006846
6847 pfn = page_to_pfn(page);
6848 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
6849 unsigned long check = pfn + iter;
6850
Namhyung Kim29723fc2011-02-25 14:44:25 -08006851 if (!pfn_valid_within(check))
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006852 continue;
Namhyung Kim29723fc2011-02-25 14:44:25 -08006853
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006854 page = pfn_to_page(check);
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07006855
6856 /*
6857 * Hugepages are not in LRU lists, but they're movable.
 6858		 * We need not scan over tail pages because we don't
6859 * handle each tail page individually in migration.
6860 */
6861 if (PageHuge(page)) {
6862 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
6863 continue;
6864 }
6865
Minchan Kim97d255c2012-07-31 16:42:59 -07006866 /*
 6867		 * We can't use page_count without pinning a page
 6868		 * because another CPU can free the compound page.
 6869		 * This check already skips compound tails of THP
Joonsoo Kim0139aa72016-05-19 17:10:49 -07006870		 * because their page->_refcount is zero at all times.
Minchan Kim97d255c2012-07-31 16:42:59 -07006871 */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07006872 if (!page_ref_count(page)) {
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006873 if (PageBuddy(page))
6874 iter += (1 << page_order(page)) - 1;
6875 continue;
6876 }
Minchan Kim97d255c2012-07-31 16:42:59 -07006877
Wen Congyangb023f462012-12-11 16:00:45 -08006878 /*
 6879		 * The HWPoisoned page may not be in the buddy system, and
6880 * page_count() is not 0.
6881 */
6882 if (skip_hwpoisoned_pages && PageHWPoison(page))
6883 continue;
6884
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006885 if (!PageLRU(page))
6886 found++;
6887 /*
Johannes Weiner6b4f7792014-12-12 16:56:13 -08006888		 * If there are RECLAIMABLE pages, we need to check
 6889		 * them. But for now, memory offline itself doesn't call
 6890		 * shrink_node_slabs(), and this still needs to be fixed.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006891 */
6892 /*
 6893		 * If the page is not RAM, page_count() should be 0, so
 6894		 * we don't need further checks. This is a _used_ non-movable page.
6895 *
6896 * The problematic thing here is PG_reserved pages. PG_reserved
6897 * is set to both of a memory hole page and a _used_ kernel
6898 * page at boot.
6899 */
6900 if (found > count)
Minchan Kim80934512012-07-31 16:43:01 -07006901 return true;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006902 }
Minchan Kim80934512012-07-31 16:43:01 -07006903 return false;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006904}
6905
6906bool is_pageblock_removable_nolock(struct page *page)
6907{
Michal Hocko656a0702012-01-20 14:33:58 -08006908 struct zone *zone;
6909 unsigned long pfn;
Michal Hocko687875f2012-01-20 14:33:55 -08006910
6911 /*
6912 * We have to be careful here because we are iterating over memory
 6913	 * sections which are not zone aware, so we might end up outside of
 6914	 * the zone but still within the section.
Michal Hocko656a0702012-01-20 14:33:58 -08006915	 * We also have to take care of the node: if the node is offline,
 6916	 * its NODE_DATA will be NULL - see page_zone.
Michal Hocko687875f2012-01-20 14:33:55 -08006917 */
Michal Hocko656a0702012-01-20 14:33:58 -08006918 if (!node_online(page_to_nid(page)))
6919 return false;
6920
6921 zone = page_zone(page);
6922 pfn = page_to_pfn(page);
Cody P Schafer108bcc92013-02-22 16:35:23 -08006923 if (!zone_spans_pfn(zone, pfn))
Michal Hocko687875f2012-01-20 14:33:55 -08006924 return false;
6925
Wen Congyangb023f462012-12-11 16:00:45 -08006926 return !has_unmovable_pages(zone, page, 0, true);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07006927}
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07006928
Vlastimil Babka080fe202016-02-05 15:36:41 -08006929#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006930
6931static unsigned long pfn_max_align_down(unsigned long pfn)
6932{
6933 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6934 pageblock_nr_pages) - 1);
6935}
6936
6937static unsigned long pfn_max_align_up(unsigned long pfn)
6938{
6939 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6940 pageblock_nr_pages));
6941}
6942
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006943/* [start, end) must belong to a single zone. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07006944static int __alloc_contig_migrate_range(struct compact_control *cc,
6945 unsigned long start, unsigned long end)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006946{
6947 /* This function is based on compact_zone() from compaction.c. */
Minchan Kimbeb51ea2012-10-08 16:33:51 -07006948 unsigned long nr_reclaimed;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006949 unsigned long pfn = start;
6950 unsigned int tries = 0;
6951 int ret = 0;
6952
Marek Szyprowskibe49a6e2012-12-12 13:51:19 -08006953 migrate_prep();
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006954
Mel Gormanbb13ffe2012-10-08 16:32:41 -07006955 while (pfn < end || !list_empty(&cc->migratepages)) {
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006956 if (fatal_signal_pending(current)) {
6957 ret = -EINTR;
6958 break;
6959 }
6960
Mel Gormanbb13ffe2012-10-08 16:32:41 -07006961 if (list_empty(&cc->migratepages)) {
6962 cc->nr_migratepages = 0;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07006963 pfn = isolate_migratepages_range(cc, pfn, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006964 if (!pfn) {
6965 ret = -EINTR;
6966 break;
6967 }
6968 tries = 0;
6969 } else if (++tries == 5) {
6970 ret = ret < 0 ? ret : -EBUSY;
6971 break;
6972 }
6973
Minchan Kimbeb51ea2012-10-08 16:33:51 -07006974 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6975 &cc->migratepages);
6976 cc->nr_migratepages -= nr_reclaimed;
Minchan Kim02c6de82012-10-08 16:31:55 -07006977
Hugh Dickins9c620e22013-02-22 16:35:14 -08006978 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
David Rientjese0b9dae2014-06-04 16:08:28 -07006979 NULL, 0, cc->mode, MR_CMA);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006980 }
Srinivas Pandruvada2a6f5122013-02-22 16:32:09 -08006981 if (ret < 0) {
6982 putback_movable_pages(&cc->migratepages);
6983 return ret;
6984 }
6985 return 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006986}
6987
6988/**
6989 * alloc_contig_range() -- tries to allocate given range of pages
6990 * @start: start PFN to allocate
6991 * @end: one-past-the-last PFN to allocate
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02006992 * @migratetype:	migratetype of the underlying pageblocks (either
6993 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
6994 * in range must have the same migratetype and it must
6995 * be either of the two.
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006996 *
6997 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6998	 * aligned; however, it is the caller's responsibility to guarantee that
6999	 * we are the only thread that changes the migrate type of the pageblocks
7000	 * the pages fall in.
7001 *
7002 * The PFN range must belong to a single zone.
7003 *
7004	 * Returns zero on success or a negative error code.  On success, all
7005	 * pages whose PFN is in [start, end) are allocated for the caller and
7006 * need to be freed with free_contig_range().
7007 */
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02007008int alloc_contig_range(unsigned long start, unsigned long end,
7009 unsigned migratetype)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007010{
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007011 unsigned long outer_start, outer_end;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08007012 unsigned int order;
7013 int ret = 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007014
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007015 struct compact_control cc = {
7016 .nr_migratepages = 0,
7017 .order = -1,
7018 .zone = page_zone(pfn_to_page(start)),
David Rientjese0b9dae2014-06-04 16:08:28 -07007019 .mode = MIGRATE_SYNC,
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007020 .ignore_skip_hint = true,
7021 };
7022 INIT_LIST_HEAD(&cc.migratepages);
7023
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007024 /*
7025	 * What we do here is mark all pageblocks in the range as
7026	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
7027	 * have different sizes, and due to the way the page allocator
7028	 * works, we align the range to the bigger of the two so
7029	 * that the page allocator won't try to merge buddies from
7030	 * different pageblocks and change MIGRATE_ISOLATE to some
7031	 * other migration type.
7032	 *
7033	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
7034	 * migrate the pages from the unaligned range (ie. the pages
7035	 * we are actually interested in).  This puts all the pages in
7036	 * the range back into the page allocator as MIGRATE_ISOLATE.
7037	 *
7038	 * When this is done, we take the pages in the range from the
7039	 * page allocator, removing them from the buddy system.  This
7040	 * way the page allocator will never consider using them.
7041	 *
7042	 * This lets us mark the pageblocks back as
7043	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
7044	 * aligned range, but not in the unaligned original range, are
7045	 * put back to the page allocator so that buddy can use them.
7046 */
7047
7048 ret = start_isolate_page_range(pfn_max_align_down(start),
Wen Congyangb023f462012-12-11 16:00:45 -08007049 pfn_max_align_up(end), migratetype,
7050 false);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007051 if (ret)
Bob Liu86a595f2012-10-25 13:37:56 -07007052 return ret;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007053
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007054 /*
7055 * In case of -EBUSY, we'd like to know which page causes problem.
7056 * So, just fall through. We will check it in test_pages_isolated().
7057 */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007058 ret = __alloc_contig_migrate_range(&cc, start, end);
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007059 if (ret && ret != -EBUSY)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007060 goto done;
7061
7062 /*
7063	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
7064	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
7065	 * more, all pages in [start, end) are free in the page allocator.
7066	 * What we are going to do is allocate all pages from
7067	 * [start, end) (that is, remove them from the page allocator).
7068	 *
7069	 * The only problem is that pages at the beginning and at the
7070	 * end of the interesting range may not be aligned with pages
7071	 * that the page allocator holds, ie. they can be part of higher
7072	 * order pages.  Because of this, we reserve the bigger range and,
7073	 * once this is done, free the pages we are not interested in.
7074	 *
7075	 * We don't have to hold zone->lock here because the pages are
7076	 * isolated and thus won't get removed from the buddy system.
7077 */
7078
7079 lru_add_drain_all();
Vlastimil Babka510f5502014-12-10 15:43:07 -08007080 drain_all_pages(cc.zone);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007081
7082 order = 0;
7083 outer_start = start;
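	/*
	 * Walk outer_start back to the head of the free (PageBuddy) block
	 * that contains 'start', trying successively larger orders; if no
	 * buddy head is found below MAX_ORDER, fall back to 'start' itself.
	 */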
7084 while (!PageBuddy(pfn_to_page(outer_start))) {
7085 if (++order >= MAX_ORDER) {
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007086 outer_start = start;
7087 break;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007088 }
7089 outer_start &= ~0UL << order;
7090 }
7091
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007092 if (outer_start != start) {
7093 order = page_order(pfn_to_page(outer_start));
7094
7095 /*
7096		 * outer_start could point to a small order buddy page that
7097		 * doesn't include the start page.  Adjust outer_start
7098		 * in this case so the failed page is reported properly
7099		 * by the tracepoint in test_pages_isolated().
7100 */
7101 if (outer_start + (1UL << order) <= start)
7102 outer_start = start;
7103 }
7104
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007105 /* Make sure the range is really isolated. */
Wen Congyangb023f462012-12-11 16:00:45 -08007106 if (test_pages_isolated(outer_start, end, false)) {
Michal Nazarewiczdae803e2014-11-13 15:19:27 -08007107 pr_info("%s: [%lx, %lx) PFNs busy\n",
7108 __func__, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007109 ret = -EBUSY;
7110 goto done;
7111 }
7112
Marek Szyprowski49f223a2012-01-25 12:49:24 +01007113 /* Grab isolated pages from freelists. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007114 outer_end = isolate_freepages_range(&cc, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007115 if (!outer_end) {
7116 ret = -EBUSY;
7117 goto done;
7118 }
7119
7120 /* Free head and tail (if any) */
7121 if (start != outer_start)
7122 free_contig_range(outer_start, start - outer_start);
7123 if (end != outer_end)
7124 free_contig_range(end, outer_end - end);
7125
7126done:
7127 undo_isolate_page_range(pfn_max_align_down(start),
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02007128 pfn_max_align_up(end), migratetype);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007129 return ret;
7130}
7131
7132void free_contig_range(unsigned long pfn, unsigned nr_pages)
7133{
Marek Szyprowskibcc2b022012-12-20 15:05:18 -08007134 unsigned int count = 0;
7135
7136 for (; nr_pages--; pfn++) {
7137 struct page *page = pfn_to_page(pfn);
7138
7139 count += page_count(page) != 1;
7140 __free_page(page);
7141 }
7142 WARN(count != 0, "%d pages are still in use!\n", count);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007143}
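
/*
 * Minimal usage sketch, not taken from the kernel itself: a CMA-style user
 * could grab and release a physically contiguous PFN range roughly like
 * this.  The helper name and the use of MIGRATE_CMA here are illustrative
 * assumptions; real callers such as cma_alloc() wrap this in their own
 * locking and retry policy.
 */
static int __maybe_unused example_alloc_contig(unsigned long base_pfn,
					       unsigned long nr_pages)
{
	int ret;

	/* [base_pfn, base_pfn + nr_pages) must lie within a single zone. */
	ret = alloc_contig_range(base_pfn, base_pfn + nr_pages, MIGRATE_CMA);
	if (ret)
		return ret;	/* commonly -EBUSY or -EINTR */

	/* ... use pfn_to_page(base_pfn) ... */

	free_contig_range(base_pfn, nr_pages);
	return 0;
}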
7144#endif
7145
Jiang Liu4ed7e022012-07-31 16:43:35 -07007146#ifdef CONFIG_MEMORY_HOTPLUG
Cody P Schafer0a647f32013-07-03 15:01:33 -07007147/*
7148 * The zone indicated has a new number of managed_pages; batch sizes and percpu
7149 * page high values need to be recalculated.
7150 */
Jiang Liu4ed7e022012-07-31 16:43:35 -07007151void __meminit zone_pcp_update(struct zone *zone)
7152{
Cody P Schafer0a647f32013-07-03 15:01:33 -07007153 unsigned cpu;
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007154 mutex_lock(&pcp_batch_high_lock);
Cody P Schafer0a647f32013-07-03 15:01:33 -07007155 for_each_possible_cpu(cpu)
Cody P Schafer169f6c12013-07-03 15:01:41 -07007156 pageset_set_high_and_batch(zone,
7157 per_cpu_ptr(zone->pageset, cpu));
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007158 mutex_unlock(&pcp_batch_high_lock);
Jiang Liu4ed7e022012-07-31 16:43:35 -07007159}
7160#endif
7161
Jiang Liu340175b2012-07-31 16:43:32 -07007162void zone_pcp_reset(struct zone *zone)
7163{
7164 unsigned long flags;
Minchan Kim5a883812012-10-08 16:33:39 -07007165 int cpu;
7166 struct per_cpu_pageset *pset;
Jiang Liu340175b2012-07-31 16:43:32 -07007167
7168 /* avoid races with drain_pages() */
7169 local_irq_save(flags);
7170 if (zone->pageset != &boot_pageset) {
Minchan Kim5a883812012-10-08 16:33:39 -07007171 for_each_online_cpu(cpu) {
7172 pset = per_cpu_ptr(zone->pageset, cpu);
7173 drain_zonestat(zone, pset);
7174 }
Jiang Liu340175b2012-07-31 16:43:32 -07007175 free_percpu(zone->pageset);
7176 zone->pageset = &boot_pageset;
7177 }
7178 local_irq_restore(flags);
7179}
7180
Wen Congyang6dcd73d2012-12-11 16:01:01 -08007181#ifdef CONFIG_MEMORY_HOTREMOVE
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007182/*
Joonsoo Kimb9eb6312016-05-19 17:12:06 -07007183 * All pages in the range must be in a single zone and isolated
7184 * before calling this.
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007185 */
7186void
7187__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7188{
7189 struct page *page;
7190 struct zone *zone;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07007191 unsigned int order, i;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007192 unsigned long pfn;
7193 unsigned long flags;
7194 /* find the first valid pfn */
7195 for (pfn = start_pfn; pfn < end_pfn; pfn++)
7196 if (pfn_valid(pfn))
7197 break;
7198 if (pfn == end_pfn)
7199 return;
7200 zone = page_zone(pfn_to_page(pfn));
7201 spin_lock_irqsave(&zone->lock, flags);
7202 pfn = start_pfn;
7203 while (pfn < end_pfn) {
7204 if (!pfn_valid(pfn)) {
7205 pfn++;
7206 continue;
7207 }
7208 page = pfn_to_page(pfn);
Wen Congyangb023f462012-12-11 16:00:45 -08007209 /*
7210 * The HWPoisoned page may be not in buddy system, and
7211 * page_count() is not 0.
7212 */
7213 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7214 pfn++;
7215 SetPageReserved(page);
7216 continue;
7217 }
7218
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007219 BUG_ON(page_count(page));
7220 BUG_ON(!PageBuddy(page));
7221 order = page_order(page);
7222#ifdef CONFIG_DEBUG_VM
Joe Perches11705322016-03-17 14:19:50 -07007223 pr_info("remove from free list %lx %d %lx\n",
7224 pfn, 1 << order, end_pfn);
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007225#endif
7226 list_del(&page->lru);
7227 rmv_page_order(page);
7228 zone->free_area[order].nr_free--;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007229 for (i = 0; i < (1 << order); i++)
7230 SetPageReserved((page+i));
7231 pfn += (1 << order);
7232 }
7233 spin_unlock_irqrestore(&zone->lock, flags);
7234}
7235#endif
Wu Fengguang8d22ba12009-12-16 12:19:58 +01007236
Wu Fengguang8d22ba12009-12-16 12:19:58 +01007237bool is_free_buddy_page(struct page *page)
7238{
7239 struct zone *zone = page_zone(page);
7240 unsigned long pfn = page_to_pfn(page);
7241 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07007242 unsigned int order;
Wu Fengguang8d22ba12009-12-16 12:19:58 +01007243
7244 spin_lock_irqsave(&zone->lock, flags);
7245 for (order = 0; order < MAX_ORDER; order++) {
7246 struct page *page_head = page - (pfn & ((1 << order) - 1));
7247
7248 if (PageBuddy(page_head) && page_order(page_head) >= order)
7249 break;
7250 }
7251 spin_unlock_irqrestore(&zone->lock, flags);
7252
7253 return order < MAX_ORDER;
7254}
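
/*
 * Worked example for the head lookup in the loop above (illustrative
 * numbers): with pfn == 0x12345 and order == 3, pfn & ((1 << 3) - 1) == 5,
 * so page_head == page - 5, i.e. the first page of the naturally aligned
 * order-3 block that would contain this pfn.
 */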