/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *	(lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page_ext.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory. This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

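/*
 * Returns true if storage may be unavailable because suspend/hibernation is
 * in progress, i.e. pm_restrict_gfp_mask() has cleared __GFP_IO and __GFP_FS
 * from gfp_allowed_mask.
 */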
bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
		return true;

	return false;
}

static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
{
	if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	/* Always populate low zones for address-constrained allocations */
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;

	/* Initialise at least 2G of the highest zone */
	(*nr_initialised)++;
	if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif


void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page_badflags(page, reason, bad_flags);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set. All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function. Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->first_page = page;
		/* Make sure p->first_page is always valid for PageTail() */
		smp_wmb();
		__SetPageTail(p);
	}
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly;
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		_debug_pagealloc_enabled = true;

	return 0;
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops = { NULL, };
static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

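/*
 * Record the order of a free block in page_private() and mark the page as
 * being in the buddy allocator (PageBuddy).
 */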
static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 * field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

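/*
 * The buddy of the block starting at page_idx is found by flipping bit
 * 'order' of the index, i.e. buddy_idx = page_idx ^ (1 << order), which is
 * what __find_buddy_index() computes.
 */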
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;
	int max_order = MAX_ORDER;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (is_migrate_isolate(migratetype)) {
		/*
		 * We restrict max order of merging to prevent merge
		 * between freepages on isolate pageblock and normal
		 * pageblock. Without this, pageblock isolation
		 * could cause incorrect freepage accounting.
		 */
		max_order = min(MAX_ORDER, pageblock_order + 1);
	} else {
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	}

	page_idx = pfn & ((1 << max_order) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

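/*
 * Check for state that must never be present on a page being freed; reports
 * a bad page and returns nonzero if the page cannot be freed safely.
 */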
static inline int free_pages_check(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;
	unsigned long nr_scanned;

	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered. This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);

			mt = get_pcppage_migratetype(page);
			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(has_isolate_pageblock(zone)))
				mt = get_pageblock_migratetype(page);

			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

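/*
 * Free a single block of the given order directly to the buddy lists,
 * taking zone->lock and re-reading the migratetype if the pageblock may
 * have been isolated in the meantime.
 */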
static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;
	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

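/*
 * Sanity checks applied to every tail page of a compound page as it is
 * freed; the checks are skipped unless CONFIG_DEBUG_VM is enabled.
 */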
static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return 0;
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		return 1;
	}
	if (unlikely(page->first_page != head_page)) {
		bad_page(page, "first_page not consistent", 0);
		return 1;
	}
	return 0;
}

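/*
 * One-time initialisation of a struct page: link it to its zone, node and
 * pfn, give it an initial reference count and reset mapcount and last-cpupid.
 */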
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
					int nid)
{
	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
			break;
	}
	__init_single_pfn(pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);
			SetPageReserved(page);
		}
	}
}

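/*
 * Checks and bookkeeping done once per free, before a page (and the tail
 * pages of a compound page) is handed back to the buddy or pcp lists.
 */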
static bool free_pages_prepare(struct page *page, unsigned int order)
{
	bool compound = PageCompound(page);
	int i, bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);
	kasan_free_pages(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	bad += free_pages_check(page);
	for (i = 1; i < (1 << order); i++) {
		if (compound)
			bad += free_tail_pages_check(page, page + i);
		bad += free_pages_check(page + i);
	}
	if (bad)
		return false;

	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

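/*
 * Hand a block of fresh boot memory to the page allocator: clear the
 * reserved flag on each page, account the pages as managed and free the
 * whole block.
 */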
Mel Gorman0e1cc952015-06-30 14:57:27 -0700984static void __init __free_pages_boot_core(struct page *page,
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700985 unsigned long pfn, unsigned int order)
David Howellsa226f6c2006-01-06 00:11:08 -0800986{
Johannes Weinerc3993072012-01-10 15:08:10 -0800987 unsigned int nr_pages = 1 << order;
Yinghai Lue2d0bd22013-09-11 14:20:37 -0700988 struct page *p = page;
Johannes Weinerc3993072012-01-10 15:08:10 -0800989 unsigned int loop;
David Howellsa226f6c2006-01-06 00:11:08 -0800990
Yinghai Lue2d0bd22013-09-11 14:20:37 -0700991 prefetchw(p);
992 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
993 prefetchw(p + 1);
Johannes Weinerc3993072012-01-10 15:08:10 -0800994 __ClearPageReserved(p);
995 set_page_count(p, 0);
David Howellsa226f6c2006-01-06 00:11:08 -0800996 }
Yinghai Lue2d0bd22013-09-11 14:20:37 -0700997 __ClearPageReserved(p);
998 set_page_count(p, 0);
Johannes Weinerc3993072012-01-10 15:08:10 -0800999
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001000 page_zone(page)->managed_pages += nr_pages;
Johannes Weinerc3993072012-01-10 15:08:10 -08001001 set_page_refcounted(page);
1002 __free_pages(page, order);
David Howellsa226f6c2006-01-06 00:11:08 -08001003}
1004
Mel Gorman75a592a2015-06-30 14:56:59 -07001005#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1006 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
Mel Gorman7ace9912015-08-06 15:46:13 -07001007
Mel Gorman75a592a2015-06-30 14:56:59 -07001008static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1009
1010int __meminit early_pfn_to_nid(unsigned long pfn)
1011{
Mel Gorman7ace9912015-08-06 15:46:13 -07001012 static DEFINE_SPINLOCK(early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001013 int nid;
1014
Mel Gorman7ace9912015-08-06 15:46:13 -07001015 spin_lock(&early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001016 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
Mel Gorman7ace9912015-08-06 15:46:13 -07001017 if (nid < 0)
1018 nid = 0;
1019 spin_unlock(&early_pfn_lock);
1020
1021 return nid;
Mel Gorman75a592a2015-06-30 14:56:59 -07001022}
1023#endif
1024
1025#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1026static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1027 struct mminit_pfnnid_cache *state)
1028{
1029 int nid;
1030
1031 nid = __early_pfn_to_nid(pfn, state);
1032 if (nid >= 0 && nid != node)
1033 return false;
1034 return true;
1035}
1036
1037/* Only safe to use early in boot when initialisation is single-threaded */
1038static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1039{
1040 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1041}
1042
1043#else
1044
1045static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1046{
1047 return true;
1048}
1049static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1050 struct mminit_pfnnid_cache *state)
1051{
1052 return true;
1053}
1054#endif
1055
1056
Mel Gorman0e1cc952015-06-30 14:57:27 -07001057void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001058 unsigned int order)
1059{
1060 if (early_page_uninitialised(pfn))
1061 return;
1062 return __free_pages_boot_core(page, pfn, order);
1063}
1064
Mel Gorman7e18adb2015-06-30 14:57:05 -07001065#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Mel Gorman0e1cc952015-06-30 14:57:27 -07001066static void __init deferred_free_range(struct page *page,
Mel Gormana4de83d2015-06-30 14:57:16 -07001067 unsigned long pfn, int nr_pages)
1068{
1069 int i;
1070
1071 if (!page)
1072 return;
1073
1074 /* Free a large naturally-aligned chunk if possible */
1075 if (nr_pages == MAX_ORDER_NR_PAGES &&
1076 (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
Mel Gormanac5d2532015-06-30 14:57:20 -07001077 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Mel Gormana4de83d2015-06-30 14:57:16 -07001078 __free_pages_boot_core(page, pfn, MAX_ORDER-1);
1079 return;
1080 }
1081
1082 for (i = 0; i < nr_pages; i++, page++, pfn++)
1083 __free_pages_boot_core(page, pfn, 0);
1084}
1085
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001086/* Completion tracking for deferred_init_memmap() threads */
1087static atomic_t pgdat_init_n_undone __initdata;
1088static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1089
1090static inline void __init pgdat_init_report_one_done(void)
1091{
1092 if (atomic_dec_and_test(&pgdat_init_n_undone))
1093 complete(&pgdat_init_all_done_comp);
1094}
Mel Gorman0e1cc952015-06-30 14:57:27 -07001095
Mel Gorman7e18adb2015-06-30 14:57:05 -07001096/* Initialise remaining memory on a node */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001097static int __init deferred_init_memmap(void *data)
Mel Gorman7e18adb2015-06-30 14:57:05 -07001098{
Mel Gorman0e1cc952015-06-30 14:57:27 -07001099 pg_data_t *pgdat = data;
1100 int nid = pgdat->node_id;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001101 struct mminit_pfnnid_cache nid_init_state = { };
1102 unsigned long start = jiffies;
1103 unsigned long nr_pages = 0;
1104 unsigned long walk_start, walk_end;
1105 int i, zid;
1106 struct zone *zone;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001107 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
Mel Gorman0e1cc952015-06-30 14:57:27 -07001108 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001109
Mel Gorman0e1cc952015-06-30 14:57:27 -07001110 if (first_init_pfn == ULONG_MAX) {
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001111 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001112 return 0;
1113 }
1114
1115 /* Bind memory initialisation thread to a local node if possible */
1116 if (!cpumask_empty(cpumask))
1117 set_cpus_allowed_ptr(current, cpumask);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001118
1119 /* Sanity check boundaries */
1120 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1121 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1122 pgdat->first_deferred_pfn = ULONG_MAX;
1123
1124 /* Only the highest zone is deferred so find it */
1125 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1126 zone = pgdat->node_zones + zid;
1127 if (first_init_pfn < zone_end_pfn(zone))
1128 break;
1129 }
1130
1131 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1132 unsigned long pfn, end_pfn;
Mel Gorman54608c32015-06-30 14:57:09 -07001133 struct page *page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001134 struct page *free_base_page = NULL;
1135 unsigned long free_base_pfn = 0;
1136 int nr_to_free = 0;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001137
1138 end_pfn = min(walk_end, zone_end_pfn(zone));
1139 pfn = first_init_pfn;
1140 if (pfn < walk_start)
1141 pfn = walk_start;
1142 if (pfn < zone->zone_start_pfn)
1143 pfn = zone->zone_start_pfn;
1144
1145 for (; pfn < end_pfn; pfn++) {
Mel Gorman54608c32015-06-30 14:57:09 -07001146 if (!pfn_valid_within(pfn))
Mel Gormana4de83d2015-06-30 14:57:16 -07001147 goto free_range;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001148
Mel Gorman54608c32015-06-30 14:57:09 -07001149 /*
1150 * Ensure pfn_valid is checked every
1151 * MAX_ORDER_NR_PAGES for memory holes
1152 */
1153 if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
1154 if (!pfn_valid(pfn)) {
1155 page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001156 goto free_range;
Mel Gorman54608c32015-06-30 14:57:09 -07001157 }
1158 }
1159
1160 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1161 page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001162 goto free_range;
Mel Gorman54608c32015-06-30 14:57:09 -07001163 }
1164
1165 /* Minimise pfn page lookups and scheduler checks */
1166 if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
1167 page++;
1168 } else {
Mel Gormana4de83d2015-06-30 14:57:16 -07001169 nr_pages += nr_to_free;
1170 deferred_free_range(free_base_page,
1171 free_base_pfn, nr_to_free);
1172 free_base_page = NULL;
1173 free_base_pfn = nr_to_free = 0;
1174
Mel Gorman54608c32015-06-30 14:57:09 -07001175 page = pfn_to_page(pfn);
1176 cond_resched();
1177 }
Mel Gorman7e18adb2015-06-30 14:57:05 -07001178
1179 if (page->flags) {
1180 VM_BUG_ON(page_zone(page) != zone);
Mel Gormana4de83d2015-06-30 14:57:16 -07001181 goto free_range;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001182 }
1183
1184 __init_single_page(page, pfn, zid, nid);
Mel Gormana4de83d2015-06-30 14:57:16 -07001185 if (!free_base_page) {
1186 free_base_page = page;
1187 free_base_pfn = pfn;
1188 nr_to_free = 0;
1189 }
1190 nr_to_free++;
1191
1192 /* Where possible, batch up pages for a single free */
1193 continue;
1194free_range:
1195 /* Free the current block of pages to allocator */
1196 nr_pages += nr_to_free;
1197 deferred_free_range(free_base_page, free_base_pfn,
1198 nr_to_free);
1199 free_base_page = NULL;
1200 free_base_pfn = nr_to_free = 0;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001201 }
Mel Gormana4de83d2015-06-30 14:57:16 -07001202
Mel Gorman7e18adb2015-06-30 14:57:05 -07001203 first_init_pfn = max(end_pfn, first_init_pfn);
1204 }
1205
1206 /* Sanity check that the next zone really is unpopulated */
1207 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1208
Mel Gorman0e1cc952015-06-30 14:57:27 -07001209 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
Mel Gorman7e18adb2015-06-30 14:57:05 -07001210 jiffies_to_msecs(jiffies - start));
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001211
1212 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001213 return 0;
1214}
1215
1216void __init page_alloc_init_late(void)
1217{
1218 int nid;
1219
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001220 /* There will be num_node_state(N_MEMORY) threads */
1221 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
Mel Gorman0e1cc952015-06-30 14:57:27 -07001222 for_each_node_state(nid, N_MEMORY) {
Mel Gorman0e1cc952015-06-30 14:57:27 -07001223 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1224 }
1225
1226 /* Block until all are initialised */
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001227 wait_for_completion(&pgdat_init_all_done_comp);
Mel Gorman4248b0d2015-08-06 15:46:20 -07001228
1229 /* Reinit limits that are based on free pages after the kernel is up */
1230 files_maxfiles_init();
Mel Gorman7e18adb2015-06-30 14:57:05 -07001231}
1232#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
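/*
 * Illustrative sketch of the rendezvous used by the deferred memmap
 * initialisation above: an atomic count of outstanding per-node threads plus
 * a completion fired by the last one. The demo_* names are hypothetical and
 * the block is kept out of the build with #if 0.
 */
#if 0
static atomic_t demo_n_undone;
static DECLARE_COMPLETION(demo_all_done);

static int demo_worker(void *unused)
{
	/* ... per-node work would go here ... */
	if (atomic_dec_and_test(&demo_n_undone))
		complete(&demo_all_done);
	return 0;
}

static void demo_run_and_wait(int nr_workers)
{
	int i;

	atomic_set(&demo_n_undone, nr_workers);
	for (i = 0; i < nr_workers; i++)
		kthread_run(demo_worker, NULL, "demo_worker%d", i);

	/* Block until the last worker has reported in */
	wait_for_completion(&demo_all_done);
}
#endif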
1233
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001234#ifdef CONFIG_CMA
Li Zhong9cf510a2013-08-23 13:52:52 +08001235/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001236void __init init_cma_reserved_pageblock(struct page *page)
1237{
1238 unsigned i = pageblock_nr_pages;
1239 struct page *p = page;
1240
1241 do {
1242 __ClearPageReserved(p);
1243 set_page_count(p, 0);
1244 } while (++p, --i);
1245
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001246 set_pageblock_migratetype(page, MIGRATE_CMA);
Michal Nazarewiczdc783272014-07-02 15:22:35 -07001247
1248 if (pageblock_order >= MAX_ORDER) {
1249 i = pageblock_nr_pages;
1250 p = page;
1251 do {
1252 set_page_refcounted(p);
1253 __free_pages(p, MAX_ORDER - 1);
1254 p += MAX_ORDER_NR_PAGES;
1255 } while (i -= MAX_ORDER_NR_PAGES);
1256 } else {
1257 set_page_refcounted(page);
1258 __free_pages(page, pageblock_order);
1259 }
1260
Jiang Liu3dcc0572013-07-03 15:03:21 -07001261 adjust_managed_page_count(page, pageblock_nr_pages);
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001262}
1263#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264
1265/*
1266 * The order of subdivision here is critical for the IO subsystem.
1267 * Please do not alter this order without good reasons and regression
1268 * testing. Specifically, as large blocks of memory are subdivided,
1269 * the order in which smaller blocks are delivered depends on the order
1270 * they're subdivided in this function. This is the primary factor
1271 * influencing the order in which pages are delivered to the IO
1272 * subsystem according to empirical testing, and this is also justified
1273 * by considering the behavior of a buddy system containing a single
1274 * large block of memory acted on by a series of small allocations.
1275 * This behavior is a critical factor in sglist merging's success.
1276 *
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +01001277 * -- nyc
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 */
Nick Piggin085cc7d2006-01-06 00:11:01 -08001279static inline void expand(struct zone *zone, struct page *page,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001280 int low, int high, struct free_area *area,
1281 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282{
1283 unsigned long size = 1 << high;
1284
1285 while (high > low) {
1286 area--;
1287 high--;
1288 size >>= 1;
Sasha Levin309381fea2014-01-23 15:52:54 -08001289 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001290
Joonsoo Kim2847cf92014-12-12 16:55:01 -08001291 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
Joonsoo Kime30825f2014-12-12 16:55:49 -08001292 debug_guardpage_enabled() &&
Joonsoo Kim2847cf92014-12-12 16:55:01 -08001293 high < debug_guardpage_minorder()) {
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001294 /*
 1295			 * Mark as guard pages (or page) so they can be merged back
 1296			 * into the allocator when the buddy is freed. The
 1297			 * corresponding page table entries are not touched; the
 1298			 * pages will stay not present in the virtual address space.
1299 */
Joonsoo Kim2847cf92014-12-12 16:55:01 -08001300 set_page_guard(zone, &page[size], high, migratetype);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001301 continue;
1302 }
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001303 list_add(&page[size].lru, &area->free_list[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 area->nr_free++;
1305 set_page_order(&page[size], high);
1306 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307}
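/*
 * Worked example (illustrative): expanding an order-3 block to satisfy an
 * order-0 request (low = 0, high = 3, size = 8) runs the loop above three
 * times, queueing the upper halves page[4] (order 2), page[2] (order 1) and
 * page[1] (order 0) on the free lists, and leaving page[0] as the order-0
 * page returned to the caller.
 */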
1308
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309/*
1310 * This page is about to be returned from the page allocator
1311 */
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001312static inline int check_new_page(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313{
Kirill A. Shutemovd230dec2014-04-07 15:37:38 -07001314 const char *bad_reason = NULL;
Dave Hansenf0b791a2014-01-23 15:52:49 -08001315 unsigned long bad_flags = 0;
1316
1317 if (unlikely(page_mapcount(page)))
1318 bad_reason = "nonzero mapcount";
1319 if (unlikely(page->mapping != NULL))
1320 bad_reason = "non-NULL mapping";
1321 if (unlikely(atomic_read(&page->_count) != 0))
1322 bad_reason = "nonzero _count";
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07001323 if (unlikely(page->flags & __PG_HWPOISON)) {
1324 bad_reason = "HWPoisoned (hardware-corrupted)";
1325 bad_flags = __PG_HWPOISON;
1326 }
Dave Hansenf0b791a2014-01-23 15:52:49 -08001327 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1328 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1329 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1330 }
Johannes Weiner9edad6e2014-12-10 15:44:58 -08001331#ifdef CONFIG_MEMCG
1332 if (unlikely(page->mem_cgroup))
1333 bad_reason = "page still charged to cgroup";
1334#endif
Dave Hansenf0b791a2014-01-23 15:52:49 -08001335 if (unlikely(bad_reason)) {
1336 bad_page(page, bad_reason, bad_flags);
Hugh Dickins689bceb2005-11-21 21:32:20 -08001337 return 1;
Hugh Dickins8cc3b392009-01-06 14:40:06 -08001338 }
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001339 return 0;
1340}
1341
Vlastimil Babka75379192015-02-11 15:25:38 -08001342static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1343 int alloc_flags)
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001344{
1345 int i;
1346
1347 for (i = 0; i < (1 << order); i++) {
1348 struct page *p = page + i;
1349 if (unlikely(check_new_page(p)))
1350 return 1;
1351 }
Hugh Dickins689bceb2005-11-21 21:32:20 -08001352
Hugh Dickins4c21e2f2005-10-29 18:16:40 -07001353 set_page_private(page, 0);
Nick Piggin7835e982006-03-22 00:08:40 -08001354 set_page_refcounted(page);
Nick Piggincc1025092006-12-06 20:32:00 -08001355
1356 arch_alloc_page(page, order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 kernel_map_pages(page, 1 << order, 1);
Andrey Ryabininb8c73fc2015-02-13 14:39:28 -08001358 kasan_alloc_pages(page, order);
Nick Piggin17cf4402006-03-22 00:08:41 -08001359
1360 if (gfp_flags & __GFP_ZERO)
Anisse Astierf4d28972015-06-24 16:56:36 -07001361 for (i = 0; i < (1 << order); i++)
1362 clear_highpage(page + i);
Nick Piggin17cf4402006-03-22 00:08:41 -08001363
1364 if (order && (gfp_flags & __GFP_COMP))
1365 prep_compound_page(page, order);
1366
Joonsoo Kim48c96a32014-12-12 16:56:01 -08001367 set_page_owner(page, order, gfp_flags);
1368
Vlastimil Babka75379192015-02-11 15:25:38 -08001369 /*
Michal Hocko2f064f32015-08-21 14:11:51 -07001370 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
Vlastimil Babka75379192015-02-11 15:25:38 -08001371 * allocate the page. The expectation is that the caller is taking
1372 * steps that will free more memory. The caller should avoid the page
1373 * being used for !PFMEMALLOC purposes.
1374 */
Michal Hocko2f064f32015-08-21 14:11:51 -07001375 if (alloc_flags & ALLOC_NO_WATERMARKS)
1376 set_page_pfmemalloc(page);
1377 else
1378 clear_page_pfmemalloc(page);
Vlastimil Babka75379192015-02-11 15:25:38 -08001379
Hugh Dickins689bceb2005-11-21 21:32:20 -08001380 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381}
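/*
 * Usage sketch (illustrative, hypothetical demo_* helper, kept out of the
 * build): a caller passing __GFP_ZERO and __GFP_COMP exercises the prep path
 * above -- every sub-page is cleared and the allocation is made compound.
 */
#if 0
static struct page *demo_alloc_zeroed_compound(unsigned int order)
{
	return alloc_pages(GFP_KERNEL | __GFP_ZERO | __GFP_COMP, order);
}
#endif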
1382
Mel Gorman56fd56b2007-10-16 01:25:58 -07001383/*
1384 * Go through the free lists for the given migratetype and remove
1385 * the smallest available page from the freelists
1386 */
Mel Gorman728ec982009-06-16 15:32:04 -07001387static inline
1388struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
Mel Gorman56fd56b2007-10-16 01:25:58 -07001389 int migratetype)
1390{
1391 unsigned int current_order;
Pintu Kumarb8af2942013-09-11 14:20:34 -07001392 struct free_area *area;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001393 struct page *page;
1394
1395 /* Find a page of the appropriate size in the preferred list */
1396 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1397 area = &(zone->free_area[current_order]);
1398 if (list_empty(&area->free_list[migratetype]))
1399 continue;
1400
1401 page = list_entry(area->free_list[migratetype].next,
1402 struct page, lru);
1403 list_del(&page->lru);
1404 rmv_page_order(page);
1405 area->nr_free--;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001406 expand(zone, page, order, current_order, area, migratetype);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001407 set_pcppage_migratetype(page, migratetype);
Mel Gorman56fd56b2007-10-16 01:25:58 -07001408 return page;
1409 }
1410
1411 return NULL;
1412}
1413
1414
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001415/*
1416 * This array describes the order lists are fallen back to when
1417 * the free lists for the desirable migrate type are depleted
1418 */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001419static int fallbacks[MIGRATE_TYPES][4] = {
1420 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
1421 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001422 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
Joonsoo Kimdc676472015-04-14 15:45:15 -07001423#ifdef CONFIG_CMA
1424 [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001425#endif
Michal Nazarewicz6d4a4912012-01-11 15:31:33 +01001426 [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
Minchan Kim194159f2013-02-22 16:33:58 -08001427#ifdef CONFIG_MEMORY_ISOLATION
Michal Nazarewicz6d4a4912012-01-11 15:31:33 +01001428 [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
Minchan Kim194159f2013-02-22 16:33:58 -08001429#endif
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001430};
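/*
 * For example (illustrative): an unmovable request that finds its own free
 * lists empty is tried against MIGRATE_RECLAIMABLE first, then
 * MIGRATE_MOVABLE, and the walk stops once MIGRATE_RESERVE is reached; see
 * find_suitable_fallback() below.
 */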
1431
Joonsoo Kimdc676472015-04-14 15:45:15 -07001432#ifdef CONFIG_CMA
1433static struct page *__rmqueue_cma_fallback(struct zone *zone,
1434 unsigned int order)
1435{
1436 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1437}
1438#else
1439static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1440 unsigned int order) { return NULL; }
1441#endif
1442
Mel Gormanc361be52007-10-16 01:25:51 -07001443/*
1444 * Move the free pages in a range to the free lists of the requested type.
Mel Gormand9c23402007-10-16 01:26:01 -07001445 * Note that start_page and end_page are not aligned on a pageblock
Mel Gormanc361be52007-10-16 01:25:51 -07001446 * boundary. If alignment is required, use move_freepages_block()
1447 */
Minchan Kim435b4052012-10-08 16:32:16 -07001448int move_freepages(struct zone *zone,
Adrian Bunkb69a7282008-07-23 21:28:12 -07001449 struct page *start_page, struct page *end_page,
1450 int migratetype)
Mel Gormanc361be52007-10-16 01:25:51 -07001451{
1452 struct page *page;
1453 unsigned long order;
Mel Gormand1003132007-10-16 01:26:00 -07001454 int pages_moved = 0;
Mel Gormanc361be52007-10-16 01:25:51 -07001455
1456#ifndef CONFIG_HOLES_IN_ZONE
1457 /*
1458 * page_zone is not safe to call in this context when
1459 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1460 * anyway as we check zone boundaries in move_freepages_block().
1461 * Remove at a later date when no bug reports exist related to
Mel Gormanac0e5b72007-10-16 01:25:58 -07001462 * grouping pages by mobility
Mel Gormanc361be52007-10-16 01:25:51 -07001463 */
Mel Gorman97ee4ba2014-10-09 15:28:28 -07001464 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
Mel Gormanc361be52007-10-16 01:25:51 -07001465#endif
1466
1467 for (page = start_page; page <= end_page;) {
Adam Litke344c7902008-09-02 14:35:38 -07001468 /* Make sure we are not inadvertently changing nodes */
Sasha Levin309381fea2014-01-23 15:52:54 -08001469 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
Adam Litke344c7902008-09-02 14:35:38 -07001470
Mel Gormanc361be52007-10-16 01:25:51 -07001471 if (!pfn_valid_within(page_to_pfn(page))) {
1472 page++;
1473 continue;
1474 }
1475
1476 if (!PageBuddy(page)) {
1477 page++;
1478 continue;
1479 }
1480
1481 order = page_order(page);
Kirill A. Shutemov84be48d2011-03-22 16:33:41 -07001482 list_move(&page->lru,
1483 &zone->free_area[order].free_list[migratetype]);
Mel Gormanc361be52007-10-16 01:25:51 -07001484 page += 1 << order;
Mel Gormand1003132007-10-16 01:26:00 -07001485 pages_moved += 1 << order;
Mel Gormanc361be52007-10-16 01:25:51 -07001486 }
1487
Mel Gormand1003132007-10-16 01:26:00 -07001488 return pages_moved;
Mel Gormanc361be52007-10-16 01:25:51 -07001489}
1490
Minchan Kimee6f5092012-07-31 16:43:50 -07001491int move_freepages_block(struct zone *zone, struct page *page,
Linus Torvalds68e3e922012-06-03 20:05:57 -07001492 int migratetype)
Mel Gormanc361be52007-10-16 01:25:51 -07001493{
1494 unsigned long start_pfn, end_pfn;
1495 struct page *start_page, *end_page;
1496
1497 start_pfn = page_to_pfn(page);
Mel Gormand9c23402007-10-16 01:26:01 -07001498 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
Mel Gormanc361be52007-10-16 01:25:51 -07001499 start_page = pfn_to_page(start_pfn);
Mel Gormand9c23402007-10-16 01:26:01 -07001500 end_page = start_page + pageblock_nr_pages - 1;
1501 end_pfn = start_pfn + pageblock_nr_pages - 1;
Mel Gormanc361be52007-10-16 01:25:51 -07001502
1503 /* Do not cross zone boundaries */
Cody P Schafer108bcc92013-02-22 16:35:23 -08001504 if (!zone_spans_pfn(zone, start_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07001505 start_page = page;
Cody P Schafer108bcc92013-02-22 16:35:23 -08001506 if (!zone_spans_pfn(zone, end_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07001507 return 0;
1508
1509 return move_freepages(zone, start_page, end_page, migratetype);
1510}
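/*
 * Illustrative arithmetic for the masking above: with pageblock_nr_pages ==
 * 512 (e.g. pageblock_order == 9), a page at pfn 70000 rounds down to
 * start_pfn 69632 (70000 & ~511) and the block then spans pfns 69632-70143.
 */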
1511
Mel Gorman2f66a682009-09-21 17:02:31 -07001512static void change_pageblock_range(struct page *pageblock_page,
1513 int start_order, int migratetype)
1514{
1515 int nr_pageblocks = 1 << (start_order - pageblock_order);
1516
1517 while (nr_pageblocks--) {
1518 set_pageblock_migratetype(pageblock_page, migratetype);
1519 pageblock_page += pageblock_nr_pages;
1520 }
1521}
1522
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001523/*
Vlastimil Babka9c0415e2015-02-11 15:28:21 -08001524 * When we are falling back to another migratetype during allocation, try to
1525 * steal extra free pages from the same pageblocks to satisfy further
1526 * allocations, instead of polluting multiple pageblocks.
1527 *
1528 * If we are stealing a relatively large buddy page, it is likely there will
1529 * be more free pages in the pageblock, so try to steal them all. For
1530 * reclaimable and unmovable allocations, we steal regardless of page size,
1531 * as fragmentation caused by those allocations polluting movable pageblocks
1532 * is worse than movable allocations stealing from unmovable and reclaimable
1533 * pageblocks.
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001534 */
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001535static bool can_steal_fallback(unsigned int order, int start_mt)
1536{
1537 /*
 1538 * This order check is intentionally kept, even though the check
 1539 * below relaxes it. The reason is that we can steal the whole
 1540 * pageblock if this condition is met, whereas the check below
 1541 * does not guarantee that and is only a heuristic, so it could
 1542 * be changed at any time.
1543 */
1544 if (order >= pageblock_order)
1545 return true;
1546
1547 if (order >= pageblock_order / 2 ||
1548 start_mt == MIGRATE_RECLAIMABLE ||
1549 start_mt == MIGRATE_UNMOVABLE ||
1550 page_group_by_mobility_disabled)
1551 return true;
1552
1553 return false;
1554}
1555
1556/*
 1557 * This function implements the actual steal behaviour. If the order is large
 1558 * enough, we can steal the whole pageblock. If not, we first move the free
 1559 * pages in this pageblock and check whether at least half of them were moved.
 1560 * If so, we can change the migratetype of the pageblock and permanently use
 1561 * its pages for the requested migratetype in the future.
1562 */
1563static void steal_suitable_fallback(struct zone *zone, struct page *page,
1564 int start_type)
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001565{
1566 int current_order = page_order(page);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001567 int pages;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001568
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001569 /* Take ownership for orders >= pageblock_order */
1570 if (current_order >= pageblock_order) {
1571 change_pageblock_range(page, current_order, start_type);
Vlastimil Babka3a1086f2015-02-11 15:28:18 -08001572 return;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001573 }
1574
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001575 pages = move_freepages_block(zone, page, start_type);
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001576
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001577 /* Claim the whole block if over half of it is free */
1578 if (pages >= (1 << (pageblock_order-1)) ||
1579 page_group_by_mobility_disabled)
1580 set_pageblock_migratetype(page, start_type);
1581}
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001582
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001583/*
1584 * Check whether there is a suitable fallback freepage with requested order.
1585 * If only_stealable is true, this function returns fallback_mt only if
1586 * we can steal other freepages all together. This would help to reduce
1587 * fragmentation due to mixed migratetype pages in one pageblock.
1588 */
1589int find_suitable_fallback(struct free_area *area, unsigned int order,
1590 int migratetype, bool only_stealable, bool *can_steal)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001591{
1592 int i;
1593 int fallback_mt;
1594
1595 if (area->nr_free == 0)
1596 return -1;
1597
1598 *can_steal = false;
1599 for (i = 0;; i++) {
1600 fallback_mt = fallbacks[migratetype][i];
1601 if (fallback_mt == MIGRATE_RESERVE)
1602 break;
1603
1604 if (list_empty(&area->free_list[fallback_mt]))
1605 continue;
1606
1607 if (can_steal_fallback(order, migratetype))
1608 *can_steal = true;
1609
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001610 if (!only_stealable)
1611 return fallback_mt;
1612
1613 if (*can_steal)
1614 return fallback_mt;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001615 }
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001616
1617 return -1;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001618}
1619
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001620/* Remove an element from the buddy allocator from the fallback list */
Mel Gorman0ac3a402009-06-16 15:32:06 -07001621static inline struct page *
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001622__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001623{
Pintu Kumarb8af2942013-09-11 14:20:34 -07001624 struct free_area *area;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001625 unsigned int current_order;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001626 struct page *page;
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001627 int fallback_mt;
1628 bool can_steal;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001629
1630 /* Find the largest possible block of pages in the other list */
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001631 for (current_order = MAX_ORDER-1;
1632 current_order >= order && current_order <= MAX_ORDER-1;
1633 --current_order) {
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001634 area = &(zone->free_area[current_order]);
1635 fallback_mt = find_suitable_fallback(area, current_order,
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001636 start_migratetype, false, &can_steal);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001637 if (fallback_mt == -1)
1638 continue;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001639
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001640 page = list_entry(area->free_list[fallback_mt].next,
1641 struct page, lru);
1642 if (can_steal)
1643 steal_suitable_fallback(zone, page, start_migratetype);
Mel Gormane0104872007-10-16 01:25:53 -07001644
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001645 /* Remove the page from the freelists */
1646 area->nr_free--;
1647 list_del(&page->lru);
1648 rmv_page_order(page);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001649
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001650 expand(zone, page, order, current_order, area,
1651 start_migratetype);
1652 /*
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001653 * The pcppage_migratetype may differ from pageblock's
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001654 * migratetype depending on the decisions in
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001655 * find_suitable_fallback(). This is OK as long as it does not
1656 * differ for MIGRATE_CMA pageblocks. Those can be used as
1657 * fallback only via special __rmqueue_cma_fallback() function
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001658 */
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001659 set_pcppage_migratetype(page, start_migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001660
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001661 trace_mm_page_alloc_extfrag(page, order, current_order,
1662 start_migratetype, fallback_mt);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001663
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001664 return page;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001665 }
1666
Mel Gorman728ec982009-06-16 15:32:04 -07001667 return NULL;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001668}
1669
Mel Gorman56fd56b2007-10-16 01:25:58 -07001670/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 * Do the hard work of removing an element from the buddy allocator.
1672 * Call me with the zone->lock already held.
1673 */
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001674static struct page *__rmqueue(struct zone *zone, unsigned int order,
1675 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 struct page *page;
1678
Mel Gorman728ec982009-06-16 15:32:04 -07001679retry_reserve:
Mel Gorman56fd56b2007-10-16 01:25:58 -07001680 page = __rmqueue_smallest(zone, order, migratetype);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681
Mel Gorman728ec982009-06-16 15:32:04 -07001682 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
Joonsoo Kimdc676472015-04-14 15:45:15 -07001683 if (migratetype == MIGRATE_MOVABLE)
1684 page = __rmqueue_cma_fallback(zone, order);
1685
1686 if (!page)
1687 page = __rmqueue_fallback(zone, order, migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001688
Mel Gorman728ec982009-06-16 15:32:04 -07001689 /*
1690 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1691 * is used because __rmqueue_smallest is an inline function
1692 * and we want just one call site
1693 */
1694 if (!page) {
1695 migratetype = MIGRATE_RESERVE;
1696 goto retry_reserve;
1697 }
1698 }
1699
Mel Gorman0d3d0622009-09-21 17:02:44 -07001700 trace_mm_page_alloc_zone_locked(page, order, migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001701 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702}
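/*
 * Example of the fallback chain above (illustrative): a MIGRATE_MOVABLE
 * request with no movable pages left first tries the CMA lists via
 * __rmqueue_cma_fallback(), then the generic __rmqueue_fallback(), and only
 * if both fail does it retry with MIGRATE_RESERVE.
 */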
1703
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01001704/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 * Obtain a specified number of elements from the buddy allocator, all under
1706 * a single hold of the lock, for efficiency. Add them to the supplied list.
1707 * Returns the number of new pages which were placed at *list.
1708 */
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01001709static int rmqueue_bulk(struct zone *zone, unsigned int order,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001710 unsigned long count, struct list_head *list,
Mel Gormanb745bc82014-06-04 16:10:22 -07001711 int migratetype, bool cold)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712{
Vlastimil Babka5bcc9f82014-06-04 16:07:22 -07001713 int i;
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01001714
Nick Pigginc54ad302006-01-06 00:10:56 -08001715 spin_lock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 for (i = 0; i < count; ++i) {
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001717 struct page *page = __rmqueue(zone, order, migratetype);
Nick Piggin085cc7d2006-01-06 00:11:01 -08001718 if (unlikely(page == NULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 break;
Mel Gorman81eabcb2007-12-17 16:20:05 -08001720
1721 /*
1722 * Split buddy pages returned by expand() are received here
 1723 * in physical page order. Each page is added to the caller's
 1724 * list and the list head then moves forward. From the caller's
 1725 * perspective, the linked list is ordered by page number in
1726 * some conditions. This is useful for IO devices that can
1727 * merge IO requests if the physical pages are ordered
1728 * properly.
1729 */
Mel Gormanb745bc82014-06-04 16:10:22 -07001730 if (likely(!cold))
Mel Gormane084b2d2009-07-29 15:02:04 -07001731 list_add(&page->lru, list);
1732 else
1733 list_add_tail(&page->lru, list);
Mel Gorman81eabcb2007-12-17 16:20:05 -08001734 list = &page->lru;
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001735 if (is_migrate_cma(get_pcppage_migratetype(page)))
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07001736 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1737 -(1 << order));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 }
Mel Gormanf2260e62009-06-16 15:32:13 -07001739 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
Nick Pigginc54ad302006-01-06 00:10:56 -08001740 spin_unlock(&zone->lock);
Nick Piggin085cc7d2006-01-06 00:11:01 -08001741 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742}
1743
Christoph Lameter4ae7c032005-06-21 17:14:57 -07001744#ifdef CONFIG_NUMA
Christoph Lameter8fce4d82006-03-09 17:33:54 -08001745/*
Christoph Lameter4037d452007-05-09 02:35:14 -07001746 * Called from the vmstat counter updater to drain pagesets of this
1747 * currently executing processor on remote nodes after they have
1748 * expired.
1749 *
Christoph Lameter879336c2006-03-22 00:09:08 -08001750 * Note that this function must be called with the thread pinned to
1751 * a single processor.
Christoph Lameter8fce4d82006-03-09 17:33:54 -08001752 */
Christoph Lameter4037d452007-05-09 02:35:14 -07001753void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
Christoph Lameter4ae7c032005-06-21 17:14:57 -07001754{
Christoph Lameter4ae7c032005-06-21 17:14:57 -07001755 unsigned long flags;
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07001756 int to_drain, batch;
Christoph Lameter4ae7c032005-06-21 17:14:57 -07001757
Christoph Lameter4037d452007-05-09 02:35:14 -07001758 local_irq_save(flags);
Jason Low4db0c3c2015-04-15 16:14:08 -07001759 batch = READ_ONCE(pcp->batch);
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07001760 to_drain = min(pcp->count, batch);
KOSAKI Motohiro2a135152012-07-31 16:42:53 -07001761 if (to_drain > 0) {
1762 free_pcppages_bulk(zone, to_drain, pcp);
1763 pcp->count -= to_drain;
1764 }
Christoph Lameter4037d452007-05-09 02:35:14 -07001765 local_irq_restore(flags);
Christoph Lameter4ae7c032005-06-21 17:14:57 -07001766}
1767#endif
1768
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001769/*
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001770 * Drain pcplists of the indicated processor and zone.
1771 *
1772 * The processor must either be the current processor and the
1773 * thread pinned to the current processor or a processor that
1774 * is not online.
1775 */
1776static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1777{
1778 unsigned long flags;
1779 struct per_cpu_pageset *pset;
1780 struct per_cpu_pages *pcp;
1781
1782 local_irq_save(flags);
1783 pset = per_cpu_ptr(zone->pageset, cpu);
1784
1785 pcp = &pset->pcp;
1786 if (pcp->count) {
1787 free_pcppages_bulk(zone, pcp->count, pcp);
1788 pcp->count = 0;
1789 }
1790 local_irq_restore(flags);
1791}
1792
1793/*
1794 * Drain pcplists of all zones on the indicated processor.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001795 *
1796 * The processor must either be the current processor and the
1797 * thread pinned to the current processor or a processor that
1798 * is not online.
1799 */
1800static void drain_pages(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801{
1802 struct zone *zone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07001804 for_each_populated_zone(zone) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001805 drain_pages_zone(cpu, zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 }
1807}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001809/*
1810 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001811 *
1812 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
1813 * the single zone's pages.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001814 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001815void drain_local_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001816{
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001817 int cpu = smp_processor_id();
1818
1819 if (zone)
1820 drain_pages_zone(cpu, zone);
1821 else
1822 drain_pages(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001823}
1824
1825/*
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07001826 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1827 *
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001828 * When zone parameter is non-NULL, spill just the single zone's pages.
1829 *
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07001830 * Note that this code is protected against sending an IPI to an offline
1831 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1832 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1833 * nothing keeps CPUs from showing up after we populated the cpumask and
1834 * before the call to on_each_cpu_mask().
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001835 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001836void drain_all_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001837{
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07001838 int cpu;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07001839
1840 /*
 1841 * Allocate in the BSS so we won't require allocation in
1842 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1843 */
1844 static cpumask_t cpus_with_pcps;
1845
1846 /*
1847 * We don't care about racing with CPU hotplug event
1848 * as offline notification will cause the notified
1849 * cpu to drain that CPU pcps and on_each_cpu_mask
1850 * disables preemption as part of its processing
1851 */
1852 for_each_online_cpu(cpu) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001853 struct per_cpu_pageset *pcp;
1854 struct zone *z;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07001855 bool has_pcps = false;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001856
1857 if (zone) {
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07001858 pcp = per_cpu_ptr(zone->pageset, cpu);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001859 if (pcp->pcp.count)
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07001860 has_pcps = true;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001861 } else {
1862 for_each_populated_zone(z) {
1863 pcp = per_cpu_ptr(z->pageset, cpu);
1864 if (pcp->pcp.count) {
1865 has_pcps = true;
1866 break;
1867 }
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07001868 }
1869 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001870
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07001871 if (has_pcps)
1872 cpumask_set_cpu(cpu, &cpus_with_pcps);
1873 else
1874 cpumask_clear_cpu(cpu, &cpus_with_pcps);
1875 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08001876 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
1877 zone, 1);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001878}
1879
Rafael J. Wysocki296699d2007-07-29 23:27:18 +02001880#ifdef CONFIG_HIBERNATION
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
1882void mark_free_pages(struct zone *zone)
1883{
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07001884 unsigned long pfn, max_zone_pfn;
1885 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001886 unsigned int order, t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 struct list_head *curr;
1888
Xishi Qiu8080fc02013-09-11 14:21:45 -07001889 if (zone_is_empty(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 return;
1891
1892 spin_lock_irqsave(&zone->lock, flags);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07001893
Cody P Schafer108bcc92013-02-22 16:35:23 -08001894 max_zone_pfn = zone_end_pfn(zone);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07001895 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1896 if (pfn_valid(pfn)) {
1897 struct page *page = pfn_to_page(pfn);
1898
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07001899 if (!swsusp_page_is_forbidden(page))
1900 swsusp_unset_page_free(page);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07001901 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001903 for_each_migratetype_order(order, t) {
1904 list_for_each(curr, &zone->free_area[order].free_list[t]) {
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07001905 unsigned long i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07001907 pfn = page_to_pfn(list_entry(curr, struct page, lru));
1908 for (i = 0; i < (1UL << order); i++)
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07001909 swsusp_set_page_free(pfn_to_page(pfn + i));
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07001910 }
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001911 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 spin_unlock_irqrestore(&zone->lock, flags);
1913}
Mel Gormane2c55dc2007-10-16 01:25:50 -07001914#endif /* CONFIG_HIBERNATION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
1916/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 * Free a 0-order page
Mel Gormanb745bc82014-06-04 16:10:22 -07001918 * cold == true ? free a cold page : free a hot page
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 */
Mel Gormanb745bc82014-06-04 16:10:22 -07001920void free_hot_cold_page(struct page *page, bool cold)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921{
1922 struct zone *zone = page_zone(page);
1923 struct per_cpu_pages *pcp;
1924 unsigned long flags;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001925 unsigned long pfn = page_to_pfn(page);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001926 int migratetype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001928 if (!free_pages_prepare(page, 0))
Hugh Dickins689bceb2005-11-21 21:32:20 -08001929 return;
1930
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001931 migratetype = get_pfnblock_migratetype(page, pfn);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001932 set_pcppage_migratetype(page, migratetype);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 local_irq_save(flags);
Christoph Lameterf8891e52006-06-30 01:55:45 -07001934 __count_vm_event(PGFREE);
Mel Gormanda456f12009-06-16 15:32:08 -07001935
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001936 /*
1937 * We only track unmovable, reclaimable and movable on pcp lists.
1938 * Free ISOLATE pages back to the allocator because they are being
1939 * offlined but treat RESERVE as movable pages so we can get those
1940 * areas back if necessary. Otherwise, we may have to free
1941 * excessively into the page allocator
1942 */
1943 if (migratetype >= MIGRATE_PCPTYPES) {
Minchan Kim194159f2013-02-22 16:33:58 -08001944 if (unlikely(is_migrate_isolate(migratetype))) {
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001945 free_one_page(zone, page, pfn, 0, migratetype);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001946 goto out;
1947 }
1948 migratetype = MIGRATE_MOVABLE;
1949 }
1950
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09001951 pcp = &this_cpu_ptr(zone->pageset)->pcp;
Mel Gormanb745bc82014-06-04 16:10:22 -07001952 if (!cold)
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001953 list_add(&page->lru, &pcp->lists[migratetype]);
Mel Gormanb745bc82014-06-04 16:10:22 -07001954 else
1955 list_add_tail(&page->lru, &pcp->lists[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 pcp->count++;
Nick Piggin48db57f2006-01-08 01:00:42 -08001957 if (pcp->count >= pcp->high) {
Jason Low4db0c3c2015-04-15 16:14:08 -07001958 unsigned long batch = READ_ONCE(pcp->batch);
Cody P Schafer998d39cb2013-07-03 15:01:32 -07001959 free_pcppages_bulk(zone, batch, pcp);
1960 pcp->count -= batch;
Nick Piggin48db57f2006-01-08 01:00:42 -08001961 }
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001962
1963out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965}
1966
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08001967/*
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08001968 * Free a list of 0-order pages
1969 */
Mel Gormanb745bc82014-06-04 16:10:22 -07001970void free_hot_cold_page_list(struct list_head *list, bool cold)
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08001971{
1972 struct page *page, *next;
1973
1974 list_for_each_entry_safe(page, next, list, lru) {
Konstantin Khlebnikovb413d482012-01-10 15:07:09 -08001975 trace_mm_page_free_batched(page, cold);
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08001976 free_hot_cold_page(page, cold);
1977 }
1978}
1979
1980/*
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08001981 * split_page takes a non-compound higher-order page, and splits it into
 1982 * n (1<<order) sub-pages: page[0] .. page[n-1]
1983 * Each sub-page must be freed individually.
1984 *
1985 * Note: this is probably too low level an operation for use in drivers.
1986 * Please consult with lkml before using this in your driver.
1987 */
1988void split_page(struct page *page, unsigned int order)
1989{
1990 int i;
Joonsoo Kime2cfc912015-07-17 16:24:18 -07001991 gfp_t gfp_mask;
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08001992
Sasha Levin309381fea2014-01-23 15:52:54 -08001993 VM_BUG_ON_PAGE(PageCompound(page), page);
1994 VM_BUG_ON_PAGE(!page_count(page), page);
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001995
1996#ifdef CONFIG_KMEMCHECK
1997 /*
1998 * Split shadow pages too, because free(page[0]) would
1999 * otherwise free the whole shadow.
2000 */
2001 if (kmemcheck_page_is_tracked(page))
2002 split_page(virt_to_page(page[0].shadow), order);
2003#endif
2004
Joonsoo Kime2cfc912015-07-17 16:24:18 -07002005 gfp_mask = get_page_owner_gfp(page);
2006 set_page_owner(page, 0, gfp_mask);
Joonsoo Kim48c96a32014-12-12 16:56:01 -08002007 for (i = 1; i < (1 << order); i++) {
Nick Piggin7835e982006-03-22 00:08:40 -08002008 set_page_refcounted(page + i);
Joonsoo Kime2cfc912015-07-17 16:24:18 -07002009 set_page_owner(page + i, 0, gfp_mask);
Joonsoo Kim48c96a32014-12-12 16:56:01 -08002010 }
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002011}
K. Y. Srinivasan5853ff22013-03-25 15:47:38 -07002012EXPORT_SYMBOL_GPL(split_page);
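/*
 * Usage sketch for split_page() (illustrative, hypothetical demo_* helper,
 * kept out of the build): a non-compound higher-order allocation is split
 * and each sub-page is then freed on its own, as the comment above requires.
 */
#if 0
static void demo_split_and_free(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* 4 pages */
	int i;

	if (!page)
		return;

	split_page(page, 2);
	for (i = 0; i < 4; i++)
		__free_page(page + i);
}
#endif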
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002013
Joonsoo Kim3c605092014-11-13 15:19:21 -08002014int __isolate_free_page(struct page *page, unsigned int order)
Mel Gorman748446b2010-05-24 14:32:27 -07002015{
Mel Gorman748446b2010-05-24 14:32:27 -07002016 unsigned long watermark;
2017 struct zone *zone;
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07002018 int mt;
Mel Gorman748446b2010-05-24 14:32:27 -07002019
2020 BUG_ON(!PageBuddy(page));
2021
2022 zone = page_zone(page);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002023 mt = get_pageblock_migratetype(page);
Mel Gorman748446b2010-05-24 14:32:27 -07002024
Minchan Kim194159f2013-02-22 16:33:58 -08002025 if (!is_migrate_isolate(mt)) {
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002026 /* Obey watermarks as if the page was being allocated */
2027 watermark = low_wmark_pages(zone) + (1 << order);
2028 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
2029 return 0;
2030
Mel Gorman8fb74b92013-01-11 14:32:16 -08002031 __mod_zone_freepage_state(zone, -(1UL << order), mt);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002032 }
Mel Gorman748446b2010-05-24 14:32:27 -07002033
2034 /* Remove page from free list */
2035 list_del(&page->lru);
2036 zone->free_area[order].nr_free--;
2037 rmv_page_order(page);
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07002038
Joonsoo Kime2cfc912015-07-17 16:24:18 -07002039 set_page_owner(page, order, __GFP_MOVABLE);
Joonsoo Kimf3a14ce2015-07-17 16:24:15 -07002040
Mel Gorman8fb74b92013-01-11 14:32:16 -08002041 /* Set the pageblock's migratetype if the isolated page covers at least a pageblock */
Mel Gorman748446b2010-05-24 14:32:27 -07002042 if (order >= pageblock_order - 1) {
2043 struct page *endpage = page + (1 << order) - 1;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002044 for (; page < endpage; page += pageblock_nr_pages) {
2045 int mt = get_pageblock_migratetype(page);
Minchan Kim194159f2013-02-22 16:33:58 -08002046 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002047 set_pageblock_migratetype(page,
2048 MIGRATE_MOVABLE);
2049 }
Mel Gorman748446b2010-05-24 14:32:27 -07002050 }
2051
Joonsoo Kimf3a14ce2015-07-17 16:24:15 -07002052
Mel Gorman8fb74b92013-01-11 14:32:16 -08002053 return 1UL << order;
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07002054}
2055
2056/*
2057 * Similar to split_page except the page is already free. As this is only
2058 * being used for migration, the migratetype of the block also changes.
2059 * As this is called with interrupts disabled, the caller is responsible
 2060 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
2061 * are enabled.
2062 *
2063 * Note: this is probably too low level an operation for use in drivers.
2064 * Please consult with lkml before using this in your driver.
2065 */
2066int split_free_page(struct page *page)
2067{
2068 unsigned int order;
2069 int nr_pages;
2070
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07002071 order = page_order(page);
2072
Mel Gorman8fb74b92013-01-11 14:32:16 -08002073 nr_pages = __isolate_free_page(page, order);
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07002074 if (!nr_pages)
2075 return 0;
2076
2077 /* Split into individual pages */
2078 set_page_refcounted(page);
2079 split_page(page, order);
2080 return nr_pages;
Mel Gorman748446b2010-05-24 14:32:27 -07002081}
2082
2083/*
Vlastimil Babka75379192015-02-11 15:25:38 -08002084 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 */
Mel Gorman0a15c3e2009-06-16 15:32:05 -07002086static inline
2087struct page *buffered_rmqueue(struct zone *preferred_zone,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002088 struct zone *zone, unsigned int order,
2089 gfp_t gfp_flags, int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090{
2091 unsigned long flags;
Hugh Dickins689bceb2005-11-21 21:32:20 -08002092 struct page *page;
Mel Gormanb745bc82014-06-04 16:10:22 -07002093 bool cold = ((gfp_flags & __GFP_COLD) != 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094
Nick Piggin48db57f2006-01-08 01:00:42 -08002095 if (likely(order == 0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 struct per_cpu_pages *pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002097 struct list_head *list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 local_irq_save(flags);
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09002100 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2101 list = &pcp->lists[migratetype];
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002102 if (list_empty(list)) {
Mel Gorman535131e62007-10-16 01:25:49 -07002103 pcp->count += rmqueue_bulk(zone, 0,
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002104 pcp->batch, list,
Mel Gormane084b2d2009-07-29 15:02:04 -07002105 migratetype, cold);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002106 if (unlikely(list_empty(list)))
Shaohua Li6fb332f2009-09-21 17:01:17 -07002107 goto failed;
Mel Gorman535131e62007-10-16 01:25:49 -07002108 }
Mel Gormanb92a6ed2007-10-16 01:25:50 -07002109
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002110 if (cold)
2111 page = list_entry(list->prev, struct page, lru);
2112 else
2113 page = list_entry(list->next, struct page, lru);
2114
Mel Gormanb92a6ed2007-10-16 01:25:50 -07002115 list_del(&page->lru);
2116 pcp->count--;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002117 } else {
Andrew Mortondab48da2009-06-16 15:32:37 -07002118 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
2119 /*
2120 * __GFP_NOFAIL is not to be used in new code.
2121 *
2122 * All __GFP_NOFAIL callers should be fixed so that they
2123 * properly detect and handle allocation failures.
2124 *
2125 * We most definitely don't want callers attempting to
Linus Torvalds4923abf2009-06-24 12:16:49 -07002126 * allocate greater than order-1 page units with
Andrew Mortondab48da2009-06-16 15:32:37 -07002127 * __GFP_NOFAIL.
2128 */
Linus Torvalds4923abf2009-06-24 12:16:49 -07002129 WARN_ON_ONCE(order > 1);
Andrew Mortondab48da2009-06-16 15:32:37 -07002130 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 spin_lock_irqsave(&zone->lock, flags);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002132 page = __rmqueue(zone, order, migratetype);
Nick Piggina74609f2006-01-06 00:11:20 -08002133 spin_unlock(&zone->lock);
2134 if (!page)
2135 goto failed;
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07002136 __mod_zone_freepage_state(zone, -(1 << order),
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002137 get_pcppage_migratetype(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 }
2139
Johannes Weiner3a025762014-04-07 15:37:48 -07002140 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
Johannes Weinerabe5f972014-10-02 16:21:10 -07002141 if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
Johannes Weiner57054652014-10-09 15:28:17 -07002142 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
2143 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
Johannes Weiner27329362014-03-03 15:38:41 -08002144
Christoph Lameterf8891e52006-06-30 01:55:45 -07002145 __count_zone_vm_events(PGALLOC, zone, 1 << order);
Andi Kleen78afd562011-03-22 16:33:12 -07002146 zone_statistics(preferred_zone, zone, gfp_flags);
Nick Piggina74609f2006-01-06 00:11:20 -08002147 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148
Sasha Levin309381fea2014-01-23 15:52:54 -08002149 VM_BUG_ON_PAGE(bad_range(zone, page), page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 return page;
Nick Piggina74609f2006-01-06 00:11:20 -08002151
2152failed:
2153 local_irq_restore(flags);
Nick Piggina74609f2006-01-06 00:11:20 -08002154 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155}
2156
Akinobu Mita933e3122006-12-08 02:39:45 -08002157#ifdef CONFIG_FAIL_PAGE_ALLOC
2158
Akinobu Mitab2588c42011-07-26 16:09:03 -07002159static struct {
Akinobu Mita933e3122006-12-08 02:39:45 -08002160 struct fault_attr attr;
2161
Viresh Kumar621a5f72015-09-26 15:04:07 -07002162 bool ignore_gfp_highmem;
Mel Gorman71baba42015-11-06 16:28:28 -08002163 bool ignore_gfp_reclaim;
Akinobu Mita54114992007-07-15 23:40:23 -07002164 u32 min_order;
Akinobu Mita933e3122006-12-08 02:39:45 -08002165} fail_page_alloc = {
2166 .attr = FAULT_ATTR_INITIALIZER,
Mel Gorman71baba42015-11-06 16:28:28 -08002167 .ignore_gfp_reclaim = true,
Viresh Kumar621a5f72015-09-26 15:04:07 -07002168 .ignore_gfp_highmem = true,
Akinobu Mita54114992007-07-15 23:40:23 -07002169 .min_order = 1,
Akinobu Mita933e3122006-12-08 02:39:45 -08002170};
2171
2172static int __init setup_fail_page_alloc(char *str)
2173{
2174 return setup_fault_attr(&fail_page_alloc.attr, str);
2175}
2176__setup("fail_page_alloc=", setup_fail_page_alloc);
2177
Gavin Shandeaf3862012-07-31 16:41:51 -07002178static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08002179{
Akinobu Mita54114992007-07-15 23:40:23 -07002180 if (order < fail_page_alloc.min_order)
Gavin Shandeaf3862012-07-31 16:41:51 -07002181 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002182 if (gfp_mask & __GFP_NOFAIL)
Gavin Shandeaf3862012-07-31 16:41:51 -07002183 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002184 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
Gavin Shandeaf3862012-07-31 16:41:51 -07002185 return false;
Mel Gorman71baba42015-11-06 16:28:28 -08002186 if (fail_page_alloc.ignore_gfp_reclaim &&
2187 (gfp_mask & __GFP_DIRECT_RECLAIM))
Gavin Shandeaf3862012-07-31 16:41:51 -07002188 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002189
2190 return should_fail(&fail_page_alloc.attr, 1 << order);
2191}
2192
2193#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2194
2195static int __init fail_page_alloc_debugfs(void)
2196{
Al Virof4ae40a62011-07-24 04:33:43 -04002197 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
Akinobu Mita933e3122006-12-08 02:39:45 -08002198 struct dentry *dir;
Akinobu Mita933e3122006-12-08 02:39:45 -08002199
Akinobu Mitadd48c082011-08-03 16:21:01 -07002200 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2201 &fail_page_alloc.attr);
2202 if (IS_ERR(dir))
2203 return PTR_ERR(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08002204
Akinobu Mitab2588c42011-07-26 16:09:03 -07002205 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
Mel Gorman71baba42015-11-06 16:28:28 -08002206 &fail_page_alloc.ignore_gfp_reclaim))
Akinobu Mitab2588c42011-07-26 16:09:03 -07002207 goto fail;
2208 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2209 &fail_page_alloc.ignore_gfp_highmem))
2210 goto fail;
2211 if (!debugfs_create_u32("min-order", mode, dir,
2212 &fail_page_alloc.min_order))
2213 goto fail;
Akinobu Mita933e3122006-12-08 02:39:45 -08002214
Akinobu Mitab2588c42011-07-26 16:09:03 -07002215 return 0;
2216fail:
Akinobu Mitadd48c082011-08-03 16:21:01 -07002217 debugfs_remove_recursive(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08002218
Akinobu Mitab2588c42011-07-26 16:09:03 -07002219 return -ENOMEM;
Akinobu Mita933e3122006-12-08 02:39:45 -08002220}
2221
2222late_initcall(fail_page_alloc_debugfs);
2223
2224#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2225
2226#else /* CONFIG_FAIL_PAGE_ALLOC */
2227
Gavin Shandeaf3862012-07-31 16:41:51 -07002228static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08002229{
Gavin Shandeaf3862012-07-31 16:41:51 -07002230 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002231}
2232
2233#endif /* CONFIG_FAIL_PAGE_ALLOC */
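/*
 * For reference (illustrative): the knobs above are driven from user space,
 * either at boot with fail_page_alloc=<interval>,<probability>,<space>,<times>
 * (the generic fault-attr format) or through the debugfs files created above
 * (ignore-gfp-wait, ignore-gfp-highmem, min-order) under
 * /sys/kernel/debug/fail_page_alloc/. See Documentation/fault-injection/.
 */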
2234
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235/*
Mel Gorman88f5acf2011-01-13 15:45:41 -08002236 * Return true if free pages are above 'mark'. This takes into account the order
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 * of the allocation.
2238 */
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002239static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2240 unsigned long mark, int classzone_idx, int alloc_flags,
2241 long free_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242{
Wei Yuan26086de2014-12-10 15:44:44 -08002243 /* free_pages may go negative - that's OK */
Christoph Lameterd23ad422007-02-10 01:43:02 -08002244 long min = mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 int o;
Tomasz Stanislawski026b0812013-06-12 14:05:02 -07002246 long free_cma = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
Michal Hockodf0a6da2012-01-10 15:08:02 -08002248 free_pages -= (1 << order) - 1;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002249 if (alloc_flags & ALLOC_HIGH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 min -= min / 2;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002251 if (alloc_flags & ALLOC_HARDER)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 min -= min / 4;
Mel Gormane2b19192015-11-06 16:28:09 -08002253
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07002254#ifdef CONFIG_CMA
2255 /* If allocation can't use CMA areas don't use free CMA pages */
2256 if (!(alloc_flags & ALLOC_CMA))
Tomasz Stanislawski026b0812013-06-12 14:05:02 -07002257 free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07002258#endif
Tomasz Stanislawski026b0812013-06-12 14:05:02 -07002259
Mel Gorman3484b2d2014-08-06 16:07:14 -07002260 if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
Mel Gorman88f5acf2011-01-13 15:45:41 -08002261 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 for (o = 0; o < order; o++) {
2263 /* At the next order, this order's pages become unavailable */
2264 free_pages -= z->free_area[o].nr_free << o;
2265
2266 /* Require fewer higher order pages to be free */
2267 min >>= 1;
2268
2269 if (free_pages <= min)
Mel Gorman88f5acf2011-01-13 15:45:41 -08002270 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 }
Mel Gorman88f5acf2011-01-13 15:45:41 -08002272 return true;
2273}
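/*
 * Worked example (illustrative): for an order-2 request with mark = 1000, no
 * ALLOC_HIGH/ALLOC_HARDER boost and a zero lowmem_reserve, free_pages is
 * first reduced by (1 << 2) - 1 = 3 and must still exceed 1000. Then, for
 * o = 0 and o = 1, the pages held in those lower orders are subtracted and
 * the minimum is halved (1000 -> 500 -> 250), so enough of the remaining
 * free memory must actually sit in order >= 2 blocks.
 */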
2274
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002275bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
Mel Gorman88f5acf2011-01-13 15:45:41 -08002276 int classzone_idx, int alloc_flags)
2277{
2278 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2279 zone_page_state(z, NR_FREE_PAGES));
2280}
2281
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002282bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
Mel Gormane2b19192015-11-06 16:28:09 -08002283 unsigned long mark, int classzone_idx)
Mel Gorman88f5acf2011-01-13 15:45:41 -08002284{
2285 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2286
2287 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2288 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2289
Mel Gormane2b19192015-11-06 16:28:09 -08002290 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
Mel Gorman88f5acf2011-01-13 15:45:41 -08002291 free_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292}
2293
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002294#ifdef CONFIG_NUMA
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002295static bool zone_local(struct zone *local_zone, struct zone *zone)
2296{
Johannes Weinerfff4068c2013-12-20 14:54:12 +00002297 return local_zone->node == zone->node;
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002298}
2299
David Rientjes957f8222012-10-08 16:33:24 -07002300static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2301{
Mel Gorman5f7a75a2014-06-04 16:07:15 -07002302 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
2303 RECLAIM_DISTANCE;
David Rientjes957f8222012-10-08 16:33:24 -07002304}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002305#else /* CONFIG_NUMA */
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002306static bool zone_local(struct zone *local_zone, struct zone *zone)
2307{
2308 return true;
2309}
2310
David Rientjes957f8222012-10-08 16:33:24 -07002311static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2312{
2313 return true;
2314}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002315#endif /* CONFIG_NUMA */
2316
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002317static void reset_alloc_batches(struct zone *preferred_zone)
2318{
2319 struct zone *zone = preferred_zone->zone_pgdat->node_zones;
2320
2321 do {
2322 mod_zone_page_state(zone, NR_ALLOC_BATCH,
2323 high_wmark_pages(zone) - low_wmark_pages(zone) -
2324 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
Johannes Weiner57054652014-10-09 15:28:17 -07002325 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002326 } while (zone++ != preferred_zone);
2327}
2328
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002329/*
Paul Jackson0798e512006-12-06 20:31:38 -08002330 * get_page_from_freelist goes through the zonelist trying to allocate
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002331 * a page.
2332 */
2333static struct page *
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002334get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2335 const struct alloc_context *ac)
Martin Hicks753ee722005-06-21 17:14:41 -07002336{
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002337 struct zonelist *zonelist = ac->zonelist;
Mel Gormandd1a2392008-04-28 02:12:17 -07002338 struct zoneref *z;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002339 struct page *page = NULL;
Mel Gorman5117f452009-06-16 15:31:59 -07002340 struct zone *zone;
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002341 int nr_fair_skipped = 0;
2342 bool zonelist_rescan;
Mel Gorman54a6eb52008-04-28 02:12:16 -07002343
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002344zonelist_scan:
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002345 zonelist_rescan = false;
2346
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002347 /*
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002348 * Scan zonelist, looking for a zone with enough free.
Vladimir Davydov344736f2014-10-20 15:50:30 +04002349 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002350 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002351 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2352 ac->nodemask) {
Johannes Weinere085dbc2013-09-11 14:20:46 -07002353 unsigned long mark;
2354
Mel Gorman664eedd2014-06-04 16:10:08 -07002355 if (cpusets_enabled() &&
2356 (alloc_flags & ALLOC_CPUSET) &&
Vladimir Davydov344736f2014-10-20 15:50:30 +04002357 !cpuset_zone_allowed(zone, gfp_mask))
Mel Gormancd38b112011-07-25 17:12:29 -07002358 continue;
Johannes Weinera756cf52012-01-10 15:07:49 -08002359 /*
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002360 * Distribute pages in proportion to the individual
2361 * zone size to ensure fair page aging. The zone a
2362 * page was allocated in should have no effect on the
2363 * time the page has in memory before being reclaimed.
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002364 */
Johannes Weiner3a025762014-04-07 15:37:48 -07002365 if (alloc_flags & ALLOC_FAIR) {
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002366 if (!zone_local(ac->preferred_zone, zone))
Mel Gormanf7b5d642014-08-06 16:07:20 -07002367 break;
Johannes Weiner57054652014-10-09 15:28:17 -07002368 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002369 nr_fair_skipped++;
Johannes Weiner3a025762014-04-07 15:37:48 -07002370 continue;
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002371 }
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07002372 }
2373 /*
Johannes Weinera756cf52012-01-10 15:07:49 -08002374 * When allocating a page cache page for writing, we
2375 * want to get it from a zone that is within its dirty
2376 * limit, such that no single zone holds more than its
2377 * proportional share of globally allowed dirty pages.
2378 * The dirty limits take into account the zone's
2379 * lowmem reserves and high watermark so that kswapd
2380 * should be able to balance it without having to
2381 * write pages from its LRU list.
2382 *
2383 * This may look like it could increase pressure on
2384 * lower zones by failing allocations in higher zones
2385 * before they are full. But the pages that do spill
2386 * over are limited as the lower zones are protected
2387 * by this very same mechanism. It should not become
2388 * a practical burden to them.
2389 *
2390 * XXX: For now, allow allocations to potentially
2391 * exceed the per-zone dirty limit in the slowpath
Mel Gormanc9ab0c42015-11-06 16:28:12 -08002392 * (spread_dirty_pages unset) before going into reclaim,
Johannes Weinera756cf52012-01-10 15:07:49 -08002393 * which is important when on a NUMA setup the allowed
2394 * zones are together not big enough to reach the
2395 * global limit. The proper fix for these situations
2396 * will require awareness of zones in the
2397 * dirty-throttling and the flusher threads.
2398 */
Mel Gormanc9ab0c42015-11-06 16:28:12 -08002399 if (ac->spread_dirty_pages && !zone_dirty_ok(zone))
Mel Gorman800a1e72014-06-04 16:10:06 -07002400 continue;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002401
Johannes Weinere085dbc2013-09-11 14:20:46 -07002402 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2403 if (!zone_watermark_ok(zone, order, mark,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002404 ac->classzone_idx, alloc_flags)) {
Mel Gormanfa5e0842009-06-16 15:33:22 -07002405 int ret;
2406
Mel Gorman5dab2912014-06-04 16:10:14 -07002407 /* Checked here to keep the fast path fast */
2408 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2409 if (alloc_flags & ALLOC_NO_WATERMARKS)
2410 goto try_this_zone;
2411
David Rientjes957f8222012-10-08 16:33:24 -07002412 if (zone_reclaim_mode == 0 ||
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002413 !zone_allows_reclaim(ac->preferred_zone, zone))
Mel Gormancd38b112011-07-25 17:12:29 -07002414 continue;
2415
Mel Gormanfa5e0842009-06-16 15:33:22 -07002416 ret = zone_reclaim(zone, gfp_mask, order);
2417 switch (ret) {
2418 case ZONE_RECLAIM_NOSCAN:
2419 /* did not scan */
Mel Gormancd38b112011-07-25 17:12:29 -07002420 continue;
Mel Gormanfa5e0842009-06-16 15:33:22 -07002421 case ZONE_RECLAIM_FULL:
2422 /* scanned but unreclaimable */
Mel Gormancd38b112011-07-25 17:12:29 -07002423 continue;
Mel Gormanfa5e0842009-06-16 15:33:22 -07002424 default:
2425 /* did we reclaim enough */
Mel Gormanfed27192013-04-29 15:07:57 -07002426 if (zone_watermark_ok(zone, order, mark,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002427 ac->classzone_idx, alloc_flags))
Mel Gormanfed27192013-04-29 15:07:57 -07002428 goto try_this_zone;
2429
Mel Gormanfed27192013-04-29 15:07:57 -07002430 continue;
Paul Jackson0798e512006-12-06 20:31:38 -08002431 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002432 }
2433
Mel Gormanfa5e0842009-06-16 15:33:22 -07002434try_this_zone:
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002435 page = buffered_rmqueue(ac->preferred_zone, zone, order,
2436 gfp_mask, ac->migratetype);
Vlastimil Babka75379192015-02-11 15:25:38 -08002437 if (page) {
2438 if (prep_new_page(page, order, gfp_mask, alloc_flags))
2439 goto try_this_zone;
2440 return page;
2441 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07002442 }
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002443
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002444 /*
2445 * The first pass makes sure allocations are spread fairly within the
2446 * local node. However, the local node might have free pages left
2447 * after the fairness batches are exhausted, and remote zones haven't
2448 * even been considered yet. Try once more without fairness, and
2449 * include remote zones now, before entering the slowpath and waking
2450 * kswapd: prefer spilling to a remote zone over swapping locally.
2451 */
2452 if (alloc_flags & ALLOC_FAIR) {
2453 alloc_flags &= ~ALLOC_FAIR;
2454 if (nr_fair_skipped) {
2455 zonelist_rescan = true;
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002456 reset_alloc_batches(ac->preferred_zone);
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002457 }
2458 if (nr_online_nodes > 1)
2459 zonelist_rescan = true;
2460 }
2461
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002462 if (zonelist_rescan)
2463 goto zonelist_scan;
2464
2465 return NULL;
Martin Hicks753ee722005-06-21 17:14:41 -07002466}
2467
David Rientjes29423e772011-03-22 16:30:47 -07002468/*
2469 * Large machines with many possible nodes should not always dump per-node
2470 * meminfo in irq context.
2471 */
2472static inline bool should_suppress_show_mem(void)
2473{
2474 bool ret = false;
2475
2476#if NODES_SHIFT > 8
2477 ret = in_interrupt();
2478#endif
2479 return ret;
2480}
2481
Dave Hansena238ab52011-05-24 17:12:16 -07002482static DEFINE_RATELIMIT_STATE(nopage_rs,
2483 DEFAULT_RATELIMIT_INTERVAL,
2484 DEFAULT_RATELIMIT_BURST);
2485
2486void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
2487{
Dave Hansena238ab52011-05-24 17:12:16 -07002488 unsigned int filter = SHOW_MEM_FILTER_NODES;
2489
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08002490 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2491 debug_guardpage_minorder() > 0)
Dave Hansena238ab52011-05-24 17:12:16 -07002492 return;
2493
2494 /*
2495 * This documents exceptions given to allocations in certain
2496 * contexts that are allowed to allocate outside current's set
2497 * of allowed nodes.
2498 */
2499 if (!(gfp_mask & __GFP_NOMEMALLOC))
2500 if (test_thread_flag(TIF_MEMDIE) ||
2501 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2502 filter &= ~SHOW_MEM_FILTER_NODES;
Mel Gormand0164ad2015-11-06 16:28:21 -08002503 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
Dave Hansena238ab52011-05-24 17:12:16 -07002504 filter &= ~SHOW_MEM_FILTER_NODES;
2505
2506 if (fmt) {
Joe Perches3ee9a4f2011-10-31 17:08:35 -07002507 struct va_format vaf;
2508 va_list args;
2509
Dave Hansena238ab52011-05-24 17:12:16 -07002510 va_start(args, fmt);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07002511
2512 vaf.fmt = fmt;
2513 vaf.va = &args;
2514
2515 pr_warn("%pV", &vaf);
2516
Dave Hansena238ab52011-05-24 17:12:16 -07002517 va_end(args);
2518 }
2519
Joe Perches3ee9a4f2011-10-31 17:08:35 -07002520 pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
2521 current->comm, order, gfp_mask);
Dave Hansena238ab52011-05-24 17:12:16 -07002522
2523 dump_stack();
2524 if (!should_suppress_show_mem())
2525 show_mem(filter);
2526}
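
/*
 * Example call (hypothetical caller, not from this file): a failing
 * allocation site can pass its own printf-style context, which is printed
 * ahead of the rate-limited "page allocation failure" report above.
 *
 *	warn_alloc_failed(GFP_KERNEL, 2, "ring of %u bytes\n", ring_bytes);
 */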
2527
Mel Gorman11e33f62009-06-16 15:31:57 -07002528static inline struct page *
2529__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002530 const struct alloc_context *ac, unsigned long *did_some_progress)
Mel Gorman11e33f62009-06-16 15:31:57 -07002531{
David Rientjes6e0fc462015-09-08 15:00:36 -07002532 struct oom_control oc = {
2533 .zonelist = ac->zonelist,
2534 .nodemask = ac->nodemask,
2535 .gfp_mask = gfp_mask,
2536 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07002537 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539
Johannes Weiner9879de72015-01-26 12:58:32 -08002540 *did_some_progress = 0;
2541
Johannes Weiner9879de72015-01-26 12:58:32 -08002542 /*
Johannes Weinerdc564012015-06-24 16:57:19 -07002543 * Acquire the oom lock. If that fails, somebody else is
2544 * making progress for us.
Johannes Weiner9879de72015-01-26 12:58:32 -08002545 */
Johannes Weinerdc564012015-06-24 16:57:19 -07002546 if (!mutex_trylock(&oom_lock)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08002547 *did_some_progress = 1;
Mel Gorman11e33f62009-06-16 15:31:57 -07002548 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 return NULL;
2550 }
Jens Axboe6b1de912005-11-17 21:35:02 +01002551
Mel Gorman11e33f62009-06-16 15:31:57 -07002552 /*
2553 * Go through the zonelist yet one more time, keep very high watermark
2554 * here, this is only to catch a parallel oom killing, we must fail if
2555 * we're still under heavy pressure.
2556 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002557 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
2558 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002559 if (page)
Mel Gorman11e33f62009-06-16 15:31:57 -07002560 goto out;
2561
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08002562 if (!(gfp_mask & __GFP_NOFAIL)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08002563 /* Coredumps can quickly deplete all memory reserves */
2564 if (current->flags & PF_DUMPCORE)
2565 goto out;
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08002566 /* The OOM killer will not help higher order allocs */
2567 if (order > PAGE_ALLOC_COSTLY_ORDER)
2568 goto out;
David Rientjes03668b32010-08-09 17:18:54 -07002569 /* The OOM killer does not needlessly kill tasks for lowmem */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002570 if (ac->high_zoneidx < ZONE_NORMAL)
David Rientjes03668b32010-08-09 17:18:54 -07002571 goto out;
Johannes Weiner90839052015-06-24 16:57:21 -07002572 /* The OOM killer does not compensate for IO-less reclaim */
Johannes Weinercc873172015-02-27 15:52:09 -08002573 if (!(gfp_mask & __GFP_FS)) {
2574 /*
2575 * XXX: Page reclaim didn't yield anything,
2576 * and the OOM killer can't be invoked, but
Johannes Weiner90839052015-06-24 16:57:21 -07002577 * keep looping as per tradition.
Johannes Weinercc873172015-02-27 15:52:09 -08002578 */
2579 *did_some_progress = 1;
Johannes Weiner9879de72015-01-26 12:58:32 -08002580 goto out;
Johannes Weinercc873172015-02-27 15:52:09 -08002581 }
Johannes Weiner90839052015-06-24 16:57:21 -07002582 if (pm_suspended_storage())
2583 goto out;
David Rientjes4167e9b2015-04-14 15:46:55 -07002584 /* The OOM killer may not free memory on a specific node */
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08002585 if (gfp_mask & __GFP_THISNODE)
2586 goto out;
2587 }
Mel Gorman11e33f62009-06-16 15:31:57 -07002588 /* Exhausted what can be done so it's blamo time */
David Rientjes6e0fc462015-09-08 15:00:36 -07002589 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
Michal Hockoc32b3cb2015-02-11 15:26:24 -08002590 *did_some_progress = 1;
Mel Gorman11e33f62009-06-16 15:31:57 -07002591out:
Johannes Weinerdc564012015-06-24 16:57:19 -07002592 mutex_unlock(&oom_lock);
Mel Gorman11e33f62009-06-16 15:31:57 -07002593 return page;
2594}
2595
Mel Gorman56de7262010-05-24 14:32:30 -07002596#ifdef CONFIG_COMPACTION
2597/* Try memory compaction for high-order allocations before reclaim */
2598static struct page *
2599__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002600 int alloc_flags, const struct alloc_context *ac,
2601 enum migrate_mode mode, int *contended_compaction,
2602 bool *deferred_compaction)
Mel Gorman56de7262010-05-24 14:32:30 -07002603{
Vlastimil Babka53853e22014-10-09 15:27:02 -07002604 unsigned long compact_result;
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002605 struct page *page;
Vlastimil Babka53853e22014-10-09 15:27:02 -07002606
Mel Gorman66199712012-01-12 17:19:41 -08002607 if (!order)
Mel Gorman56de7262010-05-24 14:32:30 -07002608 return NULL;
2609
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002610 current->flags |= PF_MEMALLOC;
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08002611 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
2612 mode, contended_compaction);
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002613 current->flags &= ~PF_MEMALLOC;
Mel Gorman56de7262010-05-24 14:32:30 -07002614
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002615 switch (compact_result) {
2616 case COMPACT_DEFERRED:
Vlastimil Babka53853e22014-10-09 15:27:02 -07002617 *deferred_compaction = true;
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002618 /* fall-through */
2619 case COMPACT_SKIPPED:
2620 return NULL;
2621 default:
2622 break;
Mel Gorman56de7262010-05-24 14:32:30 -07002623 }
2624
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002625 /*
2626 * At least in one zone compaction wasn't deferred or skipped, so let's
2627 * count a compaction stall
2628 */
2629 count_vm_event(COMPACTSTALL);
2630
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002631 page = get_page_from_freelist(gfp_mask, order,
2632 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002633
2634 if (page) {
2635 struct zone *zone = page_zone(page);
2636
2637 zone->compact_blockskip_flush = false;
2638 compaction_defer_reset(zone, order, true);
2639 count_vm_event(COMPACTSUCCESS);
2640 return page;
2641 }
2642
2643 /*
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07002644	 * It's bad if a compaction run occurs and fails. The most likely reason
2645 * is that pages exist, but not enough to satisfy watermarks.
2646 */
2647 count_vm_event(COMPACTFAIL);
2648
2649 cond_resched();
2650
Mel Gorman56de7262010-05-24 14:32:30 -07002651 return NULL;
2652}
2653#else
2654static inline struct page *
2655__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002656 int alloc_flags, const struct alloc_context *ac,
2657 enum migrate_mode mode, int *contended_compaction,
2658 bool *deferred_compaction)
Mel Gorman56de7262010-05-24 14:32:30 -07002659{
2660 return NULL;
2661}
2662#endif /* CONFIG_COMPACTION */
2663
Marek Szyprowskibba90712012-01-25 12:09:52 +01002664/* Perform direct synchronous page reclaim */
2665static int
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002666__perform_reclaim(gfp_t gfp_mask, unsigned int order,
2667 const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07002668{
Mel Gorman11e33f62009-06-16 15:31:57 -07002669 struct reclaim_state reclaim_state;
Marek Szyprowskibba90712012-01-25 12:09:52 +01002670 int progress;
Mel Gorman11e33f62009-06-16 15:31:57 -07002671
2672 cond_resched();
2673
2674 /* We now go into synchronous reclaim */
2675 cpuset_memory_pressure_bump();
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002676 current->flags |= PF_MEMALLOC;
Mel Gorman11e33f62009-06-16 15:31:57 -07002677 lockdep_set_current_reclaim_state(gfp_mask);
2678 reclaim_state.reclaimed_slab = 0;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002679 current->reclaim_state = &reclaim_state;
Mel Gorman11e33f62009-06-16 15:31:57 -07002680
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002681 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
2682 ac->nodemask);
Mel Gorman11e33f62009-06-16 15:31:57 -07002683
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002684 current->reclaim_state = NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07002685 lockdep_clear_current_reclaim_state();
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002686 current->flags &= ~PF_MEMALLOC;
Mel Gorman11e33f62009-06-16 15:31:57 -07002687
2688 cond_resched();
2689
Marek Szyprowskibba90712012-01-25 12:09:52 +01002690 return progress;
2691}
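
/*
 * The PF_MEMALLOC bracket above is the standard idiom for code that must
 * not recurse into reclaim. A minimal sketch of the same pattern (hedged:
 * a caller nested inside another PF_MEMALLOC region would need to save and
 * restore the flag rather than clear it unconditionally):
 *
 *	current->flags |= PF_MEMALLOC;
 *	... work that might otherwise re-enter reclaim ...
 *	current->flags &= ~PF_MEMALLOC;
 */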
2692
2693/* The really slow allocator path where we enter direct reclaim */
2694static inline struct page *
2695__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002696 int alloc_flags, const struct alloc_context *ac,
2697 unsigned long *did_some_progress)
Marek Szyprowskibba90712012-01-25 12:09:52 +01002698{
2699 struct page *page = NULL;
2700 bool drained = false;
2701
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002702 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07002703 if (unlikely(!(*did_some_progress)))
2704 return NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07002705
Mel Gorman9ee493c2010-09-09 16:38:18 -07002706retry:
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002707 page = get_page_from_freelist(gfp_mask, order,
2708 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07002709
2710 /*
2711 * If an allocation failed after direct reclaim, it could be because
2712 * pages are pinned on the per-cpu lists. Drain them and try again
2713 */
2714 if (!page && !drained) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002715 drain_all_pages(NULL);
Mel Gorman9ee493c2010-09-09 16:38:18 -07002716 drained = true;
2717 goto retry;
2718 }
2719
Mel Gorman11e33f62009-06-16 15:31:57 -07002720 return page;
2721}
2722
Mel Gorman11e33f62009-06-16 15:31:57 -07002723/*
2724 * This is called in the allocator slow-path if the allocation request is of
2725 * sufficient urgency to ignore watermarks and take other desperate measures
2726 */
2727static inline struct page *
2728__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002729 const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07002730{
2731 struct page *page;
2732
2733 do {
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002734 page = get_page_from_freelist(gfp_mask, order,
2735 ALLOC_NO_WATERMARKS, ac);
Mel Gorman11e33f62009-06-16 15:31:57 -07002736
2737 if (!page && gfp_mask & __GFP_NOFAIL)
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002738 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
2739 HZ/50);
Mel Gorman11e33f62009-06-16 15:31:57 -07002740 } while (!page && (gfp_mask & __GFP_NOFAIL));
2741
2742 return page;
2743}
2744
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002745static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07002746{
2747 struct zoneref *z;
2748 struct zone *zone;
2749
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002750 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2751 ac->high_zoneidx, ac->nodemask)
2752 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
Mel Gorman11e33f62009-06-16 15:31:57 -07002753}
2754
Peter Zijlstra341ce062009-06-16 15:32:02 -07002755static inline int
2756gfp_to_alloc_flags(gfp_t gfp_mask)
2757{
Peter Zijlstra341ce062009-06-16 15:32:02 -07002758 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
Peter Zijlstra341ce062009-06-16 15:32:02 -07002759
Mel Gormana56f57f2009-06-16 15:32:02 -07002760 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
Namhyung Kime6223a32010-10-26 14:21:59 -07002761 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
Mel Gormana56f57f2009-06-16 15:32:02 -07002762
Peter Zijlstra341ce062009-06-16 15:32:02 -07002763 /*
2764 * The caller may dip into page reserves a bit more if the caller
2765 * cannot run direct reclaim, or if the caller has realtime scheduling
2766 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
Mel Gormand0164ad2015-11-06 16:28:21 -08002767 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
Peter Zijlstra341ce062009-06-16 15:32:02 -07002768 */
Namhyung Kime6223a32010-10-26 14:21:59 -07002769 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
Peter Zijlstra341ce062009-06-16 15:32:02 -07002770
Mel Gormand0164ad2015-11-06 16:28:21 -08002771 if (gfp_mask & __GFP_ATOMIC) {
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08002772 /*
David Rientjesb104a352014-07-30 16:08:24 -07002773 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
2774 * if it can't schedule.
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08002775 */
David Rientjesb104a352014-07-30 16:08:24 -07002776 if (!(gfp_mask & __GFP_NOMEMALLOC))
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08002777 alloc_flags |= ALLOC_HARDER;
Peter Zijlstra341ce062009-06-16 15:32:02 -07002778 /*
David Rientjesb104a352014-07-30 16:08:24 -07002779 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
Vladimir Davydov344736f2014-10-20 15:50:30 +04002780 * comment for __cpuset_node_allowed().
Peter Zijlstra341ce062009-06-16 15:32:02 -07002781 */
2782 alloc_flags &= ~ALLOC_CPUSET;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002783 } else if (unlikely(rt_task(current)) && !in_interrupt())
Peter Zijlstra341ce062009-06-16 15:32:02 -07002784 alloc_flags |= ALLOC_HARDER;
2785
Mel Gormanb37f1dd2012-07-31 16:44:03 -07002786 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2787 if (gfp_mask & __GFP_MEMALLOC)
2788 alloc_flags |= ALLOC_NO_WATERMARKS;
Mel Gorman907aed42012-07-31 16:44:07 -07002789 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2790 alloc_flags |= ALLOC_NO_WATERMARKS;
2791 else if (!in_interrupt() &&
2792 ((current->flags & PF_MEMALLOC) ||
2793 unlikely(test_thread_flag(TIF_MEMDIE))))
Peter Zijlstra341ce062009-06-16 15:32:02 -07002794 alloc_flags |= ALLOC_NO_WATERMARKS;
2795 }
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07002796#ifdef CONFIG_CMA
David Rientjes43e7a342014-10-09 15:27:25 -07002797 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07002798 alloc_flags |= ALLOC_CMA;
2799#endif
Peter Zijlstra341ce062009-06-16 15:32:02 -07002800 return alloc_flags;
2801}
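
/*
 * Worked example (hedged; the exact composition of GFP_ATOMIC is an
 * assumption of this note): an atomic caller passes __GFP_HIGH and
 * __GFP_ATOMIC but no __GFP_DIRECT_RECLAIM, so gfp_to_alloc_flags() above
 * returns roughly ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER with
 * ALLOC_CPUSET cleared, i.e. the request may dip further into reserves and
 * ignore cpuset limits rather than fail outright.
 */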
2802
Mel Gorman072bb0a2012-07-31 16:43:58 -07002803bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2804{
Mel Gormanb37f1dd2012-07-31 16:44:03 -07002805 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
Mel Gorman072bb0a2012-07-31 16:43:58 -07002806}
2807
Mel Gormand0164ad2015-11-06 16:28:21 -08002808static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
2809{
2810 return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
2811}
2812
Mel Gorman11e33f62009-06-16 15:31:57 -07002813static inline struct page *
2814__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002815 struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07002816{
Mel Gormand0164ad2015-11-06 16:28:21 -08002817 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
Mel Gorman11e33f62009-06-16 15:31:57 -07002818 struct page *page = NULL;
2819 int alloc_flags;
2820 unsigned long pages_reclaimed = 0;
2821 unsigned long did_some_progress;
David Rientjese0b9dae2014-06-04 16:08:28 -07002822 enum migrate_mode migration_mode = MIGRATE_ASYNC;
Mel Gorman66199712012-01-12 17:19:41 -08002823 bool deferred_compaction = false;
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07002824 int contended_compaction = COMPACT_CONTENDED_NONE;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002825
Christoph Lameter952f3b52006-12-06 20:33:26 -08002826 /*
Mel Gorman72807a72009-06-16 15:32:18 -07002827 * In the slowpath, we sanity check order to avoid ever trying to
2828 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2829 * be using allocators in order of preference for an area that is
2830 * too large.
2831 */
Mel Gorman1fc28b72009-07-29 15:04:08 -07002832 if (order >= MAX_ORDER) {
2833 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
Mel Gorman72807a72009-06-16 15:32:18 -07002834 return NULL;
Mel Gorman1fc28b72009-07-29 15:04:08 -07002835 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002836
Christoph Lameter952f3b52006-12-06 20:33:26 -08002837 /*
Mel Gormand0164ad2015-11-06 16:28:21 -08002838 * We also sanity check to catch abuse of atomic reserves being used by
2839 * callers that are not in atomic context.
2840 */
2841 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
2842 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
2843 gfp_mask &= ~__GFP_ATOMIC;
2844
2845 /*
David Rientjes4167e9b2015-04-14 15:46:55 -07002846 * If this allocation cannot block and it is for a specific node, then
2847 * fail early. There's no need to wakeup kswapd or retry for a
2848 * speculative node-specific allocation.
Christoph Lameter952f3b52006-12-06 20:33:26 -08002849 */
Mel Gormand0164ad2015-11-06 16:28:21 -08002850 if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !can_direct_reclaim)
Christoph Lameter952f3b52006-12-06 20:33:26 -08002851 goto nopage;
2852
Johannes Weiner9879de72015-01-26 12:58:32 -08002853retry:
Mel Gormand0164ad2015-11-06 16:28:21 -08002854 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002855 wake_all_kswapds(order, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002856
Paul Jackson9bf22292005-09-06 15:18:12 -07002857 /*
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002858 * OK, we're below the kswapd watermark and have kicked background
2859 * reclaim. Now things get more complex, so set up alloc_flags according
2860 * to how we want to proceed.
Paul Jackson9bf22292005-09-06 15:18:12 -07002861 */
Peter Zijlstra341ce062009-06-16 15:32:02 -07002862 alloc_flags = gfp_to_alloc_flags(gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863
David Rientjesf33261d2011-01-25 15:07:20 -08002864 /*
2865 * Find the true preferred zone if the allocation is unconstrained by
2866 * cpusets.
2867 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002868 if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
Mel Gormand8846372014-06-04 16:10:33 -07002869 struct zoneref *preferred_zoneref;
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002870 preferred_zoneref = first_zones_zonelist(ac->zonelist,
2871 ac->high_zoneidx, NULL, &ac->preferred_zone);
2872 ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
Mel Gormand8846372014-06-04 16:10:33 -07002873 }
David Rientjesf33261d2011-01-25 15:07:20 -08002874
Peter Zijlstra341ce062009-06-16 15:32:02 -07002875 /* This is the last chance, in general, before the goto nopage. */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002876 page = get_page_from_freelist(gfp_mask, order,
2877 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002878 if (page)
2879 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880
Mel Gorman11e33f62009-06-16 15:31:57 -07002881 /* Allocate without watermarks if the context allows */
Peter Zijlstra341ce062009-06-16 15:32:02 -07002882 if (alloc_flags & ALLOC_NO_WATERMARKS) {
Mel Gorman183f6372012-07-31 16:44:12 -07002883 /*
2884		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds that
2885		 * the allocation is high priority and these types of
2886		 * allocations are system rather than user oriented
2887 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002888 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
Mel Gorman183f6372012-07-31 16:44:12 -07002889
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002890 page = __alloc_pages_high_priority(gfp_mask, order, ac);
2891
Mel Gormancfd19c52012-07-31 16:44:10 -07002892 if (page) {
Peter Zijlstra341ce062009-06-16 15:32:02 -07002893 goto got_pg;
Mel Gormancfd19c52012-07-31 16:44:10 -07002894 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 }
2896
Mel Gormand0164ad2015-11-06 16:28:21 -08002897 /* Caller is not willing to reclaim, we can't balance anything */
2898 if (!can_direct_reclaim) {
David Rientjesaed0a0e2014-01-21 15:51:12 -08002899 /*
2900 * All existing users of the deprecated __GFP_NOFAIL are
2901 * blockable, so warn of any new users that actually allow this
2902 * type of allocation to fail.
2903 */
2904 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 goto nopage;
David Rientjesaed0a0e2014-01-21 15:51:12 -08002906 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907
Peter Zijlstra341ce062009-06-16 15:32:02 -07002908 /* Avoid recursion of direct reclaim */
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08002909 if (current->flags & PF_MEMALLOC)
Peter Zijlstra341ce062009-06-16 15:32:02 -07002910 goto nopage;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911
David Rientjes6583bb62009-07-29 15:02:06 -07002912 /* Avoid allocations with no watermarks from looping endlessly */
2913 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2914 goto nopage;
2915
Mel Gorman77f1fe62011-01-13 15:45:57 -08002916 /*
2917 * Try direct compaction. The first pass is asynchronous. Subsequent
2918 * attempts after direct reclaim are synchronous
2919 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002920 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
2921 migration_mode,
2922 &contended_compaction,
Vlastimil Babka53853e22014-10-09 15:27:02 -07002923 &deferred_compaction);
Mel Gorman56de7262010-05-24 14:32:30 -07002924 if (page)
2925 goto got_pg;
David Rientjes75f30862014-06-04 16:08:30 -07002926
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07002927 /* Checks for THP-specific high-order allocations */
Mel Gormand0164ad2015-11-06 16:28:21 -08002928 if (is_thp_gfp_mask(gfp_mask)) {
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07002929 /*
2930 * If compaction is deferred for high-order allocations, it is
2931 * because sync compaction recently failed. If this is the case
2932 * and the caller requested a THP allocation, we do not want
2933 * to heavily disrupt the system, so we fail the allocation
2934 * instead of entering direct reclaim.
2935 */
2936 if (deferred_compaction)
2937 goto nopage;
2938
2939 /*
2940 * In all zones where compaction was attempted (and not
2941 * deferred or skipped), lock contention has been detected.
2942 * For THP allocation we do not want to disrupt the others
2943 * so we fallback to base pages instead.
2944 */
2945 if (contended_compaction == COMPACT_CONTENDED_LOCK)
2946 goto nopage;
2947
2948 /*
2949 * If compaction was aborted due to need_resched(), we do not
2950 * want to further increase allocation latency, unless it is
2951 * khugepaged trying to collapse.
2952 */
2953 if (contended_compaction == COMPACT_CONTENDED_SCHED
2954 && !(current->flags & PF_KTHREAD))
2955 goto nopage;
2956 }
Mel Gorman66199712012-01-12 17:19:41 -08002957
David Rientjes8fe78042014-08-06 16:07:54 -07002958 /*
2959 * It can become very expensive to allocate transparent hugepages at
2960 * fault, so use asynchronous memory compaction for THP unless it is
2961 * khugepaged trying to collapse.
2962 */
Mel Gormand0164ad2015-11-06 16:28:21 -08002963 if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD))
David Rientjes8fe78042014-08-06 16:07:54 -07002964 migration_mode = MIGRATE_SYNC_LIGHT;
2965
Mel Gorman11e33f62009-06-16 15:31:57 -07002966 /* Try direct reclaim and then allocating */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002967 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
2968 &did_some_progress);
Mel Gorman11e33f62009-06-16 15:31:57 -07002969 if (page)
2970 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971
Johannes Weiner90839052015-06-24 16:57:21 -07002972 /* Do not loop if specifically requested */
2973 if (gfp_mask & __GFP_NORETRY)
2974 goto noretry;
2975
2976 /* Keep reclaiming pages as long as there is reasonable progress */
Nishanth Aravamudana41f24e2008-04-29 00:58:25 -07002977 pages_reclaimed += did_some_progress;
Johannes Weiner90839052015-06-24 16:57:21 -07002978 if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
2979 ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
Mel Gorman11e33f62009-06-16 15:31:57 -07002980 /* Wait for some write requests to complete then retry */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002981 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
Johannes Weiner9879de72015-01-26 12:58:32 -08002982 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983 }
2984
Johannes Weiner90839052015-06-24 16:57:21 -07002985 /* Reclaim has failed us, start killing things */
2986 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
2987 if (page)
2988 goto got_pg;
2989
2990 /* Retry as long as the OOM killer is making progress */
2991 if (did_some_progress)
2992 goto retry;
2993
2994noretry:
2995 /*
2996	 * High-order allocations do not necessarily loop after direct
2997	 * reclaim; reclaim/compaction depends on compaction being called
2998	 * after reclaim, so call it directly here if necessary.
2999 */
3000 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
3001 ac, migration_mode,
3002 &contended_compaction,
3003 &deferred_compaction);
3004 if (page)
3005 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006nopage:
Dave Hansena238ab52011-05-24 17:12:16 -07003007 warn_alloc_failed(gfp_mask, order, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008got_pg:
Mel Gorman072bb0a2012-07-31 16:43:58 -07003009 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010}
Mel Gorman11e33f62009-06-16 15:31:57 -07003011
3012/*
3013 * This is the 'heart' of the zoned buddy allocator.
3014 */
3015struct page *
3016__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3017 struct zonelist *zonelist, nodemask_t *nodemask)
3018{
Mel Gormand8846372014-06-04 16:10:33 -07003019 struct zoneref *preferred_zoneref;
Mel Gormancc9a6c82012-03-21 16:34:11 -07003020 struct page *page = NULL;
Mel Gormancc9a6c82012-03-21 16:34:11 -07003021 unsigned int cpuset_mems_cookie;
Johannes Weiner3a025762014-04-07 15:37:48 -07003022 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
Andrew Morton91fbdc02015-02-11 15:25:04 -08003023 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003024 struct alloc_context ac = {
3025 .high_zoneidx = gfp_zone(gfp_mask),
3026 .nodemask = nodemask,
3027 .migratetype = gfpflags_to_migratetype(gfp_mask),
3028 };
Mel Gorman11e33f62009-06-16 15:31:57 -07003029
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10003030 gfp_mask &= gfp_allowed_mask;
3031
Mel Gorman11e33f62009-06-16 15:31:57 -07003032 lockdep_trace_alloc(gfp_mask);
3033
Mel Gormand0164ad2015-11-06 16:28:21 -08003034 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
Mel Gorman11e33f62009-06-16 15:31:57 -07003035
3036 if (should_fail_alloc_page(gfp_mask, order))
3037 return NULL;
3038
3039 /*
3040 * Check the zones suitable for the gfp_mask contain at least one
3041 * valid zone. It's possible to have an empty zonelist as a result
David Rientjes4167e9b2015-04-14 15:46:55 -07003042 * of __GFP_THISNODE and a memoryless node
Mel Gorman11e33f62009-06-16 15:31:57 -07003043 */
3044 if (unlikely(!zonelist->_zonerefs->zone))
3045 return NULL;
3046
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003047 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
Vlastimil Babka21bb9bd2014-10-09 15:26:51 -07003048 alloc_flags |= ALLOC_CMA;
3049
Mel Gormancc9a6c82012-03-21 16:34:11 -07003050retry_cpuset:
Mel Gormand26914d2014-04-03 14:47:24 -07003051 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07003052
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003053 /* We set it here, as __alloc_pages_slowpath might have changed it */
3054 ac.zonelist = zonelist;
Mel Gormanc9ab0c42015-11-06 16:28:12 -08003055
3056 /* Dirty zone balancing only done in the fast path */
3057 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3058
Mel Gorman5117f452009-06-16 15:31:59 -07003059 /* The preferred zone is used for statistics later */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003060 preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
3061 ac.nodemask ? : &cpuset_current_mems_allowed,
3062 &ac.preferred_zone);
3063 if (!ac.preferred_zone)
Mel Gormancc9a6c82012-03-21 16:34:11 -07003064 goto out;
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003065 ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
Mel Gorman5117f452009-06-16 15:31:59 -07003066
3067 /* First allocation attempt */
Andrew Morton91fbdc02015-02-11 15:25:04 -08003068 alloc_mask = gfp_mask|__GFP_HARDWALL;
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003069 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
Ming Lei21caf2f2013-02-22 16:34:08 -08003070 if (unlikely(!page)) {
3071 /*
3072 * Runtime PM, block IO and its error handling path
3073 * can deadlock because I/O on the device might not
3074 * complete.
3075 */
Andrew Morton91fbdc02015-02-11 15:25:04 -08003076 alloc_mask = memalloc_noio_flags(gfp_mask);
Mel Gormanc9ab0c42015-11-06 16:28:12 -08003077 ac.spread_dirty_pages = false;
Andrew Morton91fbdc02015-02-11 15:25:04 -08003078
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003079 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
Ming Lei21caf2f2013-02-22 16:34:08 -08003080 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003081
Xishi Qiu23f086f2015-02-11 15:25:07 -08003082 if (kmemcheck_enabled && page)
3083 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
3084
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003085 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
Mel Gormancc9a6c82012-03-21 16:34:11 -07003086
3087out:
3088 /*
3089 * When updating a task's mems_allowed, it is possible to race with
3090 * parallel threads in such a way that an allocation can fail while
3091 * the mask is being updated. If a page allocation is about to fail,
3092 * check if the cpuset changed during allocation and if so, retry.
3093 */
Mel Gormand26914d2014-04-03 14:47:24 -07003094 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07003095 goto retry_cpuset;
3096
Mel Gorman11e33f62009-06-16 15:31:57 -07003097 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098}
Mel Gormand2391712009-06-16 15:31:52 -07003099EXPORT_SYMBOL(__alloc_pages_nodemask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100
3101/*
3102 * Common helper functions.
3103 */
Harvey Harrison920c7a52008-02-04 22:29:26 -08003104unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105{
Akinobu Mita945a1112009-09-21 17:01:47 -07003106 struct page *page;
3107
3108 /*
3109 * __get_free_pages() returns a 32-bit address, which cannot represent
3110 * a highmem page
3111 */
3112 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
3113
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114 page = alloc_pages(gfp_mask, order);
3115 if (!page)
3116 return 0;
3117 return (unsigned long) page_address(page);
3118}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119EXPORT_SYMBOL(__get_free_pages);
3120
Harvey Harrison920c7a52008-02-04 22:29:26 -08003121unsigned long get_zeroed_page(gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122{
Akinobu Mita945a1112009-09-21 17:01:47 -07003123 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125EXPORT_SYMBOL(get_zeroed_page);
3126
Harvey Harrison920c7a52008-02-04 22:29:26 -08003127void __free_pages(struct page *page, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128{
Nick Pigginb5810032005-10-29 18:16:12 -07003129 if (put_page_testzero(page)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130 if (order == 0)
Mel Gormanb745bc82014-06-04 16:10:22 -07003131 free_hot_cold_page(page, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132 else
3133 __free_pages_ok(page, order);
3134 }
3135}
3136
3137EXPORT_SYMBOL(__free_pages);
3138
Harvey Harrison920c7a52008-02-04 22:29:26 -08003139void free_pages(unsigned long addr, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003140{
3141 if (addr != 0) {
Nick Piggin725d7042006-09-25 23:30:55 -07003142 VM_BUG_ON(!virt_addr_valid((void *)addr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143 __free_pages(virt_to_page((void *)addr), order);
3144 }
3145}
3146
3147EXPORT_SYMBOL(free_pages);
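
/*
 * Usage sketch (illustrative, not taken from a real caller): the helpers
 * above are normally paired at the same order.
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages(buf, 2);
 */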
3148
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003149/*
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003150 * Page Fragment:
3151 * An arbitrary-length arbitrary-offset area of memory which resides
3152 * within a 0 or higher order page. Multiple fragments within that page
3153 * are individually refcounted, in the page's reference counter.
3154 *
3155 * The page_frag functions below provide a simple allocation framework for
3156 * page fragments. This is used by the network stack and network device
3157 * drivers to provide a backing region of memory for use as either an
3158 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
3159 */
3160static struct page *__page_frag_refill(struct page_frag_cache *nc,
3161 gfp_t gfp_mask)
3162{
3163 struct page *page = NULL;
3164 gfp_t gfp = gfp_mask;
3165
3166#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3167 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
3168 __GFP_NOMEMALLOC;
3169 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
3170 PAGE_FRAG_CACHE_MAX_ORDER);
3171 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
3172#endif
3173 if (unlikely(!page))
3174 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
3175
3176 nc->va = page ? page_address(page) : NULL;
3177
3178 return page;
3179}
3180
3181void *__alloc_page_frag(struct page_frag_cache *nc,
3182 unsigned int fragsz, gfp_t gfp_mask)
3183{
3184 unsigned int size = PAGE_SIZE;
3185 struct page *page;
3186 int offset;
3187
3188 if (unlikely(!nc->va)) {
3189refill:
3190 page = __page_frag_refill(nc, gfp_mask);
3191 if (!page)
3192 return NULL;
3193
3194#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3195 /* if size can vary use size else just use PAGE_SIZE */
3196 size = nc->size;
3197#endif
3198 /* Even if we own the page, we do not use atomic_set().
3199 * This would break get_page_unless_zero() users.
3200 */
3201 atomic_add(size - 1, &page->_count);
3202
3203 /* reset page count bias and offset to start of new frag */
Michal Hocko2f064f32015-08-21 14:11:51 -07003204 nc->pfmemalloc = page_is_pfmemalloc(page);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003205 nc->pagecnt_bias = size;
3206 nc->offset = size;
3207 }
3208
3209 offset = nc->offset - fragsz;
3210 if (unlikely(offset < 0)) {
3211 page = virt_to_page(nc->va);
3212
3213 if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
3214 goto refill;
3215
3216#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3217 /* if size can vary use size else just use PAGE_SIZE */
3218 size = nc->size;
3219#endif
3220 /* OK, page count is 0, we can safely set it */
3221 atomic_set(&page->_count, size);
3222
3223 /* reset page count bias and offset to start of new frag */
3224 nc->pagecnt_bias = size;
3225 offset = size - fragsz;
3226 }
3227
3228 nc->pagecnt_bias--;
3229 nc->offset = offset;
3230
3231 return nc->va + offset;
3232}
3233EXPORT_SYMBOL(__alloc_page_frag);
3234
3235/*
3236 * Frees a page fragment allocated out of either a compound or order 0 page.
3237 */
3238void __free_page_frag(void *addr)
3239{
3240 struct page *page = virt_to_head_page(addr);
3241
3242 if (unlikely(put_page_testzero(page)))
3243 __free_pages_ok(page, compound_order(page));
3244}
3245EXPORT_SYMBOL(__free_page_frag);
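
/*
 * Usage sketch (illustrative): a driver-private page_frag_cache hands out
 * small refcounted fragments that are released one by one. The cache name
 * and sizes below are made up for the example.
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *p = __alloc_page_frag(&frag_cache, 256, GFP_ATOMIC);
 *	if (p)
 *		__free_page_frag(p);
 */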
3246
3247/*
Vladimir Davydov52383432014-06-04 16:06:39 -07003248 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
3249 * of the current memory cgroup.
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003250 *
Vladimir Davydov52383432014-06-04 16:06:39 -07003251 * It should be used when the caller would like to use kmalloc, but since the
3252 * allocation is large, it has to fall back to the page allocator.
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003253 */
Vladimir Davydov52383432014-06-04 16:06:39 -07003254struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
3255{
3256 struct page *page;
Vladimir Davydov52383432014-06-04 16:06:39 -07003257
Vladimir Davydov52383432014-06-04 16:06:39 -07003258 page = alloc_pages(gfp_mask, order);
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08003259 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
3260 __free_pages(page, order);
3261 page = NULL;
3262 }
Vladimir Davydov52383432014-06-04 16:06:39 -07003263 return page;
3264}
3265
3266struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
3267{
3268 struct page *page;
Vladimir Davydov52383432014-06-04 16:06:39 -07003269
Vladimir Davydov52383432014-06-04 16:06:39 -07003270 page = alloc_pages_node(nid, gfp_mask, order);
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08003271 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
3272 __free_pages(page, order);
3273 page = NULL;
3274 }
Vladimir Davydov52383432014-06-04 16:06:39 -07003275 return page;
3276}
3277
3278/*
3279 * __free_kmem_pages and free_kmem_pages will free pages allocated with
3280 * alloc_kmem_pages.
3281 */
3282void __free_kmem_pages(struct page *page, unsigned int order)
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003283{
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08003284 memcg_kmem_uncharge(page, order);
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003285 __free_pages(page, order);
3286}
3287
Vladimir Davydov52383432014-06-04 16:06:39 -07003288void free_kmem_pages(unsigned long addr, unsigned int order)
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003289{
3290 if (addr != 0) {
3291 VM_BUG_ON(!virt_addr_valid((void *)addr));
Vladimir Davydov52383432014-06-04 16:06:39 -07003292 __free_kmem_pages(virt_to_page((void *)addr), order);
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003293 }
3294}
3295
Andi Kleenee85c2e2011-05-11 15:13:34 -07003296static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
3297{
3298 if (addr) {
3299 unsigned long alloc_end = addr + (PAGE_SIZE << order);
3300 unsigned long used = addr + PAGE_ALIGN(size);
3301
3302 split_page(virt_to_page((void *)addr), order);
3303 while (used < alloc_end) {
3304 free_page(used);
3305 used += PAGE_SIZE;
3306 }
3307 }
3308 return (void *)addr;
3309}
3310
Timur Tabi2be0ffe2008-07-23 21:28:11 -07003311/**
3312 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3313 * @size: the number of bytes to allocate
3314 * @gfp_mask: GFP flags for the allocation
3315 *
3316 * This function is similar to alloc_pages(), except that it allocates the
3317 * minimum number of pages to satisfy the request. alloc_pages() can only
3318 * allocate memory in power-of-two pages.
3319 *
3320 * This function is also limited by MAX_ORDER.
3321 *
3322 * Memory allocated by this function must be released by free_pages_exact().
3323 */
3324void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3325{
3326 unsigned int order = get_order(size);
3327 unsigned long addr;
3328
3329 addr = __get_free_pages(gfp_mask, order);
Andi Kleenee85c2e2011-05-11 15:13:34 -07003330 return make_alloc_exact(addr, order, size);
Timur Tabi2be0ffe2008-07-23 21:28:11 -07003331}
3332EXPORT_SYMBOL(alloc_pages_exact);
3333
3334/**
Andi Kleenee85c2e2011-05-11 15:13:34 -07003335 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3336 * pages on a node.
Randy Dunlapb5e6ab52011-05-16 13:16:54 -07003337 * @nid: the preferred node ID where memory should be allocated
Andi Kleenee85c2e2011-05-11 15:13:34 -07003338 * @size: the number of bytes to allocate
3339 * @gfp_mask: GFP flags for the allocation
3340 *
3341 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3342 * back.
Andi Kleenee85c2e2011-05-11 15:13:34 -07003343 */
Fabian Fredericke1931812014-08-06 16:04:59 -07003344void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
Andi Kleenee85c2e2011-05-11 15:13:34 -07003345{
3346 unsigned order = get_order(size);
3347 struct page *p = alloc_pages_node(nid, gfp_mask, order);
3348 if (!p)
3349 return NULL;
3350 return make_alloc_exact((unsigned long)page_address(p), order, size);
3351}
Andi Kleenee85c2e2011-05-11 15:13:34 -07003352
3353/**
Timur Tabi2be0ffe2008-07-23 21:28:11 -07003354 * free_pages_exact - release memory allocated via alloc_pages_exact()
3355 * @virt: the value returned by alloc_pages_exact.
3356 * @size: size of allocation, same value as passed to alloc_pages_exact().
3357 *
3358 * Release the memory allocated by a previous call to alloc_pages_exact.
3359 */
3360void free_pages_exact(void *virt, size_t size)
3361{
3362 unsigned long addr = (unsigned long)virt;
3363 unsigned long end = addr + PAGE_ALIGN(size);
3364
3365 while (addr < end) {
3366 free_page(addr);
3367 addr += PAGE_SIZE;
3368 }
3369}
3370EXPORT_SYMBOL(free_pages_exact);
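
/*
 * Usage sketch (illustrative): alloc_pages_exact() is useful when the size
 * is not a power of two; the unused tail of the rounded-up allocation is
 * handed back immediately, and the matching free must quote the same size.
 *
 *	void *buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 3 * PAGE_SIZE);
 */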
3371
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08003372/**
3373 * nr_free_zone_pages - count number of pages beyond high watermark
3374 * @offset: The zone index of the highest zone
3375 *
3376 * nr_free_zone_pages() counts the number of pages which are beyond the
3377 * high watermark within all zones at or below a given zone index. For each
3378 * zone, the number of pages is calculated as:
Jiang Liu834405c2013-07-03 15:03:04 -07003379 * managed_pages - high_pages
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08003380 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08003381static unsigned long nr_free_zone_pages(int offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382{
Mel Gormandd1a2392008-04-28 02:12:17 -07003383 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07003384 struct zone *zone;
3385
Martin J. Blighe310fd42005-07-29 22:59:18 -07003386 /* Just pick one node, since fallback list is circular */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08003387 unsigned long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388
Mel Gorman0e884602008-04-28 02:12:14 -07003389 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390
Mel Gorman54a6eb52008-04-28 02:12:16 -07003391 for_each_zone_zonelist(zone, z, zonelist, offset) {
Jiang Liub40da042013-02-22 16:33:52 -08003392 unsigned long size = zone->managed_pages;
Mel Gorman41858962009-06-16 15:32:12 -07003393 unsigned long high = high_wmark_pages(zone);
Martin J. Blighe310fd42005-07-29 22:59:18 -07003394 if (size > high)
3395 sum += size - high;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 }
3397
3398 return sum;
3399}
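
/*
 * Worked example (numbers invented for illustration): with two populated
 * zones whose managed/high pages are 1000/50 and 4000/200, the sum above
 * is (1000 - 50) + (4000 - 200) = 4750 pages beyond the high watermarks.
 */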
3400
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08003401/**
3402 * nr_free_buffer_pages - count number of pages beyond high watermark
3403 *
3404 * nr_free_buffer_pages() counts the number of pages which are beyond the high
3405 * watermark within ZONE_DMA and ZONE_NORMAL.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08003407unsigned long nr_free_buffer_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408{
Al Viroaf4ca452005-10-21 02:55:38 -04003409 return nr_free_zone_pages(gfp_zone(GFP_USER));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410}
Meelap Shahc2f1a552007-07-17 04:04:39 -07003411EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08003413/**
3414 * nr_free_pagecache_pages - count number of pages beyond high watermark
3415 *
3416 * nr_free_pagecache_pages() counts the number of pages which are beyond the
3417 * high watermark within all zones.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08003419unsigned long nr_free_pagecache_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420{
Mel Gorman2a1e2742007-07-17 04:03:12 -07003421 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422}
Christoph Lameter08e0f6a2006-09-27 01:50:06 -07003423
3424static inline void show_node(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425{
Kirill A. Shutemove5adfff2012-12-11 16:00:29 -08003426 if (IS_ENABLED(CONFIG_NUMA))
Andy Whitcroft25ba77c2006-12-06 20:33:03 -08003427 printk("Node %d ", zone_to_nid(zone));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430void si_meminfo(struct sysinfo *val)
3431{
3432 val->totalram = totalram_pages;
Rafael Aquinicc7452b2014-08-06 16:06:38 -07003433 val->sharedram = global_page_state(NR_SHMEM);
Christoph Lameterd23ad422007-02-10 01:43:02 -08003434 val->freeram = global_page_state(NR_FREE_PAGES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435 val->bufferram = nr_blockdev_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 val->totalhigh = totalhigh_pages;
3437 val->freehigh = nr_free_highpages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438 val->mem_unit = PAGE_SIZE;
3439}
3440
3441EXPORT_SYMBOL(si_meminfo);
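
/*
 * Usage sketch (illustrative): consumers convert the returned page counts
 * using mem_unit, mirroring the K() macro defined later in this file.
 *
 *	struct sysinfo si;
 *
 *	si_meminfo(&si);
 *	pr_info("free: %lu kB\n", si.freeram << (PAGE_SHIFT - 10));
 */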
3442
3443#ifdef CONFIG_NUMA
3444void si_meminfo_node(struct sysinfo *val, int nid)
3445{
Jiang Liucdd91a72013-07-03 15:03:27 -07003446 int zone_type; /* needs to be signed */
3447 unsigned long managed_pages = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448 pg_data_t *pgdat = NODE_DATA(nid);
3449
Jiang Liucdd91a72013-07-03 15:03:27 -07003450 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
3451 managed_pages += pgdat->node_zones[zone_type].managed_pages;
3452 val->totalram = managed_pages;
Rafael Aquinicc7452b2014-08-06 16:06:38 -07003453 val->sharedram = node_page_state(nid, NR_SHMEM);
Christoph Lameterd23ad422007-02-10 01:43:02 -08003454 val->freeram = node_page_state(nid, NR_FREE_PAGES);
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07003455#ifdef CONFIG_HIGHMEM
Jiang Liub40da042013-02-22 16:33:52 -08003456 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
Christoph Lameterd23ad422007-02-10 01:43:02 -08003457 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
3458 NR_FREE_PAGES);
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07003459#else
3460 val->totalhigh = 0;
3461 val->freehigh = 0;
3462#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 val->mem_unit = PAGE_SIZE;
3464}
3465#endif
3466
David Rientjesddd588b2011-03-22 16:30:46 -07003467/*
David Rientjes7bf02ea2011-05-24 17:11:16 -07003468 * Determine whether the node should be displayed or not, depending on whether
3469 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
David Rientjesddd588b2011-03-22 16:30:46 -07003470 */
David Rientjes7bf02ea2011-05-24 17:11:16 -07003471bool skip_free_areas_node(unsigned int flags, int nid)
David Rientjesddd588b2011-03-22 16:30:46 -07003472{
3473 bool ret = false;
Mel Gormancc9a6c82012-03-21 16:34:11 -07003474 unsigned int cpuset_mems_cookie;
David Rientjesddd588b2011-03-22 16:30:46 -07003475
3476 if (!(flags & SHOW_MEM_FILTER_NODES))
3477 goto out;
3478
Mel Gormancc9a6c82012-03-21 16:34:11 -07003479 do {
Mel Gormand26914d2014-04-03 14:47:24 -07003480 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07003481 ret = !node_isset(nid, cpuset_current_mems_allowed);
Mel Gormand26914d2014-04-03 14:47:24 -07003482 } while (read_mems_allowed_retry(cpuset_mems_cookie));
David Rientjesddd588b2011-03-22 16:30:46 -07003483out:
3484 return ret;
3485}
3486
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487#define K(x) ((x) << (PAGE_SHIFT-10))
3488
Rabin Vincent377e4f12012-12-11 16:00:24 -08003489static void show_migration_types(unsigned char type)
3490{
3491 static const char types[MIGRATE_TYPES] = {
3492 [MIGRATE_UNMOVABLE] = 'U',
3493 [MIGRATE_RECLAIMABLE] = 'E',
3494 [MIGRATE_MOVABLE] = 'M',
3495 [MIGRATE_RESERVE] = 'R',
3496#ifdef CONFIG_CMA
3497 [MIGRATE_CMA] = 'C',
3498#endif
Minchan Kim194159f2013-02-22 16:33:58 -08003499#ifdef CONFIG_MEMORY_ISOLATION
Rabin Vincent377e4f12012-12-11 16:00:24 -08003500 [MIGRATE_ISOLATE] = 'I',
Minchan Kim194159f2013-02-22 16:33:58 -08003501#endif
Rabin Vincent377e4f12012-12-11 16:00:24 -08003502 };
3503 char tmp[MIGRATE_TYPES + 1];
3504 char *p = tmp;
3505 int i;
3506
3507 for (i = 0; i < MIGRATE_TYPES; i++) {
3508 if (type & (1 << i))
3509 *p++ = types[i];
3510 }
3511
3512 *p = '\0';
3513 printk("(%s) ", tmp);
3514}
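/*
 * For example, a free_area whose lists currently hold unmovable, movable
 * and CMA pages has the U, M and C bits set in @type, so the loop above
 * prints "(UMC) ": one letter per set bit, in migratetype order.
 */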
3515
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516/*
3517 * Show free area list (used inside shift_scroll-lock stuff)
3518 * We also calculate the percentage fragmentation. We do this by counting the
3519 * memory on each free list with the exception of the first item on the list.
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003520 *
3521 * Bits in @filter:
3522 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
3523 * cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 */
David Rientjes7bf02ea2011-05-24 17:11:16 -07003525void show_free_areas(unsigned int filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526{
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003527 unsigned long free_pcp = 0;
Jes Sorensenc7241912006-09-27 01:50:05 -07003528 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529 struct zone *zone;
3530
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07003531 for_each_populated_zone(zone) {
David Rientjes7bf02ea2011-05-24 17:11:16 -07003532 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07003533 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003534
Konstantin Khlebnikov761b0672015-04-14 15:45:32 -07003535 for_each_online_cpu(cpu)
3536 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537 }
3538
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07003539 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
3540 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003541 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
3542 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07003543 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003544 " free:%lu free_pcp:%lu free_cma:%lu\n",
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003545 global_page_state(NR_ACTIVE_ANON),
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003546 global_page_state(NR_INACTIVE_ANON),
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07003547 global_page_state(NR_ISOLATED_ANON),
3548 global_page_state(NR_ACTIVE_FILE),
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003549 global_page_state(NR_INACTIVE_FILE),
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07003550 global_page_state(NR_ISOLATED_FILE),
Lee Schermerhorn7b854122008-10-18 20:26:40 -07003551 global_page_state(NR_UNEVICTABLE),
Christoph Lameterb1e7a8f2006-06-30 01:55:39 -07003552 global_page_state(NR_FILE_DIRTY),
Christoph Lameterce866b32006-06-30 01:55:40 -07003553 global_page_state(NR_WRITEBACK),
Christoph Lameterfd39fc82006-06-30 01:55:40 -07003554 global_page_state(NR_UNSTABLE_NFS),
KOSAKI Motohiro3701b032009-09-21 17:01:29 -07003555 global_page_state(NR_SLAB_RECLAIMABLE),
3556 global_page_state(NR_SLAB_UNRECLAIMABLE),
Christoph Lameter65ba55f2006-06-30 01:55:34 -07003557 global_page_state(NR_FILE_MAPPED),
KOSAKI Motohiro4b021082009-09-21 17:01:33 -07003558 global_page_state(NR_SHMEM),
Andrew Mortona25700a2007-02-08 14:20:40 -08003559 global_page_state(NR_PAGETABLE),
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07003560 global_page_state(NR_BOUNCE),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003561 global_page_state(NR_FREE_PAGES),
3562 free_pcp,
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07003563 global_page_state(NR_FREE_CMA_PAGES));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07003565 for_each_populated_zone(zone) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003566 int i;
3567
David Rientjes7bf02ea2011-05-24 17:11:16 -07003568 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07003569 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003570
3571 free_pcp = 0;
3572 for_each_online_cpu(cpu)
3573 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
3574
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575 show_node(zone);
3576 printk("%s"
3577 " free:%lukB"
3578 " min:%lukB"
3579 " low:%lukB"
3580 " high:%lukB"
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003581 " active_anon:%lukB"
3582 " inactive_anon:%lukB"
3583 " active_file:%lukB"
3584 " inactive_file:%lukB"
Lee Schermerhorn7b854122008-10-18 20:26:40 -07003585 " unevictable:%lukB"
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07003586 " isolated(anon):%lukB"
3587 " isolated(file):%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588 " present:%lukB"
Jiang Liu9feedc92012-12-12 13:52:12 -08003589 " managed:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003590 " mlocked:%lukB"
3591 " dirty:%lukB"
3592 " writeback:%lukB"
3593 " mapped:%lukB"
KOSAKI Motohiro4b021082009-09-21 17:01:33 -07003594 " shmem:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003595 " slab_reclaimable:%lukB"
3596 " slab_unreclaimable:%lukB"
KOSAKI Motohiroc6a7f572009-09-21 17:01:32 -07003597 " kernel_stack:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003598 " pagetables:%lukB"
3599 " unstable:%lukB"
3600 " bounce:%lukB"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003601 " free_pcp:%lukB"
3602 " local_pcp:%ukB"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07003603 " free_cma:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003604 " writeback_tmp:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 " pages_scanned:%lu"
3606 " all_unreclaimable? %s"
3607 "\n",
3608 zone->name,
Mel Gorman88f5acf2011-01-13 15:45:41 -08003609 K(zone_page_state(zone, NR_FREE_PAGES)),
Mel Gorman41858962009-06-16 15:32:12 -07003610 K(min_wmark_pages(zone)),
3611 K(low_wmark_pages(zone)),
3612 K(high_wmark_pages(zone)),
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003613 K(zone_page_state(zone, NR_ACTIVE_ANON)),
3614 K(zone_page_state(zone, NR_INACTIVE_ANON)),
3615 K(zone_page_state(zone, NR_ACTIVE_FILE)),
3616 K(zone_page_state(zone, NR_INACTIVE_FILE)),
Lee Schermerhorn7b854122008-10-18 20:26:40 -07003617 K(zone_page_state(zone, NR_UNEVICTABLE)),
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07003618 K(zone_page_state(zone, NR_ISOLATED_ANON)),
3619 K(zone_page_state(zone, NR_ISOLATED_FILE)),
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 K(zone->present_pages),
Jiang Liu9feedc92012-12-12 13:52:12 -08003621 K(zone->managed_pages),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003622 K(zone_page_state(zone, NR_MLOCK)),
3623 K(zone_page_state(zone, NR_FILE_DIRTY)),
3624 K(zone_page_state(zone, NR_WRITEBACK)),
3625 K(zone_page_state(zone, NR_FILE_MAPPED)),
KOSAKI Motohiro4b021082009-09-21 17:01:33 -07003626 K(zone_page_state(zone, NR_SHMEM)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003627 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3628 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
KOSAKI Motohiroc6a7f572009-09-21 17:01:32 -07003629 zone_page_state(zone, NR_KERNEL_STACK) *
3630 THREAD_SIZE / 1024,
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003631 K(zone_page_state(zone, NR_PAGETABLE)),
3632 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3633 K(zone_page_state(zone, NR_BOUNCE)),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07003634 K(free_pcp),
3635 K(this_cpu_read(zone->pageset->pcp.count)),
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07003636 K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07003637 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
Mel Gorman0d5d8232014-08-06 16:07:16 -07003638 K(zone_page_state(zone, NR_PAGES_SCANNED)),
Lisa Du6e543d52013-09-11 14:22:36 -07003639 (!zone_reclaimable(zone) ? "yes" : "no")
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 );
3641 printk("lowmem_reserve[]:");
3642 for (i = 0; i < MAX_NR_ZONES; i++)
Mel Gorman3484b2d2014-08-06 16:07:14 -07003643 printk(" %ld", zone->lowmem_reserve[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644 printk("\n");
3645 }
3646
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07003647 for_each_populated_zone(zone) {
Pintu Kumarb8af2942013-09-11 14:20:34 -07003648 unsigned long nr[MAX_ORDER], flags, order, total = 0;
Rabin Vincent377e4f12012-12-11 16:00:24 -08003649 unsigned char types[MAX_ORDER];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650
David Rientjes7bf02ea2011-05-24 17:11:16 -07003651 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07003652 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653 show_node(zone);
3654 printk("%s: ", zone->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003655
3656 spin_lock_irqsave(&zone->lock, flags);
3657 for (order = 0; order < MAX_ORDER; order++) {
Rabin Vincent377e4f12012-12-11 16:00:24 -08003658 struct free_area *area = &zone->free_area[order];
3659 int type;
3660
3661 nr[order] = area->nr_free;
Kirill Korotaev8f9de512006-06-23 02:03:50 -07003662 total += nr[order] << order;
Rabin Vincent377e4f12012-12-11 16:00:24 -08003663
3664 types[order] = 0;
3665 for (type = 0; type < MIGRATE_TYPES; type++) {
3666 if (!list_empty(&area->free_list[type]))
3667 types[order] |= 1 << type;
3668 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669 }
3670 spin_unlock_irqrestore(&zone->lock, flags);
Rabin Vincent377e4f12012-12-11 16:00:24 -08003671 for (order = 0; order < MAX_ORDER; order++) {
Kirill Korotaev8f9de512006-06-23 02:03:50 -07003672 printk("%lu*%lukB ", nr[order], K(1UL) << order);
Rabin Vincent377e4f12012-12-11 16:00:24 -08003673 if (nr[order])
3674 show_migration_types(types[order]);
3675 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003676 printk("= %lukB\n", K(total));
3677 }
3678
David Rientjes949f7ec2013-04-29 15:07:48 -07003679 hugetlb_show_meminfo();
3680
Larry Woodmane6f36022008-02-04 22:29:30 -08003681 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3682
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683 show_swap_cache_info();
3684}
3685
Mel Gorman19770b32008-04-28 02:12:18 -07003686static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3687{
3688 zoneref->zone = zone;
3689 zoneref->zone_idx = zone_idx(zone);
3690}
3691
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692/*
3693 * Builds allocation fallback zone lists.
Christoph Lameter1a932052006-01-06 00:11:16 -08003694 *
3695 * Add all populated zones of a node to the zonelist.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003696 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003697static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
Zhang Yanfeibc732f12013-07-08 16:00:06 -07003698 int nr_zones)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699{
Christoph Lameter1a932052006-01-06 00:11:16 -08003700 struct zone *zone;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07003701 enum zone_type zone_type = MAX_NR_ZONES;
Christoph Lameter02a68a52006-01-06 00:11:18 -08003702
3703 do {
Christoph Lameter2f6726e2006-09-25 23:31:18 -07003704 zone_type--;
Christoph Lameter070f8032006-01-06 00:11:19 -08003705 zone = pgdat->node_zones + zone_type;
Christoph Lameter1a932052006-01-06 00:11:16 -08003706 if (populated_zone(zone)) {
Mel Gormandd1a2392008-04-28 02:12:17 -07003707 zoneref_set_zone(zone,
3708 &zonelist->_zonerefs[nr_zones++]);
Christoph Lameter070f8032006-01-06 00:11:19 -08003709 check_highest_zone(zone_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 }
Christoph Lameter2f6726e2006-09-25 23:31:18 -07003711 } while (zone_type);
Zhang Yanfeibc732f12013-07-08 16:00:06 -07003712
Christoph Lameter070f8032006-01-06 00:11:19 -08003713 return nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714}
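/*
 * For example, a node with populated DMA, Normal and HighMem zones ends up
 * with zonerefs ordered HighMem, Normal, DMA: the downward zone_type walk
 * above puts the least constrained zone first, so allocations only fall
 * back to the more precious low zones when the higher ones are exhausted.
 */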
3715
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003716
3717/*
3718 * zonelist_order:
3719 * 0 = automatic detection of better ordering.
3720 * 1 = order by ([node] distance, -zonetype)
3721 * 2 = order by (-zonetype, [node] distance)
3722 *
3723 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3724 * the same zonelist. So only NUMA can configure this param.
3725 */
3726#define ZONELIST_ORDER_DEFAULT 0
3727#define ZONELIST_ORDER_NODE 1
3728#define ZONELIST_ORDER_ZONE 2
3729
3730/* zonelist order in the kernel.
3731 * set_zonelist_order() will set this to NODE or ZONE.
3732 */
3733static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3734static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3735
3736
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737#ifdef CONFIG_NUMA
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003738/* The ordering the user asked for; may be set from the command line or sysctl. */
3739static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3740/* string for sysctl */
3741#define NUMA_ZONELIST_ORDER_LEN 16
3742char numa_zonelist_order[16] = "default";
3743
3744/*
3745 * interface for configuring zonelist ordering.
3746 * command line option "numa_zonelist_order"
3747 * = "[dD]efault" - default, automatic configuration.
3748 * = "[nN]ode" - order by node locality, then by zone within node
3749 * = "[zZ]one" - order by zone, then by locality within zone
3750 */
3751
3752static int __parse_numa_zonelist_order(char *s)
3753{
3754 if (*s == 'd' || *s == 'D') {
3755 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3756 } else if (*s == 'n' || *s == 'N') {
3757 user_zonelist_order = ZONELIST_ORDER_NODE;
3758 } else if (*s == 'z' || *s == 'Z') {
3759 user_zonelist_order = ZONELIST_ORDER_ZONE;
3760 } else {
3761 printk(KERN_WARNING
3762 "Ignoring invalid numa_zonelist_order value: "
3763 "%s\n", s);
3764 return -EINVAL;
3765 }
3766 return 0;
3767}
3768
3769static __init int setup_numa_zonelist_order(char *s)
3770{
Volodymyr G. Lukiianykecb256f2011-01-13 15:46:26 -08003771 int ret;
3772
3773 if (!s)
3774 return 0;
3775
3776 ret = __parse_numa_zonelist_order(s);
3777 if (ret == 0)
3778 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3779
3780 return ret;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003781}
3782early_param("numa_zonelist_order", setup_numa_zonelist_order);
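/*
 * For example, booting with "numa_zonelist_order=zone" or writing "zone"
 * to /proc/sys/vm/numa_zonelist_order (see the handler below) selects
 * ZONELIST_ORDER_ZONE; a string starting with anything other than
 * d/D, n/N or z/Z is rejected with -EINVAL.
 */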
3783
3784/*
3785 * sysctl handler for numa_zonelist_order
3786 */
Joe Perchescccad5b2014-06-06 14:38:09 -07003787int numa_zonelist_order_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07003788 void __user *buffer, size_t *length,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003789 loff_t *ppos)
3790{
3791 char saved_string[NUMA_ZONELIST_ORDER_LEN];
3792 int ret;
Andi Kleen443c6f12009-12-23 21:00:47 +01003793 static DEFINE_MUTEX(zl_order_mutex);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003794
Andi Kleen443c6f12009-12-23 21:00:47 +01003795 mutex_lock(&zl_order_mutex);
Chen Gangdacbde02013-07-03 15:02:35 -07003796 if (write) {
3797 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
3798 ret = -EINVAL;
3799 goto out;
3800 }
3801 strcpy(saved_string, (char *)table->data);
3802 }
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07003803 ret = proc_dostring(table, write, buffer, length, ppos);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003804 if (ret)
Andi Kleen443c6f12009-12-23 21:00:47 +01003805 goto out;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003806 if (write) {
3807 int oldval = user_zonelist_order;
Chen Gangdacbde02013-07-03 15:02:35 -07003808
3809 ret = __parse_numa_zonelist_order((char *)table->data);
3810 if (ret) {
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003811 /*
3812 * bogus value. restore saved string
3813 */
Chen Gangdacbde02013-07-03 15:02:35 -07003814 strncpy((char *)table->data, saved_string,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003815 NUMA_ZONELIST_ORDER_LEN);
3816 user_zonelist_order = oldval;
Haicheng Li4eaf3f62010-05-24 14:32:52 -07003817 } else if (oldval != user_zonelist_order) {
3818 mutex_lock(&zonelists_mutex);
Jiang Liu9adb62a2012-07-31 16:43:28 -07003819 build_all_zonelists(NULL, NULL);
Haicheng Li4eaf3f62010-05-24 14:32:52 -07003820 mutex_unlock(&zonelists_mutex);
3821 }
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003822 }
Andi Kleen443c6f12009-12-23 21:00:47 +01003823out:
3824 mutex_unlock(&zl_order_mutex);
3825 return ret;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003826}
3827
3828
Christoph Lameter62bc62a2009-06-16 15:32:15 -07003829#define MAX_NODE_LOAD (nr_online_nodes)
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003830static int node_load[MAX_NUMNODES];
3831
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832/**
Pavel Pisa4dc3b162005-05-01 08:59:25 -07003833 * find_next_best_node - find the next node that should appear in a given node's fallback list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834 * @node: node whose fallback list we're appending
3835 * @used_node_mask: nodemask_t of already used nodes
3836 *
3837 * We use a number of factors to determine which is the next node that should
3838 * appear on a given node's fallback list. The node should not have appeared
3839 * already in @node's fallback list, and it should be the next closest node
3840 * according to the distance array (which contains arbitrary distance values
3841 * from each node to each node in the system), and should also prefer nodes
3842 * with no CPUs, since presumably they'll have very little allocation pressure
3843 * on them otherwise.
3844 * It returns -1 if no node is found.
3845 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003846static int find_next_best_node(int node, nodemask_t *used_node_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847{
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01003848 int n, val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849 int min_val = INT_MAX;
David Rientjes00ef2d22013-02-22 16:35:36 -08003850 int best_node = NUMA_NO_NODE;
Rusty Russella70f7302009-03-13 14:49:46 +10303851 const struct cpumask *tmp = cpumask_of_node(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003852
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01003853 /* Use the local node if we haven't already */
3854 if (!node_isset(node, *used_node_mask)) {
3855 node_set(node, *used_node_mask);
3856 return node;
3857 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08003859 for_each_node_state(n, N_MEMORY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860
3861 /* Don't want a node to appear more than once */
3862 if (node_isset(n, *used_node_mask))
3863 continue;
3864
Linus Torvalds1da177e2005-04-16 15:20:36 -07003865 /* Use the distance array to find the distance */
3866 val = node_distance(node, n);
3867
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01003868 /* Penalize nodes under us ("prefer the next node") */
3869 val += (n < node);
3870
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871 /* Give preference to headless and unused nodes */
Rusty Russella70f7302009-03-13 14:49:46 +10303872 tmp = cpumask_of_node(n);
3873 if (!cpumask_empty(tmp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874 val += PENALTY_FOR_NODE_WITH_CPUS;
3875
3876 /* Slight preference for less loaded node */
3877 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3878 val += node_load[n];
3879
3880 if (val < min_val) {
3881 min_val = val;
3882 best_node = n;
3883 }
3884 }
3885
3886 if (best_node >= 0)
3887 node_set(best_node, *used_node_mask);
3888
3889 return best_node;
3890}
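/*
 * Note on the scaling above: multiplying by MAX_NODE_LOAD * MAX_NUMNODES
 * before adding node_load[n] keeps the load a tie-breaker only; since
 * node_load[n] never reaches that factor, it can reorder two candidates
 * only when their distance (plus the small CPU and ordering penalties)
 * is identical.
 */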
3891
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003892
3893/*
3894 * Build zonelists ordered by node and zones within node.
3895 * This results in maximum locality--normal zone overflows into local
3896 * DMA zone, if any--but risks exhausting DMA zone.
3897 */
3898static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003900 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003901 struct zonelist *zonelist;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003902
Mel Gorman54a6eb52008-04-28 02:12:16 -07003903 zonelist = &pgdat->node_zonelists[0];
Mel Gormandd1a2392008-04-28 02:12:17 -07003904 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
Mel Gorman54a6eb52008-04-28 02:12:16 -07003905 ;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07003906 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Mel Gormandd1a2392008-04-28 02:12:17 -07003907 zonelist->_zonerefs[j].zone = NULL;
3908 zonelist->_zonerefs[j].zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003909}
3910
3911/*
Christoph Lameter523b9452007-10-16 01:25:37 -07003912 * Build gfp_thisnode zonelists
3913 */
3914static void build_thisnode_zonelists(pg_data_t *pgdat)
3915{
Christoph Lameter523b9452007-10-16 01:25:37 -07003916 int j;
3917 struct zonelist *zonelist;
3918
Mel Gorman54a6eb52008-04-28 02:12:16 -07003919 zonelist = &pgdat->node_zonelists[1];
Zhang Yanfeibc732f12013-07-08 16:00:06 -07003920 j = build_zonelists_node(pgdat, zonelist, 0);
Mel Gormandd1a2392008-04-28 02:12:17 -07003921 zonelist->_zonerefs[j].zone = NULL;
3922 zonelist->_zonerefs[j].zone_idx = 0;
Christoph Lameter523b9452007-10-16 01:25:37 -07003923}
3924
3925/*
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003926 * Build zonelists ordered by zone and nodes within zones.
3927 * This results in conserving DMA zone[s] until all Normal memory is
3928 * exhausted, but may overflow to a remote node while memory
3929 * still exists in the local DMA zone.
3930 */
3931static int node_order[MAX_NUMNODES];
3932
3933static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3934{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003935 int pos, j, node;
3936 int zone_type; /* needs to be signed */
3937 struct zone *z;
3938 struct zonelist *zonelist;
3939
Mel Gorman54a6eb52008-04-28 02:12:16 -07003940 zonelist = &pgdat->node_zonelists[0];
3941 pos = 0;
3942 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3943 for (j = 0; j < nr_nodes; j++) {
3944 node = node_order[j];
3945 z = &NODE_DATA(node)->node_zones[zone_type];
3946 if (populated_zone(z)) {
Mel Gormandd1a2392008-04-28 02:12:17 -07003947 zoneref_set_zone(z,
3948 &zonelist->_zonerefs[pos++]);
Mel Gorman54a6eb52008-04-28 02:12:16 -07003949 check_highest_zone(zone_type);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003950 }
3951 }
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003952 }
Mel Gormandd1a2392008-04-28 02:12:17 -07003953 zonelist->_zonerefs[pos].zone = NULL;
3954 zonelist->_zonerefs[pos].zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003955}
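/*
 * For example, with two nodes that each have Normal and DMA populated,
 * node ordering gives node 0 the list N0-Normal, N0-DMA, N1-Normal,
 * N1-DMA, while the zone ordering built here gives N0-Normal, N1-Normal,
 * N0-DMA, N1-DMA, keeping both DMA zones as the last resort.
 */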
3956
Mel Gorman31939132014-10-09 15:28:30 -07003957#if defined(CONFIG_64BIT)
3958/*
3959 * Devices that require DMA32/DMA are relatively rare and do not justify a
3960 * penalty to every machine in case the specialised case applies. Default
3961 * to Node-ordering on 64-bit NUMA machines
3962 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003963static int default_zonelist_order(void)
3964{
Mel Gorman31939132014-10-09 15:28:30 -07003965 return ZONELIST_ORDER_NODE;
3966}
3967#else
3968/*
3969 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
3970 * by the kernel. If processes running on node 0 deplete the low memory zone
3971 * then reclaim will occur more frequently, increasing stalls and potentially
3972 * making it easier to OOM if a large percentage of the zone is under writeback or
3973 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
3974 * Hence, default to zone ordering on 32-bit.
3975 */
3976static int default_zonelist_order(void)
3977{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003978 return ZONELIST_ORDER_ZONE;
3979}
Mel Gorman31939132014-10-09 15:28:30 -07003980#endif /* CONFIG_64BIT */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003981
3982static void set_zonelist_order(void)
3983{
3984 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3985 current_zonelist_order = default_zonelist_order();
3986 else
3987 current_zonelist_order = user_zonelist_order;
3988}
3989
3990static void build_zonelists(pg_data_t *pgdat)
3991{
3992 int j, node, load;
3993 enum zone_type i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994 nodemask_t used_mask;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07003995 int local_node, prev_node;
3996 struct zonelist *zonelist;
3997 int order = current_zonelist_order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998
3999 /* initialize zonelists */
Christoph Lameter523b9452007-10-16 01:25:37 -07004000 for (i = 0; i < MAX_ZONELISTS; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004001 zonelist = pgdat->node_zonelists + i;
Mel Gormandd1a2392008-04-28 02:12:17 -07004002 zonelist->_zonerefs[0].zone = NULL;
4003 zonelist->_zonerefs[0].zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004 }
4005
4006 /* NUMA-aware ordering of nodes */
4007 local_node = pgdat->node_id;
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004008 load = nr_online_nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009 prev_node = local_node;
4010 nodes_clear(used_mask);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004011
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004012 memset(node_order, 0, sizeof(node_order));
4013 j = 0;
4014
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4016 /*
4017 * We don't want to pressure a particular node.
4018 * So we add a penalty to the first node in the same
4019 * distance group to make the selection round-robin.
4020 */
David Rientjes957f8222012-10-08 16:33:24 -07004021 if (node_distance(local_node, node) !=
4022 node_distance(local_node, prev_node))
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004023 node_load[node] = load;
4024
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025 prev_node = node;
4026 load--;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004027 if (order == ZONELIST_ORDER_NODE)
4028 build_zonelists_in_node_order(pgdat, node);
4029 else
4030 node_order[j++] = node; /* remember order */
4031 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004033 if (order == ZONELIST_ORDER_ZONE) {
4034 /* calculate node order -- i.e., DMA last! */
4035 build_zonelists_in_zone_order(pgdat, j);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036 }
Christoph Lameter523b9452007-10-16 01:25:37 -07004037
4038 build_thisnode_zonelists(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039}
4040
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004041#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4042/*
4043 * Return node id of node used for "local" allocations.
4044 * I.e., first node id of first zone in arg node's generic zonelist.
4045 * Used for initializing percpu 'numa_mem', which is used primarily
4046 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
4047 */
4048int local_memory_node(int node)
4049{
4050 struct zone *zone;
4051
4052 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
4053 gfp_zone(GFP_KERNEL),
4054 NULL,
4055 &zone);
4056 return zone->node;
4057}
4058#endif
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004059
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060#else /* CONFIG_NUMA */
4061
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004062static void set_zonelist_order(void)
4063{
4064 current_zonelist_order = ZONELIST_ORDER_ZONE;
4065}
4066
4067static void build_zonelists(pg_data_t *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068{
Christoph Lameter19655d32006-09-25 23:31:19 -07004069 int node, local_node;
Mel Gorman54a6eb52008-04-28 02:12:16 -07004070 enum zone_type j;
4071 struct zonelist *zonelist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072
4073 local_node = pgdat->node_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074
Mel Gorman54a6eb52008-04-28 02:12:16 -07004075 zonelist = &pgdat->node_zonelists[0];
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004076 j = build_zonelists_node(pgdat, zonelist, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077
Mel Gorman54a6eb52008-04-28 02:12:16 -07004078 /*
4079 * Now we build the zonelist so that it contains the zones
4080 * of all the other nodes.
4081 * We don't want to pressure a particular node, so when
4082 * building the zones for node N, we make sure that the
4083 * zones coming right after the local ones are those from
4084 * node N+1 (modulo N)
4085 */
4086 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
4087 if (!node_online(node))
4088 continue;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004089 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07004091 for (node = 0; node < local_node; node++) {
4092 if (!node_online(node))
4093 continue;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004094 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Mel Gorman54a6eb52008-04-28 02:12:16 -07004095 }
4096
Mel Gormandd1a2392008-04-28 02:12:17 -07004097 zonelist->_zonerefs[j].zone = NULL;
4098 zonelist->_zonerefs[j].zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099}
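/*
 * For example, with four online nodes and local_node == 1, the loops above
 * walk the nodes in the order 1, 2, 3, 0: the local node's zones come first
 * and the remaining nodes follow round-robin so that no single node absorbs
 * all of the fallback pressure.
 */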
4100
4101#endif /* CONFIG_NUMA */
4102
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004103/*
4104 * Boot pageset table. One per cpu which is going to be used for all
4105 * zones and all nodes. The parameters will be set in such a way
4106 * that an item put on a list will immediately be handed over to
4107 * the buddy list. This is safe since pageset manipulation is done
4108 * with interrupts disabled.
4109 *
4110 * The boot_pagesets must be kept even after bootup is complete for
4111 * unused processors and/or zones. They do play a role for bootstrapping
4112 * hotplugged processors.
4113 *
4114 * zoneinfo_show() and maybe other functions do
4115 * not check if the processor is online before following the pageset pointer.
4116 * Other parts of the kernel may not check if the zone is available.
4117 */
4118static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
4119static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
Haicheng Li1f522502010-05-24 14:32:51 -07004120static void setup_zone_pageset(struct zone *zone);
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004121
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004122/*
4123 * Global mutex to protect against size modification of zonelists
4124 * as well as to serialize pageset setup for a newly populated zone.
4125 */
4126DEFINE_MUTEX(zonelists_mutex);
4127
Rusty Russell9b1a4d32008-07-28 12:16:30 -05004128/* The int return value exists only to satisfy the stop_machine() callback type. */
Jiang Liu4ed7e022012-07-31 16:43:35 -07004129static int __build_all_zonelists(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130{
Yasunori Goto68113782006-06-23 02:03:11 -07004131 int nid;
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004132 int cpu;
Jiang Liu9adb62a2012-07-31 16:43:28 -07004133 pg_data_t *self = data;
Paul Jackson9276b1bc2006-12-06 20:31:48 -08004134
Bo Liu7f9cfb32009-08-18 14:11:19 -07004135#ifdef CONFIG_NUMA
4136 memset(node_load, 0, sizeof(node_load));
4137#endif
Jiang Liu9adb62a2012-07-31 16:43:28 -07004138
4139 if (self && !node_online(self->node_id)) {
4140 build_zonelists(self);
Jiang Liu9adb62a2012-07-31 16:43:28 -07004141 }
4142
Paul Jackson9276b1bc2006-12-06 20:31:48 -08004143 for_each_online_node(nid) {
Christoph Lameter7ea15302007-10-16 01:25:29 -07004144 pg_data_t *pgdat = NODE_DATA(nid);
4145
4146 build_zonelists(pgdat);
Paul Jackson9276b1bc2006-12-06 20:31:48 -08004147 }
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004148
4149 /*
4150 * Initialize the boot_pagesets that are going to be used
4151 * for bootstrapping processors. The real pagesets for
4152 * each zone will be allocated later when the per cpu
4153 * allocator is available.
4154 *
4155 * boot_pagesets are used also for bootstrapping offline
4156 * cpus if the system is already booted because the pagesets
4157 * are needed to initialize allocators on a specific cpu too.
4158 * F.e. the percpu allocator needs the page allocator which
4159 * needs the percpu allocator in order to allocate its pagesets
4160 * (a chicken-egg dilemma).
4161 */
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004162 for_each_possible_cpu(cpu) {
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004163 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
4164
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004165#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4166 /*
4167 * We now know the "local memory node" for each node--
4168 * i.e., the node of the first zone in the generic zonelist.
4169 * Set up numa_mem percpu variable for on-line cpus. During
4170 * boot, only the boot cpu should be on-line; we'll init the
4171 * secondary cpus' numa_mem as they come on-line. During
4172 * node/memory hotplug, we'll fixup all on-line cpus.
4173 */
4174 if (cpu_online(cpu))
4175 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
4176#endif
4177 }
4178
Yasunori Goto68113782006-06-23 02:03:11 -07004179 return 0;
4180}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08004182static noinline void __init
4183build_all_zonelists_init(void)
4184{
4185 __build_all_zonelists(NULL);
4186 mminit_verify_zonelist();
4187 cpuset_init_current_mems_allowed();
4188}
4189
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004190/*
4191 * Called with zonelists_mutex held always
4192 * unless system_state == SYSTEM_BOOTING.
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08004193 *
4194 * __ref due to (1) call of __meminit annotated setup_zone_pageset
4195 * [we're only called with non-NULL zone through __meminit paths] and
4196 * (2) call of __init annotated helper build_all_zonelists_init
4197 * [protected by SYSTEM_BOOTING].
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004198 */
Jiang Liu9adb62a2012-07-31 16:43:28 -07004199void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
Yasunori Goto68113782006-06-23 02:03:11 -07004200{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004201 set_zonelist_order();
4202
Yasunori Goto68113782006-06-23 02:03:11 -07004203 if (system_state == SYSTEM_BOOTING) {
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08004204 build_all_zonelists_init();
Yasunori Goto68113782006-06-23 02:03:11 -07004205 } else {
KAMEZAWA Hiroyukie9959f02010-11-24 12:57:09 -08004206#ifdef CONFIG_MEMORY_HOTPLUG
Jiang Liu9adb62a2012-07-31 16:43:28 -07004207 if (zone)
4208 setup_zone_pageset(zone);
KAMEZAWA Hiroyukie9959f02010-11-24 12:57:09 -08004209#endif
Cody P Schaferdd1895e2013-07-03 15:01:36 -07004210 /* we have to stop all CPUs to guarantee there is no user
4211 of the zonelist */
Jiang Liu9adb62a2012-07-31 16:43:28 -07004212 stop_machine(__build_all_zonelists, pgdat, NULL);
Yasunori Goto68113782006-06-23 02:03:11 -07004213 /* cpuset refresh routine should be here */
4214 }
Andrew Mortonbd1e22b2006-06-23 02:03:47 -07004215 vm_total_pages = nr_free_pagecache_pages();
Mel Gorman9ef9acb2007-10-16 01:25:54 -07004216 /*
4217 * Disable grouping by mobility if the number of pages in the
4218 * system is too low to allow the mechanism to work. It would be
4219 * more accurate, but expensive to check per-zone. This check is
4220 * made on memory-hotadd so a system can start with mobility
4221 * disabled and enable it later
4222 */
Mel Gormand9c23402007-10-16 01:26:01 -07004223 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
Mel Gorman9ef9acb2007-10-16 01:25:54 -07004224 page_group_by_mobility_disabled = 1;
4225 else
4226 page_group_by_mobility_disabled = 0;
4227
Anton Blanchardf88dfff2014-12-10 15:42:53 -08004228 pr_info("Built %i zonelists in %s order, mobility grouping %s. "
Mel Gorman9ef9acb2007-10-16 01:25:54 -07004229 "Total pages: %ld\n",
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004230 nr_online_nodes,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004231 zonelist_order_name[current_zonelist_order],
Mel Gorman9ef9acb2007-10-16 01:25:54 -07004232 page_group_by_mobility_disabled ? "off" : "on",
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004233 vm_total_pages);
4234#ifdef CONFIG_NUMA
Anton Blanchardf88dfff2014-12-10 15:42:53 -08004235 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004236#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237}
4238
4239/*
4240 * Helper functions to size the waitqueue hash table.
4241 * Essentially these want to choose hash table sizes sufficiently
4242 * large so that collisions trying to wait on pages are rare.
4243 * But in fact, the number of active page waitqueues on typical
4244 * systems is ridiculously low, less than 200. So this is even
4245 * conservative, even though it seems large.
4246 *
4247 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
4248 * waitqueues, i.e. the size of the waitq table given the number of pages.
4249 */
4250#define PAGES_PER_WAITQUEUE 256
4251
Yasunori Gotocca448f2006-06-23 02:03:10 -07004252#ifndef CONFIG_MEMORY_HOTPLUG
Yasunori Goto02b694d2006-06-23 02:03:08 -07004253static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254{
4255 unsigned long size = 1;
4256
4257 pages /= PAGES_PER_WAITQUEUE;
4258
4259 while (size < pages)
4260 size <<= 1;
4261
4262 /*
4263 * Once we have dozens or even hundreds of threads sleeping
4264 * on IO we've got bigger problems than wait queue collision.
4265 * Limit the size of the wait table to a reasonable size.
4266 */
4267 size = min(size, 4096UL);
4268
4269 return max(size, 4UL);
4270}
Yasunori Gotocca448f2006-06-23 02:03:10 -07004271#else
4272/*
4273 * A zone's size might be changed by hot-add, so it is not possible to determine
4274 * a suitable size for its wait_table. So we use the maximum size now.
4275 *
4276 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
4277 *
4278 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
4279 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
4280 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
4281 *
4282 * The maximum number of entries is reached when a zone's memory is (512K + 256)
4283 * pages or more, going by the traditional calculation above. That works out to:
4284 *
4285 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
4286 * ia64(16K page size) : = ( 8G + 4M)byte.
4287 * powerpc (64K page size) : = (32G +16M)byte.
4288 */
4289static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4290{
4291 return 4096UL;
4292}
4293#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294
4295/*
4296 * This is an integer logarithm so that shifts can be used later
4297 * to extract the more random high bits from the multiplicative
4298 * hash function before the remainder is taken.
4299 */
4300static inline unsigned long wait_table_bits(unsigned long size)
4301{
4302 return ffz(~size);
4303}
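/*
 * For example, in the !CONFIG_MEMORY_HOTPLUG case above, a 1GiB zone with
 * 4KiB pages has 262144 pages; dividing by PAGES_PER_WAITQUEUE gives 1024,
 * already a power of two and well under the 4096 cap, so the zone gets a
 * 1024-entry wait table and wait_table_bits() returns 10.
 */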
4304
Mel Gorman56fd56b2007-10-16 01:25:58 -07004305/*
Arve Hjønnevåg6d3163c2011-05-24 17:12:24 -07004306 * Check if a pageblock contains reserved pages
4307 */
4308static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
4309{
4310 unsigned long pfn;
4311
4312 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4313 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
4314 return 1;
4315 }
4316 return 0;
4317}
4318
4319/*
Mel Gormand9c23402007-10-16 01:26:01 -07004320 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
Mel Gorman41858962009-06-16 15:32:12 -07004321 * of blocks reserved is based on min_wmark_pages(zone). The memory within
4322 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
Mel Gorman56fd56b2007-10-16 01:25:58 -07004323 * higher will lead to a bigger reserve which will get freed as contiguous
4324 * blocks as reclaim kicks in
4325 */
4326static void setup_zone_migrate_reserve(struct zone *zone)
4327{
Arve Hjønnevåg6d3163c2011-05-24 17:12:24 -07004328 unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
Mel Gorman56fd56b2007-10-16 01:25:58 -07004329 struct page *page;
Mel Gorman78986a62009-09-21 17:03:02 -07004330 unsigned long block_migratetype;
4331 int reserve;
Yasuaki Ishimatsu943dca12014-01-21 15:49:06 -08004332 int old_reserve;
Mel Gorman56fd56b2007-10-16 01:25:58 -07004333
Michal Hockod02156382011-12-08 14:34:27 -08004334 /*
4335 * Get the start pfn, end pfn and the number of blocks to reserve
4336 * We have to be careful to be aligned to pageblock_nr_pages to
4337 * make sure that we always check pfn_valid for the first page in
4338 * the block.
4339 */
Mel Gorman56fd56b2007-10-16 01:25:58 -07004340 start_pfn = zone->zone_start_pfn;
Cody P Schafer108bcc92013-02-22 16:35:23 -08004341 end_pfn = zone_end_pfn(zone);
Michal Hockod02156382011-12-08 14:34:27 -08004342 start_pfn = roundup(start_pfn, pageblock_nr_pages);
Mel Gorman41858962009-06-16 15:32:12 -07004343 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
Mel Gormand9c23402007-10-16 01:26:01 -07004344 pageblock_order;
Mel Gorman56fd56b2007-10-16 01:25:58 -07004345
Mel Gorman78986a62009-09-21 17:03:02 -07004346 /*
4347 * Reserve blocks are generally in place to help high-order atomic
4348 * allocations that are short-lived. A min_free_kbytes value that
4349 * would result in more than 2 reserve blocks for atomic allocations
4350 * is assumed to be in place to help anti-fragmentation for the
4351 * future allocation of hugepages at runtime.
4352 */
4353 reserve = min(2, reserve);
Yasuaki Ishimatsu943dca12014-01-21 15:49:06 -08004354 old_reserve = zone->nr_migrate_reserve_block;
4355
4356 /* When memory hot-add, we almost always need to do nothing */
4357 if (reserve == old_reserve)
4358 return;
4359 zone->nr_migrate_reserve_block = reserve;
Mel Gorman78986a62009-09-21 17:03:02 -07004360
Mel Gormand9c23402007-10-16 01:26:01 -07004361 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
Mel Gorman7e18adb2015-06-30 14:57:05 -07004362 if (!early_page_nid_uninitialised(pfn, zone_to_nid(zone)))
4363 return;
4364
Mel Gorman56fd56b2007-10-16 01:25:58 -07004365 if (!pfn_valid(pfn))
4366 continue;
4367 page = pfn_to_page(pfn);
4368
Adam Litke344c7902008-09-02 14:35:38 -07004369 /* Watch out for overlapping nodes */
4370 if (page_to_nid(page) != zone_to_nid(zone))
4371 continue;
4372
Mel Gorman56fd56b2007-10-16 01:25:58 -07004373 block_migratetype = get_pageblock_migratetype(page);
4374
Mel Gorman938929f2012-01-10 15:07:14 -08004375 /* Only test what is necessary when the reserves are not met */
4376 if (reserve > 0) {
4377 /*
4378 * Blocks with reserved pages will never free, skip
4379 * them.
4380 */
4381 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
4382 if (pageblock_is_reserved(pfn, block_end_pfn))
4383 continue;
Mel Gorman56fd56b2007-10-16 01:25:58 -07004384
Mel Gorman938929f2012-01-10 15:07:14 -08004385 /* If this block is reserved, account for it */
4386 if (block_migratetype == MIGRATE_RESERVE) {
4387 reserve--;
4388 continue;
4389 }
4390
4391 /* Suitable for reserving if this block is movable */
4392 if (block_migratetype == MIGRATE_MOVABLE) {
4393 set_pageblock_migratetype(page,
4394 MIGRATE_RESERVE);
4395 move_freepages_block(zone, page,
4396 MIGRATE_RESERVE);
4397 reserve--;
4398 continue;
4399 }
Yasuaki Ishimatsu943dca12014-01-21 15:49:06 -08004400 } else if (!old_reserve) {
4401 /*
4402 * At boot time we don't need to scan the whole zone
4403 * for turning off MIGRATE_RESERVE.
4404 */
4405 break;
Mel Gorman56fd56b2007-10-16 01:25:58 -07004406 }
4407
4408 /*
4409 * If the reserve is met and this is a previous reserved block,
4410 * take it back
4411 */
4412 if (block_migratetype == MIGRATE_RESERVE) {
4413 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4414 move_freepages_block(zone, page, MIGRATE_MOVABLE);
4415 }
4416 }
4417}
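/*
 * Because of the min(2, reserve) clamp above, a zone never dedicates more
 * than two pageblocks to MIGRATE_RESERVE no matter how high min_free_kbytes
 * is tuned; anything beyond that is assumed to serve anti-fragmentation
 * rather than short-lived atomic allocations.
 */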
Mel Gormanac0e5b72007-10-16 01:25:58 -07004418
Linus Torvalds1da177e2005-04-16 15:20:36 -07004419/*
4420 * Initially all pages are reserved - free ones are freed
4421 * up by free_all_bootmem() once the early boot process is
4422 * done. Non-atomic initialization, single-pass.
4423 */
Matt Tolentinoc09b4242006-01-17 07:03:44 +01004424void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
Dave Hansena2f3aa022007-01-10 23:15:30 -08004425 unsigned long start_pfn, enum memmap_context context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426{
Mel Gorman3a80a7f2015-06-30 14:57:02 -07004427 pg_data_t *pgdat = NODE_DATA(nid);
Andy Whitcroft29751f62005-06-23 00:08:00 -07004428 unsigned long end_pfn = start_pfn + size;
4429 unsigned long pfn;
KAMEZAWA Hiroyuki86051ca2008-04-29 00:58:21 -07004430 struct zone *z;
Mel Gorman3a80a7f2015-06-30 14:57:02 -07004431 unsigned long nr_initialised = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004432
Hugh Dickins22b31ee2009-01-06 14:40:09 -08004433 if (highest_memmap_pfn < end_pfn - 1)
4434 highest_memmap_pfn = end_pfn - 1;
4435
Mel Gorman3a80a7f2015-06-30 14:57:02 -07004436 z = &pgdat->node_zones[zone];
Greg Ungerercbe8dd42006-01-12 01:05:24 -08004437 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
Dave Hansena2f3aa022007-01-10 23:15:30 -08004438 /*
4439 * There can be holes in boot-time mem_map[]s
4440 * handed to this function. They do not
4441 * exist on hotplugged memory.
4442 */
4443 if (context == MEMMAP_EARLY) {
4444 if (!early_pfn_valid(pfn))
4445 continue;
4446 if (!early_pfn_in_nid(pfn, nid))
4447 continue;
Mel Gorman3a80a7f2015-06-30 14:57:02 -07004448 if (!update_defer_init(pgdat, pfn, end_pfn,
4449 &nr_initialised))
4450 break;
Dave Hansena2f3aa022007-01-10 23:15:30 -08004451 }
Mel Gormanac5d2532015-06-30 14:57:20 -07004452
4453 /*
4454 * Mark the block movable so that blocks are reserved for
4455 * movable at startup. This will force kernel allocations
4456 * to reserve their blocks rather than leaking throughout
4457 * the address space during boot when many long-lived
4458 * kernel allocations are made. Later some blocks near
4459 * the start are marked MIGRATE_RESERVE by
4460 * setup_zone_migrate_reserve()
4461 *
4462 * The bitmap is created for the zone's valid pfn range, but the
4463 * memmap can be created for invalid pages (for alignment), so
4464 * check here that we do not call set_pageblock_migratetype()
4465 * against a pfn outside the zone.
4466 */
4467 if (!(pfn & (pageblock_nr_pages - 1))) {
4468 struct page *page = pfn_to_page(pfn);
4469
4470 __init_single_page(page, pfn, zone, nid);
4471 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4472 } else {
4473 __init_single_pfn(pfn, zone, nid);
4474 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475 }
4476}
4477
Andi Kleen1e548de2008-02-04 22:29:26 -08004478static void __meminit zone_init_free_lists(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479{
Mel Gorman7aeb09f2014-06-04 16:10:21 -07004480 unsigned int order, t;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07004481 for_each_migratetype_order(order, t) {
4482 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483 zone->free_area[order].nr_free = 0;
4484 }
4485}
4486
4487#ifndef __HAVE_ARCH_MEMMAP_INIT
4488#define memmap_init(size, nid, zone, start_pfn) \
Dave Hansena2f3aa022007-01-10 23:15:30 -08004489 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490#endif
4491
David Rientjes7cd2b0a2014-06-23 13:22:04 -07004492static int zone_batchsize(struct zone *zone)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004493{
David Howells3a6be872009-05-06 16:03:03 -07004494#ifdef CONFIG_MMU
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004495 int batch;
4496
4497 /*
4498 * The per-cpu-pages pools are set to around 1/1000th of the
Seth, Rohitba56e912005-10-29 18:15:47 -07004499 * size of the zone, but no more than half a megabyte.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004500 *
4501 * OK, so we don't know how big the cache is. So guess.
4502 */
Jiang Liub40da042013-02-22 16:33:52 -08004503 batch = zone->managed_pages / 1024;
Seth, Rohitba56e912005-10-29 18:15:47 -07004504 if (batch * PAGE_SIZE > 512 * 1024)
4505 batch = (512 * 1024) / PAGE_SIZE;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004506 batch /= 4; /* We effectively *= 4 below */
4507 if (batch < 1)
4508 batch = 1;
4509
4510 /*
Nick Piggin0ceaacc2005-12-04 13:55:25 +11004511 * Clamp the batch to a 2^n - 1 value. Having a power
4512 * of 2 value was found to be more likely to have
4513 * suboptimal cache aliasing properties in some cases.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004514 *
Nick Piggin0ceaacc2005-12-04 13:55:25 +11004515 * For example if 2 tasks are alternately allocating
4516 * batches of pages, one task can end up with a lot
4517 * of pages of one half of the possible page colors
4518 * and the other with pages of the other colors.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004519 */
David Howells91552032009-05-06 16:03:02 -07004520 batch = rounddown_pow_of_two(batch + batch/2) - 1;
Seth, Rohitba56e912005-10-29 18:15:47 -07004521
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004522 return batch;
David Howells3a6be872009-05-06 16:03:03 -07004523
4524#else
4525 /* The deferral and batching of frees should be suppressed under NOMMU
4526 * conditions.
4527 *
4528 * The problem is that NOMMU needs to be able to allocate large chunks
4529 * of contiguous memory as there's no hardware page translation to
4530 * assemble apparent contiguous memory from discontiguous pages.
4531 *
4532 * Queueing large contiguous runs of pages for batching, however,
4533 * causes the pages to actually be freed in smaller chunks. As there
4534 * can be a significant delay between the individual batches being
4535 * recycled, this leads to the once large chunks of space being
4536 * fragmented and becoming unavailable for high-order allocations.
4537 */
4538 return 0;
4539#endif
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004540}
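/*
 * For example, a zone managing 1GiB of 4KiB pages starts with
 * batch = 262144 / 1024 = 256; that is more than 512KiB worth of pages,
 * so it is clamped to 128, divided by 4 to 32, and
 * rounddown_pow_of_two(32 + 16) - 1 finally gives 31.  With the default
 * pageset_set_batch() below, pcp->high then becomes 6 * 31 = 186.
 */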
4541
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07004542/*
4543 * pcp->high and pcp->batch values are related and dependent on one another:
4544 * ->batch must never be higher than ->high.
4545 * The following function updates them in a safe manner without read side
4546 * locking.
4547 *
4548 * Any new users of pcp->batch and pcp->high should ensure they can cope with
4549 * those fields changing asynchronously (according to the above rule).
4550 *
4551 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
4552 * outside of boot time (or some other assurance that no concurrent updaters
4553 * exist).
4554 */
4555static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
4556 unsigned long batch)
4557{
4558 /* start with a fail safe value for batch */
4559 pcp->batch = 1;
4560 smp_wmb();
4561
4562 /* Update high, then batch, in order */
4563 pcp->high = high;
4564 smp_wmb();
4565
4566 pcp->batch = batch;
4567}
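/*
 * The ordering above keeps the stores themselves consistent: batch first
 * drops to the fail-safe value 1, the new high is published, and only then
 * is the final batch written, so at no point does the update sequence leave
 * batch larger than the high it was paired with.
 */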
4568
Cody P Schafer36640332013-07-03 15:01:40 -07004569/* a companion to pageset_set_high() */
Cody P Schafer4008bab2013-07-03 15:01:28 -07004570static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
4571{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07004572 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
Cody P Schafer4008bab2013-07-03 15:01:28 -07004573}
4574
Cody P Schafer88c90db2013-07-03 15:01:35 -07004575static void pageset_init(struct per_cpu_pageset *p)
Christoph Lameter2caaad42005-06-21 17:15:00 -07004576{
4577 struct per_cpu_pages *pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07004578 int migratetype;
Christoph Lameter2caaad42005-06-21 17:15:00 -07004579
Magnus Damm1c6fe942005-10-26 01:58:59 -07004580 memset(p, 0, sizeof(*p));
4581
Christoph Lameter3dfa5722008-02-04 22:29:19 -08004582 pcp = &p->pcp;
Christoph Lameter2caaad42005-06-21 17:15:00 -07004583 pcp->count = 0;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07004584 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4585 INIT_LIST_HEAD(&pcp->lists[migratetype]);
Christoph Lameter2caaad42005-06-21 17:15:00 -07004586}
4587
Cody P Schafer88c90db2013-07-03 15:01:35 -07004588static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
4589{
4590 pageset_init(p);
4591 pageset_set_batch(p, batch);
4592}
4593
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08004594/*
Cody P Schafer36640332013-07-03 15:01:40 -07004595 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08004596 * to the value high for the pageset p.
4597 */
Cody P Schafer36640332013-07-03 15:01:40 -07004598static void pageset_set_high(struct per_cpu_pageset *p,
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08004599 unsigned long high)
4600{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07004601 unsigned long batch = max(1UL, high / 4);
4602 if ((high / 4) > (PAGE_SHIFT * 8))
4603 batch = PAGE_SHIFT * 8;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08004604
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07004605 pageset_update(&p->pcp, high, batch);
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08004606}
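/*
 * For example, with percpu_pagelist_fraction set to 8 on a zone managing
 * 262144 pages, pageset_set_high_and_batch() below passes high = 32768;
 * high / 4 = 8192 exceeds PAGE_SHIFT * 8 (96 with 4KiB pages), so the
 * batch is capped at 96 while high stays at 32768.
 */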
4607
David Rientjes7cd2b0a2014-06-23 13:22:04 -07004608static void pageset_set_high_and_batch(struct zone *zone,
4609 struct per_cpu_pageset *pcp)
Cody P Schafer56cef2b2013-07-03 15:01:38 -07004610{
Cody P Schafer56cef2b2013-07-03 15:01:38 -07004611 if (percpu_pagelist_fraction)
Cody P Schafer36640332013-07-03 15:01:40 -07004612 pageset_set_high(pcp,
Cody P Schafer56cef2b2013-07-03 15:01:38 -07004613 (zone->managed_pages /
4614 percpu_pagelist_fraction));
4615 else
4616 pageset_set_batch(pcp, zone_batchsize(zone));
4617}
4618
Cody P Schafer169f6c12013-07-03 15:01:41 -07004619static void __meminit zone_pageset_init(struct zone *zone, int cpu)
4620{
4621 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4622
4623 pageset_init(pcp);
4624 pageset_set_high_and_batch(zone, pcp);
4625}
4626
Jiang Liu4ed7e022012-07-31 16:43:35 -07004627static void __meminit setup_zone_pageset(struct zone *zone)
Wu Fengguang319774e2010-05-24 14:32:49 -07004628{
4629 int cpu;
Wu Fengguang319774e2010-05-24 14:32:49 -07004630 zone->pageset = alloc_percpu(struct per_cpu_pageset);
Cody P Schafer56cef2b2013-07-03 15:01:38 -07004631 for_each_possible_cpu(cpu)
4632 zone_pageset_init(zone, cpu);
Wu Fengguang319774e2010-05-24 14:32:49 -07004633}
4634
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004635/*
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004636 * Allocate per cpu pagesets and initialize them.
4637 * Before this call only boot pagesets were available.
Christoph Lameter2caaad42005-06-21 17:15:00 -07004638 */
Al Viro78d99552005-12-15 09:18:25 +00004639void __init setup_per_cpu_pageset(void)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004640{
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004641 struct zone *zone;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004642
Wu Fengguang319774e2010-05-24 14:32:49 -07004643 for_each_populated_zone(zone)
4644 setup_zone_pageset(zone);
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07004645}
4646
Sam Ravnborg577a32f2007-05-17 23:29:25 +02004647static noinline __init_refok
Yasunori Gotocca448f2006-06-23 02:03:10 -07004648int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
Dave Hansened8ece22005-10-29 18:16:50 -07004649{
4650 int i;
Yasunori Gotocca448f2006-06-23 02:03:10 -07004651 size_t alloc_size;
Dave Hansened8ece22005-10-29 18:16:50 -07004652
4653 /*
4654 * The per-page waitqueue mechanism uses hashed waitqueues
4655 * per zone.
4656 */
Yasunori Goto02b694d2006-06-23 02:03:08 -07004657 zone->wait_table_hash_nr_entries =
4658 wait_table_hash_nr_entries(zone_size_pages);
4659 zone->wait_table_bits =
4660 wait_table_bits(zone->wait_table_hash_nr_entries);
Yasunori Gotocca448f2006-06-23 02:03:10 -07004661 alloc_size = zone->wait_table_hash_nr_entries
4662 * sizeof(wait_queue_head_t);
4663
Heiko Carstenscd94b9d2008-05-23 13:04:52 -07004664 if (!slab_is_available()) {
Yasunori Gotocca448f2006-06-23 02:03:10 -07004665 zone->wait_table = (wait_queue_head_t *)
Santosh Shilimkar67828322014-01-21 15:50:25 -08004666 memblock_virt_alloc_node_nopanic(
4667 alloc_size, zone->zone_pgdat->node_id);
Yasunori Gotocca448f2006-06-23 02:03:10 -07004668 } else {
4669 /*
4670 * This case means that a zone whose size was 0 gets new memory
4671 * via memory hot-add.
4672 * But it may be the case that a new node was hot-added. In
4673 * this case vmalloc() will not be able to use this new node's
4674 * memory - ideally this wait_table should be allocated from the
4675 * new node itself as well.
4676 * Making use of this new node's memory will require further
4677 * work.
4678 */
Jesper Juhl8691f3a2007-10-16 01:24:49 -07004679 zone->wait_table = vmalloc(alloc_size);
Yasunori Gotocca448f2006-06-23 02:03:10 -07004680 }
4681 if (!zone->wait_table)
4682 return -ENOMEM;
Dave Hansened8ece22005-10-29 18:16:50 -07004683
Pintu Kumarb8af2942013-09-11 14:20:34 -07004684 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
Dave Hansened8ece22005-10-29 18:16:50 -07004685 init_waitqueue_head(zone->wait_table + i);
Yasunori Gotocca448f2006-06-23 02:03:10 -07004686
4687 return 0;
Dave Hansened8ece22005-10-29 18:16:50 -07004688}
4689
Matt Tolentinoc09b4242006-01-17 07:03:44 +01004690static __meminit void zone_pcp_init(struct zone *zone)
Dave Hansened8ece22005-10-29 18:16:50 -07004691{
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004692 /*
4693 * per cpu subsystem is not up at this point. The following code
4694 * relies on the ability of the linker to provide the
4695 * offset of a (static) per cpu variable into the per cpu area.
4696 */
4697 zone->pageset = &boot_pageset;
Dave Hansened8ece22005-10-29 18:16:50 -07004698
Xishi Qiub38a8722013-11-12 15:07:20 -08004699 if (populated_zone(zone))
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004700 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
4701 zone->name, zone->present_pages,
4702 zone_batchsize(zone));
Dave Hansened8ece22005-10-29 18:16:50 -07004703}
4704
Jiang Liu4ed7e022012-07-31 16:43:35 -07004705int __meminit init_currently_empty_zone(struct zone *zone,
Yasunori Goto718127c2006-06-23 02:03:10 -07004706 unsigned long zone_start_pfn,
Yaowei Baib171e402015-11-05 18:47:06 -08004707 unsigned long size)
Dave Hansened8ece22005-10-29 18:16:50 -07004708{
4709 struct pglist_data *pgdat = zone->zone_pgdat;
Yasunori Gotocca448f2006-06-23 02:03:10 -07004710 int ret;
4711 ret = zone_wait_table_init(zone, size);
4712 if (ret)
4713 return ret;
Dave Hansened8ece22005-10-29 18:16:50 -07004714 pgdat->nr_zones = zone_idx(zone) + 1;
4715
Dave Hansened8ece22005-10-29 18:16:50 -07004716 zone->zone_start_pfn = zone_start_pfn;
4717
Mel Gorman708614e2008-07-23 21:26:51 -07004718 mminit_dprintk(MMINIT_TRACE, "memmap_init",
4719 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4720 pgdat->node_id,
4721 (unsigned long)zone_idx(zone),
4722 zone_start_pfn, (zone_start_pfn + size));
4723
Andi Kleen1e548de2008-02-04 22:29:26 -08004724 zone_init_free_lists(zone);
Yasunori Goto718127c2006-06-23 02:03:10 -07004725
4726 return 0;
Dave Hansened8ece22005-10-29 18:16:50 -07004727}
4728
Tejun Heo0ee332c2011-12-08 10:22:09 -08004729#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Mel Gormanc7132162006-09-27 01:49:43 -07004730#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
Mel Gorman8a942fd2015-06-30 14:56:55 -07004731
Mel Gormanc7132162006-09-27 01:49:43 -07004732/*
4733 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
Mel Gormanc7132162006-09-27 01:49:43 -07004734 */
Mel Gorman8a942fd2015-06-30 14:56:55 -07004735int __meminit __early_pfn_to_nid(unsigned long pfn,
4736 struct mminit_pfnnid_cache *state)
Mel Gormanc7132162006-09-27 01:49:43 -07004737{
Tejun Heoc13291a2011-07-12 10:46:30 +02004738 unsigned long start_pfn, end_pfn;
Yinghai Lue76b63f2013-09-11 14:22:17 -07004739 int nid;
Russ Anderson7c243c72013-04-29 15:07:59 -07004740
Mel Gorman8a942fd2015-06-30 14:56:55 -07004741 if (state->last_start <= pfn && pfn < state->last_end)
4742 return state->last_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07004743
Yinghai Lue76b63f2013-09-11 14:22:17 -07004744 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
4745 if (nid != -1) {
Mel Gorman8a942fd2015-06-30 14:56:55 -07004746 state->last_start = start_pfn;
4747 state->last_end = end_pfn;
4748 state->last_nid = nid;
Yinghai Lue76b63f2013-09-11 14:22:17 -07004749 }
4750
4751 return nid;
Mel Gormanc7132162006-09-27 01:49:43 -07004752}
4753#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
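/*
 * The small cache above means a linear walk over PFNs (the typical pattern
 * during early memmap initialisation) only queries memblock once per
 * contiguous region: e.g. PFNs 0x1000..0x1fff inside one region all return
 * the cached nid after the first lookup.
 */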
4754
Mel Gormanc7132162006-09-27 01:49:43 -07004755/**
Santosh Shilimkar67828322014-01-21 15:50:25 -08004756 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07004757 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
Santosh Shilimkar67828322014-01-21 15:50:25 -08004758 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
Mel Gormanc7132162006-09-27 01:49:43 -07004759 *
Zhang Zhen7d018172014-06-04 16:10:53 -07004760 * If an architecture guarantees that all ranges registered contain no holes
4761 * and may be freed, this function may be used instead of calling
4762 * memblock_free_early_nid() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07004763 */
Tejun Heoc13291a2011-07-12 10:46:30 +02004764void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07004765{
Tejun Heoc13291a2011-07-12 10:46:30 +02004766 unsigned long start_pfn, end_pfn;
4767 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07004768
Tejun Heoc13291a2011-07-12 10:46:30 +02004769 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4770 start_pfn = min(start_pfn, max_low_pfn);
4771 end_pfn = min(end_pfn, max_low_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07004772
Tejun Heoc13291a2011-07-12 10:46:30 +02004773 if (start_pfn < end_pfn)
Santosh Shilimkar67828322014-01-21 15:50:25 -08004774 memblock_free_early_nid(PFN_PHYS(start_pfn),
4775 (end_pfn - start_pfn) << PAGE_SHIFT,
4776 this_nid);
Mel Gormanc7132162006-09-27 01:49:43 -07004777 }
4778}
4779
4780/**
4781 * sparse_memory_present_with_active_regions - Call memory_present for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07004782 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
Mel Gormanc7132162006-09-27 01:49:43 -07004783 *
Zhang Zhen7d018172014-06-04 16:10:53 -07004784 * If an architecture guarantees that all ranges registered contain no holes and may
4785 * be freed, this function may be used instead of calling memory_present() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07004786 */
4787void __init sparse_memory_present_with_active_regions(int nid)
4788{
Tejun Heoc13291a2011-07-12 10:46:30 +02004789 unsigned long start_pfn, end_pfn;
4790 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07004791
Tejun Heoc13291a2011-07-12 10:46:30 +02004792 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4793 memory_present(this_nid, start_pfn, end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07004794}
4795
4796/**
4797 * get_pfn_range_for_nid - Return the start and end page frames for a node
Randy Dunlap88ca3b92006-10-04 02:15:25 -07004798 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4799 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4800 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
Mel Gormanc7132162006-09-27 01:49:43 -07004801 *
4802 * It returns the start and end page frame of a node based on information
Zhang Zhen7d018172014-06-04 16:10:53 -07004803 * provided by memblock_set_node(). If called for a node
Mel Gormanc7132162006-09-27 01:49:43 -07004804 * with no available memory, a warning is printed and the start and end
Randy Dunlap88ca3b92006-10-04 02:15:25 -07004805 * PFNs will be 0.
Mel Gormanc7132162006-09-27 01:49:43 -07004806 */
Yasunori Gotoa3142c82007-05-08 00:23:07 -07004807void __meminit get_pfn_range_for_nid(unsigned int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07004808 unsigned long *start_pfn, unsigned long *end_pfn)
4809{
Tejun Heoc13291a2011-07-12 10:46:30 +02004810 unsigned long this_start_pfn, this_end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07004811 int i;
Tejun Heoc13291a2011-07-12 10:46:30 +02004812
Mel Gormanc7132162006-09-27 01:49:43 -07004813 *start_pfn = -1UL;
4814 *end_pfn = 0;
4815
Tejun Heoc13291a2011-07-12 10:46:30 +02004816 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4817 *start_pfn = min(*start_pfn, this_start_pfn);
4818 *end_pfn = max(*end_pfn, this_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07004819 }
4820
Christoph Lameter633c0662007-10-16 01:25:37 -07004821 if (*start_pfn == -1UL)
Mel Gormanc7132162006-09-27 01:49:43 -07004822 *start_pfn = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07004823}
4824
4825/*
Mel Gorman2a1e2742007-07-17 04:03:12 -07004826 * This finds a zone that can be used for ZONE_MOVABLE pages. The
4827 * assumption is made that zones within a node are ordered in monotonic
4828 * increasing memory addresses so that the "highest" populated zone is used
4829 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07004830static void __init find_usable_zone_for_movable(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07004831{
4832 int zone_index;
4833 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4834 if (zone_index == ZONE_MOVABLE)
4835 continue;
4836
4837 if (arch_zone_highest_possible_pfn[zone_index] >
4838 arch_zone_lowest_possible_pfn[zone_index])
4839 break;
4840 }
4841
4842 VM_BUG_ON(zone_index == -1);
4843 movable_zone = zone_index;
4844}
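/*
 * Example: on a typical 64-bit configuration the highest zone with a
 * non-empty arch range is usually ZONE_NORMAL, so movable_zone ends up as
 * ZONE_NORMAL; on a 32-bit machine with highmem it would normally be
 * ZONE_HIGHMEM. ZONE_MOVABLE itself is skipped by the loop above.
 */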
4845
4846/*
4847 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004848 * because it is sized independently of architecture. Unlike the other zones,
Mel Gorman2a1e2742007-07-17 04:03:12 -07004849 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4850 * in each node depending on the size of each node and how evenly kernelcore
4851 * is distributed. This helper function adjusts the zone ranges
4852 * provided by the architecture for a given node by using the end of the
4853 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4854 * zones within a node are in order of monotonically increasing memory addresses
4855 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07004856static void __meminit adjust_zone_range_for_zone_movable(int nid,
Mel Gorman2a1e2742007-07-17 04:03:12 -07004857 unsigned long zone_type,
4858 unsigned long node_start_pfn,
4859 unsigned long node_end_pfn,
4860 unsigned long *zone_start_pfn,
4861 unsigned long *zone_end_pfn)
4862{
4863 /* Only adjust if ZONE_MOVABLE is on this node */
4864 if (zone_movable_pfn[nid]) {
4865 /* Size ZONE_MOVABLE */
4866 if (zone_type == ZONE_MOVABLE) {
4867 *zone_start_pfn = zone_movable_pfn[nid];
4868 *zone_end_pfn = min(node_end_pfn,
4869 arch_zone_highest_possible_pfn[movable_zone]);
4870
4871 /* Adjust for ZONE_MOVABLE starting within this range */
4872 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4873 *zone_end_pfn > zone_movable_pfn[nid]) {
4874 *zone_end_pfn = zone_movable_pfn[nid];
4875
4876 /* Check if this whole range is within ZONE_MOVABLE */
4877 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4878 *zone_start_pfn = *zone_end_pfn;
4879 }
4880}
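/*
 * Worked example (hypothetical PFNs): if zone_movable_pfn[nid] == 0x80000,
 * then for ZONE_MOVABLE the range becomes [0x80000, min(node_end_pfn, end
 * of the highest usable kernel zone)); a kernel zone that straddles 0x80000
 * has its end clipped to 0x80000; and a kernel zone lying entirely above
 * 0x80000 collapses to an empty range (start == end).
 */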
4881
4882/*
Mel Gormanc7132162006-09-27 01:49:43 -07004883 * Return the number of pages a zone spans in a node, including holes
4884 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4885 */
Paul Mundt6ea6e682007-07-15 23:38:20 -07004886static unsigned long __meminit zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07004887 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07004888 unsigned long node_start_pfn,
4889 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07004890 unsigned long *ignored)
4891{
Mel Gormanc7132162006-09-27 01:49:43 -07004892 unsigned long zone_start_pfn, zone_end_pfn;
4893
Xishi Qiub5685e92015-09-08 15:04:16 -07004894 /* When hot-adding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07004895 if (!node_start_pfn && !node_end_pfn)
4896 return 0;
4897
Zhang Yanfei7960aed2013-07-08 15:59:52 -07004898 /* Get the start and end of the zone */
Mel Gormanc7132162006-09-27 01:49:43 -07004899 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4900 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman2a1e2742007-07-17 04:03:12 -07004901 adjust_zone_range_for_zone_movable(nid, zone_type,
4902 node_start_pfn, node_end_pfn,
4903 &zone_start_pfn, &zone_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07004904
4905 /* Check that this node has pages within the zone's required range */
4906 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4907 return 0;
4908
4909 /* Move the zone boundaries inside the node if necessary */
4910 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4911 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4912
4913 /* Return the spanned pages */
4914 return zone_end_pfn - zone_start_pfn;
4915}
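/*
 * Worked example: for a node spanning PFNs [0x100000, 0x200000) (4-8 GiB
 * with 4 KiB pages), a zone whose architectural range is [0, 0x100000)
 * contributes 0 pages to this node, while a zone covering [0x100000, ...)
 * is clamped to the node boundaries and spans the full 0x100000 pages.
 */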
4916
4917/*
4918 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
Randy Dunlap88ca3b92006-10-04 02:15:25 -07004919 * then all holes in the requested range will be accounted for.
Mel Gormanc7132162006-09-27 01:49:43 -07004920 */
Yinghai Lu32996252009-12-15 17:59:02 -08004921unsigned long __meminit __absent_pages_in_range(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07004922 unsigned long range_start_pfn,
4923 unsigned long range_end_pfn)
4924{
Tejun Heo96e907d2011-07-12 10:46:29 +02004925 unsigned long nr_absent = range_end_pfn - range_start_pfn;
4926 unsigned long start_pfn, end_pfn;
4927 int i;
Mel Gormanc7132162006-09-27 01:49:43 -07004928
Tejun Heo96e907d2011-07-12 10:46:29 +02004929 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4930 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4931 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4932 nr_absent -= end_pfn - start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07004933 }
Tejun Heo96e907d2011-07-12 10:46:29 +02004934 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07004935}
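/*
 * Worked example: for the PFN range [0, 1000) with memblock regions
 * covering [0, 200) and [300, 1000), nr_absent starts at 1000 and the two
 * intersections subtract 200 and 700 pages, leaving a hole of 100 pages.
 */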
4936
4937/**
4938 * absent_pages_in_range - Return number of page frames in holes within a range
4939 * @start_pfn: The start PFN to start searching for holes
4940 * @end_pfn: The end PFN to stop searching for holes
4941 *
Randy Dunlap88ca3b92006-10-04 02:15:25 -07004942 * It returns the number of page frames in memory holes within a range.
Mel Gormanc7132162006-09-27 01:49:43 -07004943 */
4944unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4945 unsigned long end_pfn)
4946{
4947 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4948}
4949
4950/* Return the number of page frames in holes in a zone on a node */
Paul Mundt6ea6e682007-07-15 23:38:20 -07004951static unsigned long __meminit zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07004952 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07004953 unsigned long node_start_pfn,
4954 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07004955 unsigned long *ignored)
4956{
Tejun Heo96e907d2011-07-12 10:46:29 +02004957 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4958 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman9c7cd682006-09-27 01:49:58 -07004959 unsigned long zone_start_pfn, zone_end_pfn;
4960
Xishi Qiub5685e92015-09-08 15:04:16 -07004961 /* When hot-adding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07004962 if (!node_start_pfn && !node_end_pfn)
4963 return 0;
4964
Tejun Heo96e907d2011-07-12 10:46:29 +02004965 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4966 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
Mel Gorman9c7cd682006-09-27 01:49:58 -07004967
Mel Gorman2a1e2742007-07-17 04:03:12 -07004968 adjust_zone_range_for_zone_movable(nid, zone_type,
4969 node_start_pfn, node_end_pfn,
4970 &zone_start_pfn, &zone_end_pfn);
Mel Gorman9c7cd682006-09-27 01:49:58 -07004971 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07004972}
Mel Gorman0e0b8642006-09-27 01:49:56 -07004973
Tejun Heo0ee332c2011-12-08 10:22:09 -08004974#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Paul Mundt6ea6e682007-07-15 23:38:20 -07004975static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07004976 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07004977 unsigned long node_start_pfn,
4978 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07004979 unsigned long *zones_size)
4980{
4981 return zones_size[zone_type];
4982}
4983
Paul Mundt6ea6e682007-07-15 23:38:20 -07004984static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07004985 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07004986 unsigned long node_start_pfn,
4987 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07004988 unsigned long *zholes_size)
4989{
4990 if (!zholes_size)
4991 return 0;
4992
4993 return zholes_size[zone_type];
4994}
Yinghai Lu20e69262013-03-01 14:51:27 -08004995
Tejun Heo0ee332c2011-12-08 10:22:09 -08004996#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07004997
Yasunori Gotoa3142c82007-05-08 00:23:07 -07004998static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07004999 unsigned long node_start_pfn,
5000 unsigned long node_end_pfn,
5001 unsigned long *zones_size,
5002 unsigned long *zholes_size)
Mel Gormanc7132162006-09-27 01:49:43 -07005003{
Gu Zhengfebd5942015-06-24 16:57:02 -07005004 unsigned long realtotalpages = 0, totalpages = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07005005 enum zone_type i;
5006
Gu Zhengfebd5942015-06-24 16:57:02 -07005007 for (i = 0; i < MAX_NR_ZONES; i++) {
5008 struct zone *zone = pgdat->node_zones + i;
5009 unsigned long size, real_size;
Mel Gormanc7132162006-09-27 01:49:43 -07005010
Gu Zhengfebd5942015-06-24 16:57:02 -07005011 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5012 node_start_pfn,
5013 node_end_pfn,
5014 zones_size);
5015 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005016 node_start_pfn, node_end_pfn,
5017 zholes_size);
Gu Zhengfebd5942015-06-24 16:57:02 -07005018 zone->spanned_pages = size;
5019 zone->present_pages = real_size;
5020
5021 totalpages += size;
5022 realtotalpages += real_size;
5023 }
5024
5025 pgdat->node_spanned_pages = totalpages;
Mel Gormanc7132162006-09-27 01:49:43 -07005026 pgdat->node_present_pages = realtotalpages;
5027 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5028 realtotalpages);
5029}
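/*
 * Example: a node whose zones together span 1,048,576 PFNs but contain
 * 16,384 PFNs of holes ends up with node_spanned_pages == 1,048,576 and
 * node_present_pages == 1,032,192, which is what the printk above reports
 * as "totalpages".
 */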
5030
Mel Gorman835c1342007-10-16 01:25:47 -07005031#ifndef CONFIG_SPARSEMEM
5032/*
5033 * Calculate the size of the zone->blockflags rounded to an unsigned long
Mel Gormand9c23402007-10-16 01:26:01 -07005034 * Start by making sure zonesize is a multiple of pageblock_order by rounding
5035 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
Mel Gorman835c1342007-10-16 01:25:47 -07005036 * round what is now in bits up to the nearest long in bits, then return it in
5037 * bytes.
5038 */
Linus Torvalds7c455122013-02-18 09:58:02 -08005039static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07005040{
5041 unsigned long usemapsize;
5042
Linus Torvalds7c455122013-02-18 09:58:02 -08005043 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
Mel Gormand9c23402007-10-16 01:26:01 -07005044 usemapsize = roundup(zonesize, pageblock_nr_pages);
5045 usemapsize = usemapsize >> pageblock_order;
Mel Gorman835c1342007-10-16 01:25:47 -07005046 usemapsize *= NR_PAGEBLOCK_BITS;
5047 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5048
5049 return usemapsize / 8;
5050}
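/*
 * Worked example (illustrative constants: pageblock_nr_pages == 512 and
 * NR_PAGEBLOCK_BITS == 4): a zone of 1,048,576 pages starting on a
 * pageblock boundary has 2048 pageblocks, needing 8192 bits of flags,
 * which is already a multiple of the word size and comes out as 1024
 * bytes of usemap.
 */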
5051
5052static void __init setup_usemap(struct pglist_data *pgdat,
Linus Torvalds7c455122013-02-18 09:58:02 -08005053 struct zone *zone,
5054 unsigned long zone_start_pfn,
5055 unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07005056{
Linus Torvalds7c455122013-02-18 09:58:02 -08005057 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
Mel Gorman835c1342007-10-16 01:25:47 -07005058 zone->pageblock_flags = NULL;
Julia Lawall58a01a42009-01-06 14:39:28 -08005059 if (usemapsize)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005060 zone->pageblock_flags =
5061 memblock_virt_alloc_node_nopanic(usemapsize,
5062 pgdat->node_id);
Mel Gorman835c1342007-10-16 01:25:47 -07005063}
5064#else
Linus Torvalds7c455122013-02-18 09:58:02 -08005065static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5066 unsigned long zone_start_pfn, unsigned long zonesize) {}
Mel Gorman835c1342007-10-16 01:25:47 -07005067#endif /* CONFIG_SPARSEMEM */
5068
Mel Gormand9c23402007-10-16 01:26:01 -07005069#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
Mel Gormanba72cb82007-11-28 16:21:13 -08005070
Mel Gormand9c23402007-10-16 01:26:01 -07005071/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
Chen Gang15ca2202013-09-11 14:20:27 -07005072void __paginginit set_pageblock_order(void)
Mel Gormand9c23402007-10-16 01:26:01 -07005073{
Andrew Morton955c1cd2012-05-29 15:06:31 -07005074 unsigned int order;
5075
Mel Gormand9c23402007-10-16 01:26:01 -07005076 /* Check that pageblock_nr_pages has not already been setup */
5077 if (pageblock_order)
5078 return;
5079
Andrew Morton955c1cd2012-05-29 15:06:31 -07005080 if (HPAGE_SHIFT > PAGE_SHIFT)
5081 order = HUGETLB_PAGE_ORDER;
5082 else
5083 order = MAX_ORDER - 1;
5084
Mel Gormand9c23402007-10-16 01:26:01 -07005085 /*
5086 * Assume the largest contiguous order of interest is a huge page.
Andrew Morton955c1cd2012-05-29 15:06:31 -07005087 * This value may be variable depending on boot parameters on IA64 and
5088 * powerpc.
Mel Gormand9c23402007-10-16 01:26:01 -07005089 */
5090 pageblock_order = order;
5091}
5092#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5093
Mel Gormanba72cb82007-11-28 16:21:13 -08005094/*
5095 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
Andrew Morton955c1cd2012-05-29 15:06:31 -07005096 * is unused as pageblock_order is set at compile-time. See
5097 * include/linux/pageblock-flags.h for the values of pageblock_order based on
5098 * the kernel config
Mel Gormanba72cb82007-11-28 16:21:13 -08005099 */
Chen Gang15ca2202013-09-11 14:20:27 -07005100void __paginginit set_pageblock_order(void)
Mel Gormanba72cb82007-11-28 16:21:13 -08005101{
Mel Gormanba72cb82007-11-28 16:21:13 -08005102}
Mel Gormand9c23402007-10-16 01:26:01 -07005103
5104#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5105
Jiang Liu01cefae2012-12-12 13:52:19 -08005106static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5107 unsigned long present_pages)
5108{
5109 unsigned long pages = spanned_pages;
5110
5111 /*
5112 * Provide a more accurate estimation if there are holes within
5113 * the zone and SPARSEMEM is in use. If there are holes within the
5114 * zone, each populated memory region may cost us one or two extra
5115 * memmap pages due to alignment because memmap pages for each
5116 * populated regions may not naturally algined on page boundary.
5117 * So the (present_pages >> 4) heuristic is a tradeoff for that.
5118 */
5119 if (spanned_pages > present_pages + (present_pages >> 4) &&
5120 IS_ENABLED(CONFIG_SPARSEMEM))
5121 pages = present_pages;
5122
5123 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5124}
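/*
 * Rough numbers (assuming a 64-byte struct page and 4 KiB pages): a zone
 * spanning 1,048,576 pages needs 64 MiB of memmap, i.e. 16,384 pages.
 * The (present_pages >> 4) check above only falls back to present_pages
 * when more than roughly 6% of the span is holes and SPARSEMEM is enabled.
 */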
5125
Linus Torvalds1da177e2005-04-16 15:20:36 -07005126/*
5127 * Set up the zone data structures:
5128 * - mark all pages reserved
5129 * - mark all memory queues empty
5130 * - clear the memory bitmaps
Minchan Kim6527af52012-07-31 16:46:16 -07005131 *
5132 * NOTE: pgdat should get zeroed by caller.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133 */
Wei Yang7f3eb552015-09-08 14:59:50 -07005134static void __paginginit free_area_init_core(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135{
Christoph Lameter2f1b6242006-09-25 23:31:13 -07005136 enum zone_type j;
Dave Hansened8ece22005-10-29 18:16:50 -07005137 int nid = pgdat->node_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005138 unsigned long zone_start_pfn = pgdat->node_start_pfn;
Yasunori Goto718127c2006-06-23 02:03:10 -07005139 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005140
Dave Hansen208d54e2005-10-29 18:16:52 -07005141 pgdat_resize_init(pgdat);
Andrea Arcangeli8177a422012-03-23 20:56:34 +01005142#ifdef CONFIG_NUMA_BALANCING
5143 spin_lock_init(&pgdat->numabalancing_migrate_lock);
5144 pgdat->numabalancing_migrate_nr_pages = 0;
5145 pgdat->numabalancing_migrate_next_window = jiffies;
5146#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147 init_waitqueue_head(&pgdat->kswapd_wait);
Mel Gorman55150612012-07-31 16:44:35 -07005148 init_waitqueue_head(&pgdat->pfmemalloc_wait);
Joonsoo Kimeefa864b2014-12-12 16:55:46 -08005149 pgdat_page_ext_init(pgdat);
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01005150
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151 for (j = 0; j < MAX_NR_ZONES; j++) {
5152 struct zone *zone = pgdat->node_zones + j;
Jiang Liu9feedc92012-12-12 13:52:12 -08005153 unsigned long size, realsize, freesize, memmap_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154
Gu Zhengfebd5942015-06-24 16:57:02 -07005155 size = zone->spanned_pages;
5156 realsize = freesize = zone->present_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157
Mel Gorman0e0b8642006-09-27 01:49:56 -07005158 /*
Jiang Liu9feedc92012-12-12 13:52:12 -08005159 * Adjust freesize so that it accounts for how much memory
Mel Gorman0e0b8642006-09-27 01:49:56 -07005160 * is used by this zone for memmap. This affects the watermark
5161 * and per-cpu initialisations
5162 */
Jiang Liu01cefae2012-12-12 13:52:19 -08005163 memmap_pages = calc_memmap_size(size, realsize);
Zhong Hongboba914f42014-12-12 16:56:21 -08005164 if (!is_highmem_idx(j)) {
5165 if (freesize >= memmap_pages) {
5166 freesize -= memmap_pages;
5167 if (memmap_pages)
5168 printk(KERN_DEBUG
5169 " %s zone: %lu pages used for memmap\n",
5170 zone_names[j], memmap_pages);
5171 } else
5172 printk(KERN_WARNING
5173 " %s zone: %lu pages exceeds freesize %lu\n",
5174 zone_names[j], memmap_pages, freesize);
5175 }
Mel Gorman0e0b8642006-09-27 01:49:56 -07005176
Christoph Lameter62672762007-02-10 01:43:07 -08005177 /* Account for reserved pages */
Jiang Liu9feedc92012-12-12 13:52:12 -08005178 if (j == 0 && freesize > dma_reserve) {
5179 freesize -= dma_reserve;
Yinghai Lud903ef92008-10-18 20:27:06 -07005180 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
Christoph Lameter62672762007-02-10 01:43:07 -08005181 zone_names[0], dma_reserve);
Mel Gorman0e0b8642006-09-27 01:49:56 -07005182 }
5183
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07005184 if (!is_highmem_idx(j))
Jiang Liu9feedc92012-12-12 13:52:12 -08005185 nr_kernel_pages += freesize;
Jiang Liu01cefae2012-12-12 13:52:19 -08005186 /* Charge for highmem memmap if there are enough kernel pages */
5187 else if (nr_kernel_pages > memmap_pages * 2)
5188 nr_kernel_pages -= memmap_pages;
Jiang Liu9feedc92012-12-12 13:52:12 -08005189 nr_all_pages += freesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190
Jiang Liu9feedc92012-12-12 13:52:12 -08005191 /*
5192 * Set an approximate value for lowmem here; it will be adjusted
5193 * when the bootmem allocator frees pages into the buddy system.
5194 * And all highmem pages will be managed by the buddy system.
5195 */
5196 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
Christoph Lameter96146342006-07-03 00:24:13 -07005197#ifdef CONFIG_NUMA
Christoph Lameterd5f541e2006-09-27 01:50:08 -07005198 zone->node = nid;
Jiang Liu9feedc92012-12-12 13:52:12 -08005199 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
Christoph Lameter96146342006-07-03 00:24:13 -07005200 / 100;
Jiang Liu9feedc92012-12-12 13:52:12 -08005201 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
Christoph Lameter96146342006-07-03 00:24:13 -07005202#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203 zone->name = zone_names[j];
5204 spin_lock_init(&zone->lock);
5205 spin_lock_init(&zone->lru_lock);
Dave Hansenbdc8cb92005-10-29 18:16:53 -07005206 zone_seqlock_init(zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207 zone->zone_pgdat = pgdat;
Dave Hansened8ece22005-10-29 18:16:50 -07005208 zone_pcp_init(zone);
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07005209
5210 /* For bootup, initialized properly in watermark setup */
5211 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
5212
Hugh Dickinsbea8c152012-11-16 14:14:54 -08005213 lruvec_init(&zone->lruvec);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214 if (!size)
5215 continue;
5216
Andrew Morton955c1cd2012-05-29 15:06:31 -07005217 set_pageblock_order();
Linus Torvalds7c455122013-02-18 09:58:02 -08005218 setup_usemap(pgdat, zone, zone_start_pfn, size);
Yaowei Baib171e402015-11-05 18:47:06 -08005219 ret = init_currently_empty_zone(zone, zone_start_pfn, size);
Yasunori Goto718127c2006-06-23 02:03:10 -07005220 BUG_ON(ret);
Heiko Carstens76cdd582008-05-14 16:05:52 -07005221 memmap_init(size, nid, j, zone_start_pfn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005222 zone_start_pfn += size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223 }
5224}
5225
Sam Ravnborg577a32f2007-05-17 23:29:25 +02005226static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005227{
Laura Abbotta1c34a32015-11-05 18:48:46 -08005228 unsigned long __maybe_unused offset = 0;
5229
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230 /* Skip empty nodes */
5231 if (!pgdat->node_spanned_pages)
5232 return;
5233
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005234#ifdef CONFIG_FLAT_NODE_MEM_MAP
Linus Torvalds1da177e2005-04-16 15:20:36 -07005235 /* ia64 gets its own node_mem_map, before this, without bootmem */
5236 if (!pgdat->node_mem_map) {
Bob Piccoe984bb42006-05-20 15:00:31 -07005237 unsigned long size, start, end;
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005238 struct page *map;
5239
Bob Piccoe984bb42006-05-20 15:00:31 -07005240 /*
5241 * The zone's endpoints aren't required to be MAX_ORDER
5242 * aligned but the node_mem_map endpoints must be in order
5243 * for the buddy allocator to function correctly.
5244 */
5245 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
Laura Abbotta1c34a32015-11-05 18:48:46 -08005246 offset = pgdat->node_start_pfn - start;
Cody P Schafer108bcc92013-02-22 16:35:23 -08005247 end = pgdat_end_pfn(pgdat);
Bob Piccoe984bb42006-05-20 15:00:31 -07005248 end = ALIGN(end, MAX_ORDER_NR_PAGES);
5249 size = (end - start) * sizeof(struct page);
Dave Hansen6f167ec2005-06-23 00:07:39 -07005250 map = alloc_remap(pgdat->node_id, size);
5251 if (!map)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005252 map = memblock_virt_alloc_node_nopanic(size,
5253 pgdat->node_id);
Laura Abbotta1c34a32015-11-05 18:48:46 -08005254 pgdat->node_mem_map = map + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255 }
Roman Zippel12d810c2007-05-31 00:40:54 -07005256#ifndef CONFIG_NEED_MULTIPLE_NODES
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257 /*
5258 * With no DISCONTIG, the global mem_map is just set as node 0's
5259 */
Mel Gormanc7132162006-09-27 01:49:43 -07005260 if (pgdat == NODE_DATA(0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261 mem_map = NODE_DATA(0)->node_mem_map;
Laura Abbotta1c34a32015-11-05 18:48:46 -08005262#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
Mel Gormanc7132162006-09-27 01:49:43 -07005263 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
Laura Abbotta1c34a32015-11-05 18:48:46 -08005264 mem_map -= offset;
Tejun Heo0ee332c2011-12-08 10:22:09 -08005265#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07005266 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005267#endif
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005268#endif /* CONFIG_FLAT_NODE_MEM_MAP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005269}
5270
Johannes Weiner9109fb72008-07-23 21:27:20 -07005271void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5272 unsigned long node_start_pfn, unsigned long *zholes_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273{
Johannes Weiner9109fb72008-07-23 21:27:20 -07005274 pg_data_t *pgdat = NODE_DATA(nid);
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005275 unsigned long start_pfn = 0;
5276 unsigned long end_pfn = 0;
Johannes Weiner9109fb72008-07-23 21:27:20 -07005277
Minchan Kim88fdf752012-07-31 16:46:14 -07005278 /* pg_data_t should be reset to zero when it's allocated */
Linus Torvalds8783b6e2012-08-02 10:37:03 -07005279 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
Minchan Kim88fdf752012-07-31 16:46:14 -07005280
Mel Gorman3a80a7f2015-06-30 14:57:02 -07005281 reset_deferred_meminit(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 pgdat->node_id = nid;
5283 pgdat->node_start_pfn = node_start_pfn;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005284#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5285 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
Juergen Gross8d29e182015-02-11 15:26:01 -08005286 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
Zhen Lei4ada0c52015-09-08 15:04:19 -07005287 (u64)start_pfn << PAGE_SHIFT,
5288 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005289#endif
5290 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5291 zones_size, zholes_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005292
5293 alloc_node_mem_map(pgdat);
Yinghai Lue8c27ac2008-06-01 13:15:22 -07005294#ifdef CONFIG_FLAT_NODE_MEM_MAP
5295 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5296 nid, (unsigned long)pgdat,
5297 (unsigned long)pgdat->node_mem_map);
5298#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299
Wei Yang7f3eb552015-09-08 14:59:50 -07005300 free_area_init_core(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005301}
5302
Tejun Heo0ee332c2011-12-08 10:22:09 -08005303#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Miklos Szeredi418508c2007-05-23 13:57:55 -07005304
5305#if MAX_NUMNODES > 1
5306/*
5307 * Figure out the number of possible node ids.
5308 */
Cody P Schaferf9872ca2013-04-29 15:08:01 -07005309void __init setup_nr_node_ids(void)
Miklos Szeredi418508c2007-05-23 13:57:55 -07005310{
Wei Yang904a9552015-09-08 14:59:48 -07005311 unsigned int highest;
Miklos Szeredi418508c2007-05-23 13:57:55 -07005312
Wei Yang904a9552015-09-08 14:59:48 -07005313 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
Miklos Szeredi418508c2007-05-23 13:57:55 -07005314 nr_node_ids = highest + 1;
5315}
Miklos Szeredi418508c2007-05-23 13:57:55 -07005316#endif
5317
Mel Gormanc7132162006-09-27 01:49:43 -07005318/**
Tejun Heo1e019792011-07-12 09:45:34 +02005319 * node_map_pfn_alignment - determine the maximum internode alignment
5320 *
5321 * This function should be called after node map is populated and sorted.
5322 * It calculates the maximum power of two alignment which can distinguish
5323 * all the nodes.
5324 *
5325 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
5326 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
5327 * nodes are shifted by 256MiB, the result is 256MiB. Note that if only the last node is
5328 * shifted, 1GiB is enough and this function will indicate so.
5329 *
5330 * This is used to test whether pfn -> nid mapping of the chosen memory
5331 * model has fine enough granularity to avoid incorrect mapping for the
5332 * populated node map.
5333 *
5334 * Returns the determined alignment in pfn's. 0 if there is no alignment
5335 * requirement (single node).
5336 */
5337unsigned long __init node_map_pfn_alignment(void)
5338{
5339 unsigned long accl_mask = 0, last_end = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02005340 unsigned long start, end, mask;
Tejun Heo1e019792011-07-12 09:45:34 +02005341 int last_nid = -1;
Tejun Heoc13291a2011-07-12 10:46:30 +02005342 int i, nid;
Tejun Heo1e019792011-07-12 09:45:34 +02005343
Tejun Heoc13291a2011-07-12 10:46:30 +02005344 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
Tejun Heo1e019792011-07-12 09:45:34 +02005345 if (!start || last_nid < 0 || last_nid == nid) {
5346 last_nid = nid;
5347 last_end = end;
5348 continue;
5349 }
5350
5351 /*
5352 * Start with a mask granular enough to pin-point to the
5353 * start pfn and tick off bits one-by-one until it becomes
5354 * too coarse to separate the current node from the last.
5355 */
5356 mask = ~((1 << __ffs(start)) - 1);
5357 while (mask && last_end <= (start & (mask << 1)))
5358 mask <<= 1;
5359
5360 /* accumulate all internode masks */
5361 accl_mask |= mask;
5362 }
5363
5364 /* convert mask to number of pages */
5365 return ~accl_mask + 1;
5366}
5367
Mel Gormana6af2bc2007-02-10 01:42:57 -08005368/* Find the lowest pfn for a node */
Adrian Bunkb69a7282008-07-23 21:28:12 -07005369static unsigned long __init find_min_pfn_for_node(int nid)
Mel Gormanc7132162006-09-27 01:49:43 -07005370{
Mel Gormana6af2bc2007-02-10 01:42:57 -08005371 unsigned long min_pfn = ULONG_MAX;
Tejun Heoc13291a2011-07-12 10:46:30 +02005372 unsigned long start_pfn;
5373 int i;
Mel Gorman1abbfb42006-11-23 12:01:41 +00005374
Tejun Heoc13291a2011-07-12 10:46:30 +02005375 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
5376 min_pfn = min(min_pfn, start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005377
Mel Gormana6af2bc2007-02-10 01:42:57 -08005378 if (min_pfn == ULONG_MAX) {
5379 printk(KERN_WARNING
Paul Jackson2bc0d2612008-06-22 07:22:17 -07005380 "Could not find start_pfn for node %d\n", nid);
Mel Gormana6af2bc2007-02-10 01:42:57 -08005381 return 0;
5382 }
5383
5384 return min_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005385}
5386
5387/**
5388 * find_min_pfn_with_active_regions - Find the minimum PFN registered
5389 *
5390 * It returns the minimum PFN based on information provided via
Zhang Zhen7d018172014-06-04 16:10:53 -07005391 * memblock_set_node().
Mel Gormanc7132162006-09-27 01:49:43 -07005392 */
5393unsigned long __init find_min_pfn_with_active_regions(void)
5394{
5395 return find_min_pfn_for_node(MAX_NUMNODES);
5396}
5397
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005398/*
5399 * early_calculate_totalpages()
5400 * Sum pages in active regions for movable zone.
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005401 * Populate N_MEMORY for calculating usable_nodes.
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005402 */
Adrian Bunk484f51f2007-10-16 01:26:03 -07005403static unsigned long __init early_calculate_totalpages(void)
Mel Gorman7e63efe2007-07-17 04:03:15 -07005404{
Mel Gorman7e63efe2007-07-17 04:03:15 -07005405 unsigned long totalpages = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02005406 unsigned long start_pfn, end_pfn;
5407 int i, nid;
Mel Gorman7e63efe2007-07-17 04:03:15 -07005408
Tejun Heoc13291a2011-07-12 10:46:30 +02005409 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
5410 unsigned long pages = end_pfn - start_pfn;
5411
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005412 totalpages += pages;
5413 if (pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005414 node_set_state(nid, N_MEMORY);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005415 }
Pintu Kumarb8af2942013-09-11 14:20:34 -07005416 return totalpages;
Mel Gorman7e63efe2007-07-17 04:03:15 -07005417}
5418
Mel Gorman2a1e2742007-07-17 04:03:12 -07005419/*
5420 * Find the PFN the Movable zone begins in each node. Kernel memory
5421 * is spread evenly between nodes as long as the nodes have enough
5422 * memory. When they don't, some nodes will have more kernelcore than
5423 * others
5424 */
Kautuk Consulb224ef82012-03-21 16:34:15 -07005425static void __init find_zone_movable_pfns_for_nodes(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07005426{
5427 int i, nid;
5428 unsigned long usable_startpfn;
5429 unsigned long kernelcore_node, kernelcore_remaining;
Yinghai Lu66918dc2009-06-30 11:41:37 -07005430 /* save the state before borrow the nodemask */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005431 nodemask_t saved_node_state = node_states[N_MEMORY];
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005432 unsigned long totalpages = early_calculate_totalpages();
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005433 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
Emil Medve136199f2014-04-07 15:37:52 -07005434 struct memblock_region *r;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005435
5436 /* Need to find movable_zone earlier when movable_node is specified. */
5437 find_usable_zone_for_movable();
Mel Gorman2a1e2742007-07-17 04:03:12 -07005438
Mel Gorman7e63efe2007-07-17 04:03:15 -07005439 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005440 * If movable_node is specified, ignore kernelcore and movablecore
5441 * options.
5442 */
5443 if (movable_node_is_enabled()) {
Emil Medve136199f2014-04-07 15:37:52 -07005444 for_each_memblock(memory, r) {
5445 if (!memblock_is_hotpluggable(r))
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005446 continue;
5447
Emil Medve136199f2014-04-07 15:37:52 -07005448 nid = r->nid;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005449
Emil Medve136199f2014-04-07 15:37:52 -07005450 usable_startpfn = PFN_DOWN(r->base);
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005451 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5452 min(usable_startpfn, zone_movable_pfn[nid]) :
5453 usable_startpfn;
5454 }
5455
5456 goto out2;
5457 }
5458
5459 /*
5460 * If movablecore=nn[KMG] was specified, calculate the corresponding
Mel Gorman7e63efe2007-07-17 04:03:15 -07005461 * size of kernelcore so that memory usable for
5462 * any allocation type is evenly spread. If both kernelcore
5463 * and movablecore are specified, then the value of kernelcore
5464 * will be used for required_kernelcore if it's greater than
5465 * what movablecore would have allowed.
5466 */
5467 if (required_movablecore) {
Mel Gorman7e63efe2007-07-17 04:03:15 -07005468 unsigned long corepages;
5469
5470 /*
5471 * Round-up so that ZONE_MOVABLE is at least as large as what
5472 * was requested by the user
5473 */
5474 required_movablecore =
5475 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
Xishi Qiu9fd745d2015-11-05 18:48:11 -08005476 required_movablecore = min(totalpages, required_movablecore);
Mel Gorman7e63efe2007-07-17 04:03:15 -07005477 corepages = totalpages - required_movablecore;
5478
5479 required_kernelcore = max(required_kernelcore, corepages);
5480 }
5481
Xishi Qiubde304b2015-11-05 18:48:56 -08005482 /*
5483 * If kernelcore was not specified or kernelcore size is larger
5484 * than totalpages, there is no ZONE_MOVABLE.
5485 */
5486 if (!required_kernelcore || required_kernelcore >= totalpages)
Yinghai Lu66918dc2009-06-30 11:41:37 -07005487 goto out;
Mel Gorman2a1e2742007-07-17 04:03:12 -07005488
5489 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
Mel Gorman2a1e2742007-07-17 04:03:12 -07005490 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
5491
5492restart:
5493 /* Spread kernelcore memory as evenly as possible throughout nodes */
5494 kernelcore_node = required_kernelcore / usable_nodes;
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005495 for_each_node_state(nid, N_MEMORY) {
Tejun Heoc13291a2011-07-12 10:46:30 +02005496 unsigned long start_pfn, end_pfn;
5497
Mel Gorman2a1e2742007-07-17 04:03:12 -07005498 /*
5499 * Recalculate kernelcore_node if the division per node
5500 * now exceeds what is necessary to satisfy the requested
5501 * amount of memory for the kernel
5502 */
5503 if (required_kernelcore < kernelcore_node)
5504 kernelcore_node = required_kernelcore / usable_nodes;
5505
5506 /*
5507 * As the map is walked, we track how much memory is usable
5508 * by the kernel using kernelcore_remaining. When it is
5509 * 0, the rest of the node is usable by ZONE_MOVABLE
5510 */
5511 kernelcore_remaining = kernelcore_node;
5512
5513 /* Go through each range of PFNs within this node */
Tejun Heoc13291a2011-07-12 10:46:30 +02005514 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07005515 unsigned long size_pages;
5516
Tejun Heoc13291a2011-07-12 10:46:30 +02005517 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
Mel Gorman2a1e2742007-07-17 04:03:12 -07005518 if (start_pfn >= end_pfn)
5519 continue;
5520
5521 /* Account for what is only usable for kernelcore */
5522 if (start_pfn < usable_startpfn) {
5523 unsigned long kernel_pages;
5524 kernel_pages = min(end_pfn, usable_startpfn)
5525 - start_pfn;
5526
5527 kernelcore_remaining -= min(kernel_pages,
5528 kernelcore_remaining);
5529 required_kernelcore -= min(kernel_pages,
5530 required_kernelcore);
5531
5532 /* Continue if range is now fully accounted */
5533 if (end_pfn <= usable_startpfn) {
5534
5535 /*
5536 * Push zone_movable_pfn to the end so
5537 * that if we have to rebalance
5538 * kernelcore across nodes, we will
5539 * not double account here
5540 */
5541 zone_movable_pfn[nid] = end_pfn;
5542 continue;
5543 }
5544 start_pfn = usable_startpfn;
5545 }
5546
5547 /*
5548 * The usable PFN range for ZONE_MOVABLE is from
5549 * start_pfn->end_pfn. Calculate size_pages as the
5550 * number of pages used as kernelcore
5551 */
5552 size_pages = end_pfn - start_pfn;
5553 if (size_pages > kernelcore_remaining)
5554 size_pages = kernelcore_remaining;
5555 zone_movable_pfn[nid] = start_pfn + size_pages;
5556
5557 /*
5558 * Some kernelcore has been met, update counts and
5559 * break if the kernelcore for this node has been
Pintu Kumarb8af2942013-09-11 14:20:34 -07005560 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07005561 */
5562 required_kernelcore -= min(required_kernelcore,
5563 size_pages);
5564 kernelcore_remaining -= size_pages;
5565 if (!kernelcore_remaining)
5566 break;
5567 }
5568 }
5569
5570 /*
5571 * If there is still required_kernelcore, we do another pass with one
5572 * less node in the count. This will push zone_movable_pfn[nid] further
5573 * along on the nodes that still have memory until kernelcore is
Pintu Kumarb8af2942013-09-11 14:20:34 -07005574 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07005575 */
5576 usable_nodes--;
5577 if (usable_nodes && required_kernelcore > usable_nodes)
5578 goto restart;
5579
Tang Chenb2f3eeb2014-01-21 15:49:38 -08005580out2:
Mel Gorman2a1e2742007-07-17 04:03:12 -07005581 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5582 for (nid = 0; nid < MAX_NUMNODES; nid++)
5583 zone_movable_pfn[nid] =
5584 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
Yinghai Lu66918dc2009-06-30 11:41:37 -07005585
Yinghai Lu20e69262013-03-01 14:51:27 -08005586out:
Yinghai Lu66918dc2009-06-30 11:41:37 -07005587 /* restore the node_state */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005588 node_states[N_MEMORY] = saved_node_state;
Mel Gorman2a1e2742007-07-17 04:03:12 -07005589}
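/*
 * Illustrative example: with "kernelcore=2G" on a machine that has two
 * nodes of 4 GiB each, required_kernelcore is split evenly so each node
 * keeps roughly 1 GiB for the kernel zones and zone_movable_pfn[nid]
 * lands about 1 GiB into each node (rounded up to MAX_ORDER_NR_PAGES),
 * leaving the upper ~3 GiB per node for ZONE_MOVABLE.
 */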
5590
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005591/* Any regular or high memory on that node ? */
5592static void check_for_memory(pg_data_t *pgdat, int nid)
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005593{
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005594 enum zone_type zone_type;
5595
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005596 if (N_MEMORY == N_NORMAL_MEMORY)
5597 return;
5598
5599 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005600 struct zone *zone = &pgdat->node_zones[zone_type];
Xishi Qiub38a8722013-11-12 15:07:20 -08005601 if (populated_zone(zone)) {
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005602 node_set_state(nid, N_HIGH_MEMORY);
5603 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5604 zone_type <= ZONE_NORMAL)
5605 node_set_state(nid, N_NORMAL_MEMORY);
Bob Liud0048b02012-01-12 17:19:07 -08005606 break;
5607 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005608 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005609}
5610
Mel Gormanc7132162006-09-27 01:49:43 -07005611/**
5612 * free_area_init_nodes - Initialise all pg_data_t and zone data
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005613 * @max_zone_pfn: an array of max PFNs for each zone
Mel Gormanc7132162006-09-27 01:49:43 -07005614 *
5615 * This will call free_area_init_node() for each active node in the system.
Zhang Zhen7d018172014-06-04 16:10:53 -07005616 * Using the page ranges provided by memblock_set_node(), the size of each
Mel Gormanc7132162006-09-27 01:49:43 -07005617 * zone in each node and their holes is calculated. If the maximum PFN
5618 * between two adjacent zones match, it is assumed that the zone is empty.
5619 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5620 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5621 * starts where the previous one ended. For example, ZONE_DMA32 starts
5622 * at arch_max_dma_pfn.
5623 */
5624void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5625{
Tejun Heoc13291a2011-07-12 10:46:30 +02005626 unsigned long start_pfn, end_pfn;
5627 int i, nid;
Mel Gormana6af2bc2007-02-10 01:42:57 -08005628
Mel Gormanc7132162006-09-27 01:49:43 -07005629 /* Record where the zone boundaries are */
5630 memset(arch_zone_lowest_possible_pfn, 0,
5631 sizeof(arch_zone_lowest_possible_pfn));
5632 memset(arch_zone_highest_possible_pfn, 0,
5633 sizeof(arch_zone_highest_possible_pfn));
5634 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5635 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5636 for (i = 1; i < MAX_NR_ZONES; i++) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07005637 if (i == ZONE_MOVABLE)
5638 continue;
Mel Gormanc7132162006-09-27 01:49:43 -07005639 arch_zone_lowest_possible_pfn[i] =
5640 arch_zone_highest_possible_pfn[i-1];
5641 arch_zone_highest_possible_pfn[i] =
5642 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5643 }
Mel Gorman2a1e2742007-07-17 04:03:12 -07005644 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5645 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5646
5647 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
5648 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
Kautuk Consulb224ef82012-03-21 16:34:15 -07005649 find_zone_movable_pfns_for_nodes();
Mel Gormanc7132162006-09-27 01:49:43 -07005650
Mel Gormanc7132162006-09-27 01:49:43 -07005651 /* Print out the zone ranges */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08005652 pr_info("Zone ranges:\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07005653 for (i = 0; i < MAX_NR_ZONES; i++) {
5654 if (i == ZONE_MOVABLE)
5655 continue;
Anton Blanchardf88dfff2014-12-10 15:42:53 -08005656 pr_info(" %-8s ", zone_names[i]);
David Rientjes72f0ba02010-03-05 13:42:14 -08005657 if (arch_zone_lowest_possible_pfn[i] ==
5658 arch_zone_highest_possible_pfn[i])
Anton Blanchardf88dfff2014-12-10 15:42:53 -08005659 pr_cont("empty\n");
David Rientjes72f0ba02010-03-05 13:42:14 -08005660 else
Juergen Gross8d29e182015-02-11 15:26:01 -08005661 pr_cont("[mem %#018Lx-%#018Lx]\n",
5662 (u64)arch_zone_lowest_possible_pfn[i]
5663 << PAGE_SHIFT,
5664 ((u64)arch_zone_highest_possible_pfn[i]
Bjorn Helgaasa62e2f42012-05-29 15:06:30 -07005665 << PAGE_SHIFT) - 1);
Mel Gorman2a1e2742007-07-17 04:03:12 -07005666 }
5667
5668 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08005669 pr_info("Movable zone start for each node\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07005670 for (i = 0; i < MAX_NUMNODES; i++) {
5671 if (zone_movable_pfn[i])
Juergen Gross8d29e182015-02-11 15:26:01 -08005672 pr_info(" Node %d: %#018Lx\n", i,
5673 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
Mel Gorman2a1e2742007-07-17 04:03:12 -07005674 }
Mel Gormanc7132162006-09-27 01:49:43 -07005675
Wanpeng Lif2d52fe2012-10-08 16:32:24 -07005676 /* Print out the early node map */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08005677 pr_info("Early memory node ranges\n");
Tejun Heoc13291a2011-07-12 10:46:30 +02005678 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
Juergen Gross8d29e182015-02-11 15:26:01 -08005679 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
5680 (u64)start_pfn << PAGE_SHIFT,
5681 ((u64)end_pfn << PAGE_SHIFT) - 1);
Mel Gormanc7132162006-09-27 01:49:43 -07005682
5683 /* Initialise every node */
Mel Gorman708614e2008-07-23 21:26:51 -07005684 mminit_verify_pageflags_layout();
Christoph Lameter8ef82862007-02-20 13:57:52 -08005685 setup_nr_node_ids();
Mel Gormanc7132162006-09-27 01:49:43 -07005686 for_each_online_node(nid) {
5687 pg_data_t *pgdat = NODE_DATA(nid);
Johannes Weiner9109fb72008-07-23 21:27:20 -07005688 free_area_init_node(nid, NULL,
Mel Gormanc7132162006-09-27 01:49:43 -07005689 find_min_pfn_for_node(nid), NULL);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07005690
5691 /* Any memory on that node */
5692 if (pgdat->node_present_pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005693 node_set_state(nid, N_MEMORY);
5694 check_for_memory(pgdat, nid);
Mel Gormanc7132162006-09-27 01:49:43 -07005695 }
5696}
Mel Gorman2a1e2742007-07-17 04:03:12 -07005697
Mel Gorman7e63efe2007-07-17 04:03:15 -07005698static int __init cmdline_parse_core(char *p, unsigned long *core)
Mel Gorman2a1e2742007-07-17 04:03:12 -07005699{
5700 unsigned long long coremem;
5701 if (!p)
5702 return -EINVAL;
5703
5704 coremem = memparse(p, &p);
Mel Gorman7e63efe2007-07-17 04:03:15 -07005705 *core = coremem >> PAGE_SHIFT;
Mel Gorman2a1e2742007-07-17 04:03:12 -07005706
Mel Gorman7e63efe2007-07-17 04:03:15 -07005707 /* Paranoid check that UL is enough for the coremem value */
Mel Gorman2a1e2742007-07-17 04:03:12 -07005708 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5709
5710 return 0;
5711}
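/*
 * Example: "kernelcore=512M" makes memparse() return 536870912, which is
 * stored as 131072 pages with 4 KiB pages (PAGE_SHIFT == 12).
 */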
Mel Gormaned7ed362007-07-17 04:03:14 -07005712
Mel Gorman7e63efe2007-07-17 04:03:15 -07005713/*
5714 * kernelcore=size sets the amount of memory for use for allocations that
5715 * cannot be reclaimed or migrated.
5716 */
5717static int __init cmdline_parse_kernelcore(char *p)
5718{
5719 return cmdline_parse_core(p, &required_kernelcore);
5720}
5721
5722/*
5723 * movablecore=size sets the amount of memory for use for allocations that
5724 * can be reclaimed or migrated.
5725 */
5726static int __init cmdline_parse_movablecore(char *p)
5727{
5728 return cmdline_parse_core(p, &required_movablecore);
5729}
5730
Mel Gormaned7ed362007-07-17 04:03:14 -07005731early_param("kernelcore", cmdline_parse_kernelcore);
Mel Gorman7e63efe2007-07-17 04:03:15 -07005732early_param("movablecore", cmdline_parse_movablecore);
Mel Gormaned7ed362007-07-17 04:03:14 -07005733
Tejun Heo0ee332c2011-12-08 10:22:09 -08005734#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07005735
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07005736void adjust_managed_page_count(struct page *page, long count)
5737{
5738 spin_lock(&managed_page_count_lock);
5739 page_zone(page)->managed_pages += count;
5740 totalram_pages += count;
Jiang Liu3dcc0572013-07-03 15:03:21 -07005741#ifdef CONFIG_HIGHMEM
5742 if (PageHighMem(page))
5743 totalhigh_pages += count;
5744#endif
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07005745 spin_unlock(&managed_page_count_lock);
5746}
Jiang Liu3dcc0572013-07-03 15:03:21 -07005747EXPORT_SYMBOL(adjust_managed_page_count);
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07005748
Jiang Liu11199692013-07-03 15:02:48 -07005749unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
Jiang Liu69afade2013-04-29 15:06:21 -07005750{
Jiang Liu11199692013-07-03 15:02:48 -07005751 void *pos;
5752 unsigned long pages = 0;
Jiang Liu69afade2013-04-29 15:06:21 -07005753
Jiang Liu11199692013-07-03 15:02:48 -07005754 start = (void *)PAGE_ALIGN((unsigned long)start);
5755 end = (void *)((unsigned long)end & PAGE_MASK);
5756 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
Jiang Liudbe67df2013-07-03 15:02:51 -07005757 if ((unsigned int)poison <= 0xFF)
Jiang Liu11199692013-07-03 15:02:48 -07005758 memset(pos, poison, PAGE_SIZE);
5759 free_reserved_page(virt_to_page(pos));
Jiang Liu69afade2013-04-29 15:06:21 -07005760 }
5761
5762 if (pages && s)
Jiang Liu11199692013-07-03 15:02:48 -07005763 pr_info("Freeing %s memory: %ldK (%p - %p)\n",
Jiang Liu69afade2013-04-29 15:06:21 -07005764 s, pages << (PAGE_SHIFT - 10), start, end);
5765
5766 return pages;
5767}
Jiang Liu11199692013-07-03 15:02:48 -07005768EXPORT_SYMBOL(free_reserved_area);
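/*
 * Note: the poison value is only applied when it fits in a byte; passing a
 * value outside 0..0xFF (e.g. -1) skips the memset entirely, and the pages
 * are simply handed back to the buddy allocator, with the pr_info() above
 * summarising how much was freed.
 */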
Jiang Liu69afade2013-04-29 15:06:21 -07005769
Jiang Liucfa11e02013-04-29 15:07:00 -07005770#ifdef CONFIG_HIGHMEM
5771void free_highmem_page(struct page *page)
5772{
5773 __free_reserved_page(page);
5774 totalram_pages++;
Jiang Liu7b4b2a02013-07-03 15:03:11 -07005775 page_zone(page)->managed_pages++;
Jiang Liucfa11e02013-04-29 15:07:00 -07005776 totalhigh_pages++;
5777}
5778#endif
5779
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07005780
5781void __init mem_init_print_info(const char *str)
5782{
5783 unsigned long physpages, codesize, datasize, rosize, bss_size;
5784 unsigned long init_code_size, init_data_size;
5785
5786 physpages = get_num_physpages();
5787 codesize = _etext - _stext;
5788 datasize = _edata - _sdata;
5789 rosize = __end_rodata - __start_rodata;
5790 bss_size = __bss_stop - __bss_start;
5791 init_data_size = __init_end - __init_begin;
5792 init_code_size = _einittext - _sinittext;
5793
5794 /*
5795 * Detect special cases and adjust section sizes accordingly:
5796 * 1) .init.* may be embedded into .data sections
5797 * 2) .init.text.* may be out of [__init_begin, __init_end],
5798 * please refer to arch/tile/kernel/vmlinux.lds.S.
5799 * 3) .rodata.* may be embedded into .text or .data sections.
5800 */
5801#define adj_init_size(start, end, size, pos, adj) \
Pintu Kumarb8af2942013-09-11 14:20:34 -07005802 do { \
5803 if (start <= pos && pos < end && size > adj) \
5804 size -= adj; \
5805 } while (0)
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07005806
5807 adj_init_size(__init_begin, __init_end, init_data_size,
5808 _sinittext, init_code_size);
5809 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
5810 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
5811 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
5812 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
5813
5814#undef adj_init_size
5815
Anton Blanchardf88dfff2014-12-10 15:42:53 -08005816 pr_info("Memory: %luK/%luK available "
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07005817 "(%luK kernel code, %luK rwdata, %luK rodata, "
Pintu Kumare48322a2014-12-18 16:17:15 -08005818 "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07005819#ifdef CONFIG_HIGHMEM
5820 ", %luK highmem"
5821#endif
5822 "%s%s)\n",
5823 nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
5824 codesize >> 10, datasize >> 10, rosize >> 10,
5825 (init_data_size + init_code_size) >> 10, bss_size >> 10,
Pintu Kumare48322a2014-12-18 16:17:15 -08005826 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
5827 totalcma_pages << (PAGE_SHIFT-10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07005828#ifdef CONFIG_HIGHMEM
5829 totalhigh_pages << (PAGE_SHIFT-10),
5830#endif
5831 str ? ", " : "", str ? str : "");
5832}
5833
Mel Gorman0e0b8642006-09-27 01:49:56 -07005834/**
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005835 * set_dma_reserve - set the specified number of pages reserved in the first zone
5836 * @new_dma_reserve: The number of pages to mark reserved
Mel Gorman0e0b8642006-09-27 01:49:56 -07005837 *
Yaowei Bai013110a2015-09-08 15:04:10 -07005838 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
Mel Gorman0e0b8642006-09-27 01:49:56 -07005839 * In the DMA zone, a significant percentage may be consumed by kernel image
5840 * and other unfreeable allocations which can skew the watermarks badly. This
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005841 * function may optionally be used to account for unfreeable pages in the
5842 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5843 * smaller per-cpu batchsize.
Mel Gorman0e0b8642006-09-27 01:49:56 -07005844 */
5845void __init set_dma_reserve(unsigned long new_dma_reserve)
5846{
5847 dma_reserve = new_dma_reserve;
5848}
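/*
 * Illustrative only (hypothetical figure): an architecture that knows,
 * say, 4MB of its kernel image sits in the first zone could call
 * set_dma_reserve((4 * 1024 * 1024) >> PAGE_SHIFT) during setup so those
 * 1024 pages are excluded from that zone's watermark and batch-size
 * calculations.
 */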
5849
Linus Torvalds1da177e2005-04-16 15:20:36 -07005850void __init free_area_init(unsigned long *zones_size)
5851{
Johannes Weiner9109fb72008-07-23 21:27:20 -07005852 free_area_init_node(0, zones_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005853 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5854}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855
Linus Torvalds1da177e2005-04-16 15:20:36 -07005856static int page_alloc_cpu_notify(struct notifier_block *self,
5857 unsigned long action, void *hcpu)
5858{
5859 int cpu = (unsigned long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005860
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005861 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
Konstantin Khlebnikovf0cb3c72012-03-21 16:34:06 -07005862 lru_add_drain_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08005863 drain_pages(cpu);
5864
5865 /*
5866 * Spill the event counters of the dead processor
5867 * into the current processors event counters.
5868 * This artificially elevates the count of the current
5869 * processor.
5870 */
Christoph Lameterf8891e52006-06-30 01:55:45 -07005871 vm_events_fold_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08005872
5873 /*
5874 * Zero the differential counters of the dead processor
5875 * so that the vm statistics are consistent.
5876 *
5877 * This is only okay since the processor is dead and cannot
5878 * race with what we are doing.
5879 */
Christoph Lameter2bb921e2013-09-11 14:21:30 -07005880 cpu_vm_stats_fold(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005881 }
5882 return NOTIFY_OK;
5883}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005884
5885void __init page_alloc_init(void)
5886{
5887 hotcpu_notifier(page_alloc_cpu_notify, 0);
5888}
5889
5890/*
Yaowei Bai34b10062015-09-08 15:04:13 -07005891 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07005892 * or min_free_kbytes changes.
5893 */
5894static void calculate_totalreserve_pages(void)
5895{
5896 struct pglist_data *pgdat;
5897 unsigned long reserve_pages = 0;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07005898 enum zone_type i, j;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07005899
5900 for_each_online_pgdat(pgdat) {
5901 for (i = 0; i < MAX_NR_ZONES; i++) {
5902 struct zone *zone = pgdat->node_zones + i;
Mel Gorman3484b2d2014-08-06 16:07:14 -07005903 long max = 0;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07005904
5905 /* Find valid and maximum lowmem_reserve in the zone */
5906 for (j = i; j < MAX_NR_ZONES; j++) {
5907 if (zone->lowmem_reserve[j] > max)
5908 max = zone->lowmem_reserve[j];
5909 }
5910
Mel Gorman41858962009-06-16 15:32:12 -07005911 /* we treat the high watermark as reserved pages. */
5912 max += high_wmark_pages(zone);
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07005913
Jiang Liub40da042013-02-22 16:33:52 -08005914 if (max > zone->managed_pages)
5915 max = zone->managed_pages;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07005916 reserve_pages += max;
Johannes Weinerab8fabd2012-01-10 15:07:42 -08005917 /*
5918 * Lowmem reserves are not available to
5919 * GFP_HIGHUSER page cache allocations and
5920 * kswapd tries to balance zones to their high
5921 * watermark. As a result, neither should be
5922 * regarded as dirtyable memory, to prevent a
5923 * situation where reclaim has to clean pages
5924 * in order to balance the zones.
5925 */
5926 zone->dirty_balance_reserve = max;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07005927 }
5928 }
Johannes Weinerab8fabd2012-01-10 15:07:42 -08005929 dirty_balance_reserve = reserve_pages;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07005930 totalreserve_pages = reserve_pages;
5931}
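/*
 * Worked example (hypothetical zone): with a high watermark of 1536 pages
 * and a largest lowmem_reserve[] entry of 768, the zone contributes
 * 768 + 1536 = 2304 pages to totalreserve_pages (capped at its
 * managed_pages), and the same figure becomes its dirty_balance_reserve.
 */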
5932
5933/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07005934 * setup_per_zone_lowmem_reserve - called whenever
Yaowei Bai34b10062015-09-08 15:04:13 -07005935 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
Linus Torvalds1da177e2005-04-16 15:20:36 -07005936 * has a correct pages reserved value, so an adequate number of
5937 * pages are left in the zone after a successful __alloc_pages().
5938 */
5939static void setup_per_zone_lowmem_reserve(void)
5940{
5941 struct pglist_data *pgdat;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07005942 enum zone_type j, idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005943
KAMEZAWA Hiroyukiec936fc2006-03-27 01:15:59 -08005944 for_each_online_pgdat(pgdat) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005945 for (j = 0; j < MAX_NR_ZONES; j++) {
5946 struct zone *zone = pgdat->node_zones + j;
Jiang Liub40da042013-02-22 16:33:52 -08005947 unsigned long managed_pages = zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005948
5949 zone->lowmem_reserve[j] = 0;
5950
Christoph Lameter2f6726e2006-09-25 23:31:18 -07005951 idx = j;
5952 while (idx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005953 struct zone *lower_zone;
5954
Christoph Lameter2f6726e2006-09-25 23:31:18 -07005955 idx--;
5956
Linus Torvalds1da177e2005-04-16 15:20:36 -07005957 if (sysctl_lowmem_reserve_ratio[idx] < 1)
5958 sysctl_lowmem_reserve_ratio[idx] = 1;
5959
5960 lower_zone = pgdat->node_zones + idx;
Jiang Liub40da042013-02-22 16:33:52 -08005961 lower_zone->lowmem_reserve[j] = managed_pages /
Linus Torvalds1da177e2005-04-16 15:20:36 -07005962 sysctl_lowmem_reserve_ratio[idx];
Jiang Liub40da042013-02-22 16:33:52 -08005963 managed_pages += lower_zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005964 }
5965 }
5966 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07005967
5968 /* update totalreserve_pages */
5969 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005970}
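/*
 * Worked example (hypothetical sizes, default lowmem_reserve_ratio of 256
 * for ZONE_DMA): with a 768MB ZONE_NORMAL (196608 4K pages), the loop
 * above sets DMA->lowmem_reserve[ZONE_NORMAL] = 196608 / 256 = 768 pages,
 * i.e. ~3MB of ZONE_DMA is held back from allocations that could have
 * been satisfied from ZONE_NORMAL instead.
 */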
5971
Mel Gormancfd3da12011-04-25 21:36:42 +00005972static void __setup_per_zone_wmarks(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005973{
5974 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5975 unsigned long lowmem_pages = 0;
5976 struct zone *zone;
5977 unsigned long flags;
5978
5979 /* Calculate total number of !ZONE_HIGHMEM pages */
5980 for_each_zone(zone) {
5981 if (!is_highmem(zone))
Jiang Liub40da042013-02-22 16:33:52 -08005982 lowmem_pages += zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005983 }
5984
5985 for_each_zone(zone) {
Andrew Mortonac924c62006-05-15 09:43:59 -07005986 u64 tmp;
5987
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07005988 spin_lock_irqsave(&zone->lock, flags);
Jiang Liub40da042013-02-22 16:33:52 -08005989 tmp = (u64)pages_min * zone->managed_pages;
Andrew Mortonac924c62006-05-15 09:43:59 -07005990 do_div(tmp, lowmem_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005991 if (is_highmem(zone)) {
5992 /*
Nick Piggin669ed172005-11-13 16:06:45 -08005993 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5994 * need highmem pages, so cap pages_min to a small
5995 * value here.
5996 *
Mel Gorman41858962009-06-16 15:32:12 -07005997			 * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN)
Yaowei Bai42ff2702015-04-14 15:47:14 -07005998			 * deltas control async page reclaim, and so should
Nick Piggin669ed172005-11-13 16:06:45 -08005999 * not be capped for highmem.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006000 */
Andrew Morton90ae8d62013-02-22 16:32:22 -08006001 unsigned long min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006002
Jiang Liub40da042013-02-22 16:33:52 -08006003 min_pages = zone->managed_pages / 1024;
Andrew Morton90ae8d62013-02-22 16:32:22 -08006004 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
Mel Gorman41858962009-06-16 15:32:12 -07006005 zone->watermark[WMARK_MIN] = min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006006 } else {
Nick Piggin669ed172005-11-13 16:06:45 -08006007 /*
6008 * If it's a lowmem zone, reserve a number of pages
Linus Torvalds1da177e2005-04-16 15:20:36 -07006009 * proportionate to the zone's size.
6010 */
Mel Gorman41858962009-06-16 15:32:12 -07006011 zone->watermark[WMARK_MIN] = tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006012 }
6013
Mel Gorman41858962009-06-16 15:32:12 -07006014 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
6015 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
Marek Szyprowski49f223a2012-01-25 12:49:24 +01006016
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07006017 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
Johannes Weinerabe5f972014-10-02 16:21:10 -07006018 high_wmark_pages(zone) - low_wmark_pages(zone) -
6019 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07006020
Mel Gorman56fd56b2007-10-16 01:25:58 -07006021 setup_zone_migrate_reserve(zone);
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07006022 spin_unlock_irqrestore(&zone->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006023 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006024
6025 /* update totalreserve_pages */
6026 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006027}
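/*
 * Worked example (hypothetical single lowmem zone holding all of lowmem,
 * min_free_kbytes = 4096, 4K pages): pages_min = 4096 >> 2 = 1024 and
 * tmp = 1024 * managed_pages / lowmem_pages = 1024, so the zone ends up
 * with WMARK_MIN = 1024, WMARK_LOW = 1024 + 256 = 1280 and
 * WMARK_HIGH = 1024 + 512 = 1536 pages.
 */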
6028
Mel Gormancfd3da12011-04-25 21:36:42 +00006029/**
6030 * setup_per_zone_wmarks - called when min_free_kbytes changes
6031 * or when memory is hot-{added|removed}
6032 *
6033 * Ensures that the watermark[min,low,high] values for each zone are set
6034 * correctly with respect to min_free_kbytes.
6035 */
6036void setup_per_zone_wmarks(void)
6037{
6038 mutex_lock(&zonelists_mutex);
6039 __setup_per_zone_wmarks();
6040 mutex_unlock(&zonelists_mutex);
6041}
6042
Randy Dunlap55a44622009-09-21 17:01:20 -07006043/*
Rik van Riel556adec2008-10-18 20:26:34 -07006044 * The inactive anon list should be small enough that the VM never has to
6045 * do too much work, but large enough that each inactive page has a chance
6046 * to be referenced again before it is swapped out.
6047 *
6048 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
6049 * INACTIVE_ANON pages on this zone's LRU, maintained by the
6050 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
6051 * the anonymous pages are kept on the inactive list.
6052 *
6053 * total target max
6054 * memory ratio inactive anon
6055 * -------------------------------------
6056 * 10MB 1 5MB
6057 * 100MB 1 50MB
6058 * 1GB 3 250MB
6059 * 10GB 10 0.9GB
6060 * 100GB 31 3GB
6061 * 1TB 101 10GB
6062 * 10TB 320 32GB
6063 */
KOSAKI Motohiro1b79acc2011-05-24 17:11:32 -07006064static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
Minchan Kim96cb4df2009-06-16 15:32:49 -07006065{
6066 unsigned int gb, ratio;
6067
6068 /* Zone size in gigabytes */
Jiang Liub40da042013-02-22 16:33:52 -08006069 gb = zone->managed_pages >> (30 - PAGE_SHIFT);
Minchan Kim96cb4df2009-06-16 15:32:49 -07006070 if (gb)
6071 ratio = int_sqrt(10 * gb);
6072 else
6073 ratio = 1;
6074
6075 zone->inactive_ratio = ratio;
6076}
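/*
 * Worked example: a hypothetical 4GB zone has gb = 4, so the ratio is
 * int_sqrt(10 * 4) = 6 and roughly 1/7 of the anonymous pages are kept on
 * the inactive list, consistent with the table above (1GB -> 3, 10GB -> 10).
 */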
6077
KOSAKI Motohiro839a4fc2011-05-24 17:11:31 -07006078static void __meminit setup_per_zone_inactive_ratio(void)
Rik van Riel556adec2008-10-18 20:26:34 -07006079{
6080 struct zone *zone;
6081
Minchan Kim96cb4df2009-06-16 15:32:49 -07006082 for_each_zone(zone)
6083 calculate_zone_inactive_ratio(zone);
Rik van Riel556adec2008-10-18 20:26:34 -07006084}
6085
Linus Torvalds1da177e2005-04-16 15:20:36 -07006086/*
6087 * Initialise min_free_kbytes.
6088 *
6089 * For small machines we want it small (128k min). For large machines
6090 * we want it large (64MB max). But it is not linear, because network
6091 * bandwidth does not increase linearly with machine size. We use
6092 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07006093 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006094 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6095 *
6096 * which yields
6097 *
6098 * 16MB: 512k
6099 * 32MB: 724k
6100 * 64MB: 1024k
6101 * 128MB: 1448k
6102 * 256MB: 2048k
6103 * 512MB: 2896k
6104 * 1024MB: 4096k
6105 * 2048MB: 5792k
6106 * 4096MB: 8192k
6107 * 8192MB: 11584k
6108 * 16384MB: 16384k
6109 */
KOSAKI Motohiro1b79acc2011-05-24 17:11:32 -07006110int __meminit init_per_zone_wmark_min(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006111{
6112 unsigned long lowmem_kbytes;
Michal Hocko5f127332013-07-08 16:00:40 -07006113 int new_min_free_kbytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006114
6115 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
Michal Hocko5f127332013-07-08 16:00:40 -07006116 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006117
Michal Hocko5f127332013-07-08 16:00:40 -07006118 if (new_min_free_kbytes > user_min_free_kbytes) {
6119 min_free_kbytes = new_min_free_kbytes;
6120 if (min_free_kbytes < 128)
6121 min_free_kbytes = 128;
6122 if (min_free_kbytes > 65536)
6123 min_free_kbytes = 65536;
6124 } else {
6125 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6126 new_min_free_kbytes, user_min_free_kbytes);
6127 }
Minchan Kimbc75d332009-06-16 15:32:48 -07006128 setup_per_zone_wmarks();
KOSAKI Motohiroa6cccdc2011-05-24 17:11:33 -07006129 refresh_zone_stat_thresholds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006130 setup_per_zone_lowmem_reserve();
Rik van Riel556adec2008-10-18 20:26:34 -07006131 setup_per_zone_inactive_ratio();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006132 return 0;
6133}
Minchan Kimbc75d332009-06-16 15:32:48 -07006134module_init(init_per_zone_wmark_min)
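/*
 * Worked example: with 1GB of lowmem, lowmem_kbytes = 1048576 and
 * int_sqrt(1048576 * 16) = 4096, matching the "1024MB: 4096k" row of the
 * table above; the result is then clamped to the [128, 65536] range.
 */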
Linus Torvalds1da177e2005-04-16 15:20:36 -07006135
6136/*
Pintu Kumarb8af2942013-09-11 14:20:34 -07006137 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax()
Linus Torvalds1da177e2005-04-16 15:20:36 -07006138 * so that the per-zone watermarks can be recomputed whenever min_free_kbytes
 6139 * changes.
6140 */
Joe Perchescccad5b2014-06-06 14:38:09 -07006141int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006142 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006143{
Han Pingtianda8c7572014-01-23 15:53:17 -08006144 int rc;
6145
6146 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6147 if (rc)
6148 return rc;
6149
Michal Hocko5f127332013-07-08 16:00:40 -07006150 if (write) {
6151 user_min_free_kbytes = min_free_kbytes;
Minchan Kimbc75d332009-06-16 15:32:48 -07006152 setup_per_zone_wmarks();
Michal Hocko5f127332013-07-08 16:00:40 -07006153 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006154 return 0;
6155}
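/*
 * Example from userspace (illustrative): "echo 65536 >
 * /proc/sys/vm/min_free_kbytes" lands here with write != 0, records the
 * new value as user_min_free_kbytes and recomputes every zone's
 * watermarks via setup_per_zone_wmarks().
 */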
6156
Christoph Lameter96146342006-07-03 00:24:13 -07006157#ifdef CONFIG_NUMA
Joe Perchescccad5b2014-06-06 14:38:09 -07006158int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006159 void __user *buffer, size_t *length, loff_t *ppos)
Christoph Lameter96146342006-07-03 00:24:13 -07006160{
6161 struct zone *zone;
6162 int rc;
6163
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006164 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
Christoph Lameter96146342006-07-03 00:24:13 -07006165 if (rc)
6166 return rc;
6167
6168 for_each_zone(zone)
Jiang Liub40da042013-02-22 16:33:52 -08006169 zone->min_unmapped_pages = (zone->managed_pages *
Christoph Lameter96146342006-07-03 00:24:13 -07006170 sysctl_min_unmapped_ratio) / 100;
6171 return 0;
6172}
Christoph Lameter0ff38492006-09-25 23:31:52 -07006173
Joe Perchescccad5b2014-06-06 14:38:09 -07006174int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006175 void __user *buffer, size_t *length, loff_t *ppos)
Christoph Lameter0ff38492006-09-25 23:31:52 -07006176{
6177 struct zone *zone;
6178 int rc;
6179
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006180 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
Christoph Lameter0ff38492006-09-25 23:31:52 -07006181 if (rc)
6182 return rc;
6183
6184 for_each_zone(zone)
Jiang Liub40da042013-02-22 16:33:52 -08006185 zone->min_slab_pages = (zone->managed_pages *
Christoph Lameter0ff38492006-09-25 23:31:52 -07006186 sysctl_min_slab_ratio) / 100;
6187 return 0;
6188}
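/*
 * Worked example for both handlers above (hypothetical zone): with a 1GB
 * zone of 262144 4K pages and a ratio of 1, the per-zone threshold
 * becomes 262144 * 1 / 100 = 2621 pages (~10MB).
 */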
Christoph Lameter96146342006-07-03 00:24:13 -07006189#endif
6190
Linus Torvalds1da177e2005-04-16 15:20:36 -07006191/*
6192 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6193 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6194 * whenever sysctl_lowmem_reserve_ratio changes.
6195 *
6196 * The reserve ratio obviously has absolutely no relation with the
Mel Gorman41858962009-06-16 15:32:12 -07006197 * minimum watermarks. The lowmem reserve ratio only makes sense
Linus Torvalds1da177e2005-04-16 15:20:36 -07006198 * as a function of the boot-time zone sizes.
6199 */
Joe Perchescccad5b2014-06-06 14:38:09 -07006200int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006201 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006202{
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006203 proc_dointvec_minmax(table, write, buffer, length, ppos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006204 setup_per_zone_lowmem_reserve();
6205 return 0;
6206}
6207
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006208/*
6209 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
Pintu Kumarb8af2942013-09-11 14:20:34 -07006210 * cpu. It is the fraction of total pages in each zone that a hot per cpu
 6211 * pagelist can have before it gets flushed back to the buddy allocator.
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006212 */
Joe Perchescccad5b2014-06-06 14:38:09 -07006213int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006214 void __user *buffer, size_t *length, loff_t *ppos)
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006215{
6216 struct zone *zone;
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006217 int old_percpu_pagelist_fraction;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006218 int ret;
6219
Cody P Schaferc8e251f2013-07-03 15:01:29 -07006220 mutex_lock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006221 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
6222
6223 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6224 if (!write || ret < 0)
6225 goto out;
6226
6227 /* Sanity checking to avoid pcp imbalance */
6228 if (percpu_pagelist_fraction &&
6229 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
6230 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
6231 ret = -EINVAL;
6232 goto out;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006233 }
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006234
6235 /* No change? */
6236 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
6237 goto out;
6238
6239 for_each_populated_zone(zone) {
6240 unsigned int cpu;
6241
6242 for_each_possible_cpu(cpu)
6243 pageset_set_high_and_batch(zone,
6244 per_cpu_ptr(zone->pageset, cpu));
6245 }
6246out:
Cody P Schaferc8e251f2013-07-03 15:01:29 -07006247 mutex_unlock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006248 return ret;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006249}
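/*
 * Worked example (assuming pageset_set_high_and_batch() sizes pcp->high
 * as managed_pages / percpu_pagelist_fraction, per the comment above):
 * writing 8 for a hypothetical 1GB zone of 262144 4K pages gives each
 * CPU's hot list a high mark of about 32768 pages before it is drained
 * back to the buddy allocator.
 */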
6250
Rasmus Villemoesa9919c72015-06-24 16:56:28 -07006251#ifdef CONFIG_NUMA
David S. Millerf034b5d2006-08-24 03:08:07 -07006252int hashdist = HASHDIST_DEFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006253
Linus Torvalds1da177e2005-04-16 15:20:36 -07006254static int __init set_hashdist(char *str)
6255{
6256 if (!str)
6257 return 0;
6258 hashdist = simple_strtoul(str, &str, 0);
6259 return 1;
6260}
6261__setup("hashdist=", set_hashdist);
6262#endif
6263
6264/*
6265 * allocate a large system hash table from bootmem
6266 * - it is assumed that the hash table must contain an exact power-of-2
6267 * quantity of entries
6268 * - limit is the number of hash buckets, not the total allocation size
6269 */
6270void *__init alloc_large_system_hash(const char *tablename,
6271 unsigned long bucketsize,
6272 unsigned long numentries,
6273 int scale,
6274 int flags,
6275 unsigned int *_hash_shift,
6276 unsigned int *_hash_mask,
Tim Bird31fe62b2012-05-23 13:33:35 +00006277 unsigned long low_limit,
6278 unsigned long high_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006279{
Tim Bird31fe62b2012-05-23 13:33:35 +00006280 unsigned long long max = high_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006281 unsigned long log2qty, size;
6282 void *table = NULL;
6283
6284 /* allow the kernel cmdline to have a say */
6285 if (!numentries) {
6286 /* round applicable memory size up to nearest megabyte */
Andrew Morton04903662006-12-06 20:37:33 -08006287 numentries = nr_kernel_pages;
Jerry Zhoua7e83312013-09-11 14:20:26 -07006288
6289 /* It isn't necessary when PAGE_SIZE >= 1MB */
6290 if (PAGE_SHIFT < 20)
6291 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006292
6293 /* limit to 1 bucket per 2^scale bytes of low memory */
6294 if (scale > PAGE_SHIFT)
6295 numentries >>= (scale - PAGE_SHIFT);
6296 else
6297 numentries <<= (PAGE_SHIFT - scale);
Paul Mundt9ab37b82007-01-05 16:36:30 -08006298
6299 /* Make sure we've got at least a 0-order allocation.. */
Jan Beulich2c85f512009-09-21 17:03:07 -07006300 if (unlikely(flags & HASH_SMALL)) {
6301 /* Makes no sense without HASH_EARLY */
6302 WARN_ON(!(flags & HASH_EARLY));
6303 if (!(numentries >> *_hash_shift)) {
6304 numentries = 1UL << *_hash_shift;
6305 BUG_ON(!numentries);
6306 }
6307 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
Paul Mundt9ab37b82007-01-05 16:36:30 -08006308 numentries = PAGE_SIZE / bucketsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006309 }
John Hawkes6e692ed2006-03-25 03:08:02 -08006310 numentries = roundup_pow_of_two(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006311
6312 /* limit allocation size to 1/16 total memory by default */
6313 if (max == 0) {
6314 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
6315 do_div(max, bucketsize);
6316 }
Dimitri Sivanich074b8512012-02-08 12:39:07 -08006317 max = min(max, 0x80000000ULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006318
Tim Bird31fe62b2012-05-23 13:33:35 +00006319 if (numentries < low_limit)
6320 numentries = low_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006321 if (numentries > max)
6322 numentries = max;
6323
David Howellsf0d1b0b2006-12-08 02:37:49 -08006324 log2qty = ilog2(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006325
6326 do {
6327 size = bucketsize << log2qty;
6328 if (flags & HASH_EARLY)
Santosh Shilimkar67828322014-01-21 15:50:25 -08006329 table = memblock_virt_alloc_nopanic(size, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006330 else if (hashdist)
6331 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
6332 else {
Eric Dumazet1037b832007-07-15 23:38:05 -07006333 /*
6334 * If bucketsize is not a power-of-two, we may free
Mel Gormana1dd2682009-06-16 15:32:19 -07006335 * some pages at the end of hash table which
6336 * alloc_pages_exact() automatically does
Eric Dumazet1037b832007-07-15 23:38:05 -07006337 */
Catalin Marinas264ef8a2009-07-07 10:33:01 +01006338 if (get_order(size) < MAX_ORDER) {
Mel Gormana1dd2682009-06-16 15:32:19 -07006339 table = alloc_pages_exact(size, GFP_ATOMIC);
Catalin Marinas264ef8a2009-07-07 10:33:01 +01006340 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
6341 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006342 }
6343 } while (!table && size > PAGE_SIZE && --log2qty);
6344
6345 if (!table)
6346 panic("Failed to allocate %s hash table\n", tablename);
6347
Robin Holtf241e6602010-10-07 12:59:26 -07006348 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006349 tablename,
Robin Holtf241e6602010-10-07 12:59:26 -07006350 (1UL << log2qty),
David Howellsf0d1b0b2006-12-08 02:37:49 -08006351 ilog2(size) - PAGE_SHIFT,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006352 size);
6353
6354 if (_hash_shift)
6355 *_hash_shift = log2qty;
6356 if (_hash_mask)
6357 *_hash_mask = (1 << log2qty) - 1;
6358
6359 return table;
6360}
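/*
 * Worked sizing example (hypothetical numbers): on a machine with 4GB of
 * kernel-mapped memory (1048576 4K pages), a caller passing numentries=0
 * and scale=13 ends up with 1048576 >> (13 - 12) = 524288 buckets after
 * rounding to a power of two; with 8-byte buckets that is a 4MB table,
 * well under the default 1/16-of-memory cap computed above.
 */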
KAMEZAWA Hiroyukia117e662006-03-27 01:15:25 -08006361
Mel Gorman835c1342007-10-16 01:25:47 -07006362/* Return a pointer to the bitmap storing bits affecting a block of pages */
6363static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
6364 unsigned long pfn)
6365{
6366#ifdef CONFIG_SPARSEMEM
6367 return __pfn_to_section(pfn)->pageblock_flags;
6368#else
6369 return zone->pageblock_flags;
6370#endif /* CONFIG_SPARSEMEM */
6371}
Andrew Morton6220ec72006-10-19 23:29:05 -07006372
Mel Gorman835c1342007-10-16 01:25:47 -07006373static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6374{
6375#ifdef CONFIG_SPARSEMEM
6376 pfn &= (PAGES_PER_SECTION-1);
Mel Gormand9c23402007-10-16 01:26:01 -07006377 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
Mel Gorman835c1342007-10-16 01:25:47 -07006378#else
Laura Abbottc060f942013-01-11 14:31:51 -08006379 pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
Mel Gormand9c23402007-10-16 01:26:01 -07006380 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
Mel Gorman835c1342007-10-16 01:25:47 -07006381#endif /* CONFIG_SPARSEMEM */
6382}
6383
6384/**
Randy Dunlap1aab4d72014-07-27 14:15:33 -07006385 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
Mel Gorman835c1342007-10-16 01:25:47 -07006386 * @page: The page within the block of interest
Randy Dunlap1aab4d72014-07-27 14:15:33 -07006387 * @pfn: The target page frame number
6388 * @end_bitidx: The last bit of interest to retrieve
6389 * @mask: mask of bits that the caller is interested in
6390 *
6391 * Return: pageblock_bits flags
Mel Gorman835c1342007-10-16 01:25:47 -07006392 */
Mel Gormandc4b0ca2014-06-04 16:10:17 -07006393unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
Mel Gormane58469b2014-06-04 16:10:16 -07006394 unsigned long end_bitidx,
6395 unsigned long mask)
Mel Gorman835c1342007-10-16 01:25:47 -07006396{
6397 struct zone *zone;
6398 unsigned long *bitmap;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07006399 unsigned long bitidx, word_bitidx;
Mel Gormane58469b2014-06-04 16:10:16 -07006400 unsigned long word;
Mel Gorman835c1342007-10-16 01:25:47 -07006401
6402 zone = page_zone(page);
Mel Gorman835c1342007-10-16 01:25:47 -07006403 bitmap = get_pageblock_bitmap(zone, pfn);
6404 bitidx = pfn_to_bitidx(zone, pfn);
Mel Gormane58469b2014-06-04 16:10:16 -07006405 word_bitidx = bitidx / BITS_PER_LONG;
6406 bitidx &= (BITS_PER_LONG-1);
Mel Gorman835c1342007-10-16 01:25:47 -07006407
Mel Gormane58469b2014-06-04 16:10:16 -07006408 word = bitmap[word_bitidx];
6409 bitidx += end_bitidx;
6410 return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
Mel Gorman835c1342007-10-16 01:25:47 -07006411}
6412
6413/**
Mel Gormandc4b0ca2014-06-04 16:10:17 -07006414 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
Mel Gorman835c1342007-10-16 01:25:47 -07006415 * @page: The page within the block of interest
Mel Gorman835c1342007-10-16 01:25:47 -07006416 * @flags: The flags to set
Randy Dunlap1aab4d72014-07-27 14:15:33 -07006417 * @pfn: The target page frame number
6418 * @end_bitidx: The last bit of interest
6419 * @mask: mask of bits that the caller is interested in
Mel Gorman835c1342007-10-16 01:25:47 -07006420 */
Mel Gormandc4b0ca2014-06-04 16:10:17 -07006421void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6422 unsigned long pfn,
Mel Gormane58469b2014-06-04 16:10:16 -07006423 unsigned long end_bitidx,
6424 unsigned long mask)
Mel Gorman835c1342007-10-16 01:25:47 -07006425{
6426 struct zone *zone;
6427 unsigned long *bitmap;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07006428 unsigned long bitidx, word_bitidx;
Mel Gormane58469b2014-06-04 16:10:16 -07006429 unsigned long old_word, word;
6430
6431 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
Mel Gorman835c1342007-10-16 01:25:47 -07006432
6433 zone = page_zone(page);
Mel Gorman835c1342007-10-16 01:25:47 -07006434 bitmap = get_pageblock_bitmap(zone, pfn);
6435 bitidx = pfn_to_bitidx(zone, pfn);
Mel Gormane58469b2014-06-04 16:10:16 -07006436 word_bitidx = bitidx / BITS_PER_LONG;
6437 bitidx &= (BITS_PER_LONG-1);
6438
Sasha Levin309381fea2014-01-23 15:52:54 -08006439 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
Mel Gorman835c1342007-10-16 01:25:47 -07006440
Mel Gormane58469b2014-06-04 16:10:16 -07006441 bitidx += end_bitidx;
6442 mask <<= (BITS_PER_LONG - bitidx - 1);
6443 flags <<= (BITS_PER_LONG - bitidx - 1);
6444
Jason Low4db0c3c2015-04-15 16:14:08 -07006445 word = READ_ONCE(bitmap[word_bitidx]);
Mel Gormane58469b2014-06-04 16:10:16 -07006446 for (;;) {
6447 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
6448 if (word == old_word)
6449 break;
6450 word = old_word;
6451 }
Mel Gorman835c1342007-10-16 01:25:47 -07006452}
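/*
 * Layout note (follows from the BUILD_BUG_ON above): with
 * NR_PAGEBLOCK_BITS == 4, each unsigned long of the bitmap packs the
 * flags of BITS_PER_LONG / 4 pageblocks (16 on 64-bit), which is why the
 * helpers above compute word_bitidx and then shift within that word.
 */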
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07006453
6454/*
Minchan Kim80934512012-07-31 16:43:01 -07006455 * This function checks whether the pageblock includes unmovable pages or not.
 6456 * If @count is not zero, it is okay to include up to @count unmovable pages.
 6457 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07006458 * The PageLRU check without isolation or lru_lock could race so that a
Minchan Kim80934512012-07-31 16:43:01 -07006459 * MIGRATE_MOVABLE block might include unmovable pages. This means you can't
 6460 * expect this function to be exact.
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07006461 */
Wen Congyangb023f462012-12-11 16:00:45 -08006462bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
6463 bool skip_hwpoisoned_pages)
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006464{
6465 unsigned long pfn, iter, found;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01006466 int mt;
6467
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006468 /*
 6469	 * To avoid noisy data, lru_add_drain_all() should be called first.
Minchan Kim80934512012-07-31 16:43:01 -07006470	 * If the zone is ZONE_MOVABLE, it never contains unmovable pages.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006471 */
6472 if (zone_idx(zone) == ZONE_MOVABLE)
Minchan Kim80934512012-07-31 16:43:01 -07006473 return false;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01006474 mt = get_pageblock_migratetype(page);
6475 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
Minchan Kim80934512012-07-31 16:43:01 -07006476 return false;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006477
6478 pfn = page_to_pfn(page);
6479 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
6480 unsigned long check = pfn + iter;
6481
Namhyung Kim29723fc2011-02-25 14:44:25 -08006482 if (!pfn_valid_within(check))
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006483 continue;
Namhyung Kim29723fc2011-02-25 14:44:25 -08006484
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006485 page = pfn_to_page(check);
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07006486
6487 /*
6488 * Hugepages are not in LRU lists, but they're movable.
 6489		 * We need not scan over tail pages because we don't
6490 * handle each tail page individually in migration.
6491 */
6492 if (PageHuge(page)) {
6493 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
6494 continue;
6495 }
6496
Minchan Kim97d255c2012-07-31 16:42:59 -07006497 /*
 6498		 * We can't use page_count without pinning the page
 6499		 * because another CPU can free the compound page.
 6500		 * This check already skips compound tails of THP
 6501		 * because their page->_count is zero at all times.
6502 */
6503 if (!atomic_read(&page->_count)) {
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006504 if (PageBuddy(page))
6505 iter += (1 << page_order(page)) - 1;
6506 continue;
6507 }
Minchan Kim97d255c2012-07-31 16:42:59 -07006508
Wen Congyangb023f462012-12-11 16:00:45 -08006509 /*
6510 * The HWPoisoned page may be not in buddy system, and
6511 * page_count() is not 0.
6512 */
6513 if (skip_hwpoisoned_pages && PageHWPoison(page))
6514 continue;
6515
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006516 if (!PageLRU(page))
6517 found++;
6518 /*
Johannes Weiner6b4f7792014-12-12 16:56:13 -08006519		 * If there are RECLAIMABLE pages, we need to check
 6520		 * them. But for now, memory offline itself doesn't call
 6521		 * shrink_node_slabs() and this still needs to be fixed.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006522 */
6523 /*
 6524		 * If the page is not RAM, page_count() should be 0.
 6525		 * We don't need any further checks. This is a _used_ non-movable page.
6526 *
6527 * The problematic thing here is PG_reserved pages. PG_reserved
6528 * is set to both of a memory hole page and a _used_ kernel
6529 * page at boot.
6530 */
6531 if (found > count)
Minchan Kim80934512012-07-31 16:43:01 -07006532 return true;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006533 }
Minchan Kim80934512012-07-31 16:43:01 -07006534 return false;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07006535}
6536
6537bool is_pageblock_removable_nolock(struct page *page)
6538{
Michal Hocko656a0702012-01-20 14:33:58 -08006539 struct zone *zone;
6540 unsigned long pfn;
Michal Hocko687875fb2012-01-20 14:33:55 -08006541
6542 /*
6543 * We have to be careful here because we are iterating over memory
6544 * sections which are not zone aware so we might end up outside of
6545 * the zone but still within the section.
Michal Hocko656a0702012-01-20 14:33:58 -08006546	 * We also have to take care of the node. If the node is offline,
 6547	 * its NODE_DATA will be NULL - see page_zone.
Michal Hocko687875fb2012-01-20 14:33:55 -08006548 */
Michal Hocko656a0702012-01-20 14:33:58 -08006549 if (!node_online(page_to_nid(page)))
6550 return false;
6551
6552 zone = page_zone(page);
6553 pfn = page_to_pfn(page);
Cody P Schafer108bcc92013-02-22 16:35:23 -08006554 if (!zone_spans_pfn(zone, pfn))
Michal Hocko687875fb2012-01-20 14:33:55 -08006555 return false;
6556
Wen Congyangb023f462012-12-11 16:00:45 -08006557 return !has_unmovable_pages(zone, page, 0, true);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07006558}
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07006559
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006560#ifdef CONFIG_CMA
6561
6562static unsigned long pfn_max_align_down(unsigned long pfn)
6563{
6564 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6565 pageblock_nr_pages) - 1);
6566}
6567
6568static unsigned long pfn_max_align_up(unsigned long pfn)
6569{
6570 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6571 pageblock_nr_pages));
6572}
6573
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006574/* [start, end) must belong to a single zone. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07006575static int __alloc_contig_migrate_range(struct compact_control *cc,
6576 unsigned long start, unsigned long end)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006577{
6578 /* This function is based on compact_zone() from compaction.c. */
Minchan Kimbeb51ea2012-10-08 16:33:51 -07006579 unsigned long nr_reclaimed;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006580 unsigned long pfn = start;
6581 unsigned int tries = 0;
6582 int ret = 0;
6583
Marek Szyprowskibe49a6e2012-12-12 13:51:19 -08006584 migrate_prep();
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006585
Mel Gormanbb13ffe2012-10-08 16:32:41 -07006586 while (pfn < end || !list_empty(&cc->migratepages)) {
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006587 if (fatal_signal_pending(current)) {
6588 ret = -EINTR;
6589 break;
6590 }
6591
Mel Gormanbb13ffe2012-10-08 16:32:41 -07006592 if (list_empty(&cc->migratepages)) {
6593 cc->nr_migratepages = 0;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07006594 pfn = isolate_migratepages_range(cc, pfn, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006595 if (!pfn) {
6596 ret = -EINTR;
6597 break;
6598 }
6599 tries = 0;
6600 } else if (++tries == 5) {
6601 ret = ret < 0 ? ret : -EBUSY;
6602 break;
6603 }
6604
Minchan Kimbeb51ea2012-10-08 16:33:51 -07006605 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6606 &cc->migratepages);
6607 cc->nr_migratepages -= nr_reclaimed;
Minchan Kim02c6de82012-10-08 16:31:55 -07006608
Hugh Dickins9c620e22013-02-22 16:35:14 -08006609 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
David Rientjese0b9dae2014-06-04 16:08:28 -07006610 NULL, 0, cc->mode, MR_CMA);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006611 }
Srinivas Pandruvada2a6f5122013-02-22 16:32:09 -08006612 if (ret < 0) {
6613 putback_movable_pages(&cc->migratepages);
6614 return ret;
6615 }
6616 return 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006617}
6618
6619/**
6620 * alloc_contig_range() -- tries to allocate given range of pages
6621 * @start: start PFN to allocate
6622 * @end: one-past-the-last PFN to allocate
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02006623 * @migratetype:	migratetype of the underlying pageblocks (either
6624 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
6625 * in range must have the same migratetype and it must
6626 * be either of the two.
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006627 *
6628 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6629 * aligned, however it's the caller's responsibility to guarantee that
6630 * we are the only thread that changes migrate type of pageblocks the
6631 * pages fall in.
6632 *
6633 * The PFN range must belong to a single zone.
6634 *
6635 * Returns zero on success or negative error code. On success all
 6636 * pages whose PFN is in [start, end) are allocated for the caller and
6637 * need to be freed with free_contig_range().
6638 */
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02006639int alloc_contig_range(unsigned long start, unsigned long end,
6640 unsigned migratetype)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006641{
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006642 unsigned long outer_start, outer_end;
6643 int ret = 0, order;
6644
Mel Gormanbb13ffe2012-10-08 16:32:41 -07006645 struct compact_control cc = {
6646 .nr_migratepages = 0,
6647 .order = -1,
6648 .zone = page_zone(pfn_to_page(start)),
David Rientjese0b9dae2014-06-04 16:08:28 -07006649 .mode = MIGRATE_SYNC,
Mel Gormanbb13ffe2012-10-08 16:32:41 -07006650 .ignore_skip_hint = true,
6651 };
6652 INIT_LIST_HEAD(&cc.migratepages);
6653
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006654 /*
6655 * What we do here is we mark all pageblocks in range as
6656 * MIGRATE_ISOLATE. Because pageblock and max order pages may
 6657	 * have different sizes, and due to the way the page allocator
 6658	 * works, we align the range to the biggest of the two so
 6659	 * that the page allocator won't try to merge buddies from
6660 * different pageblocks and change MIGRATE_ISOLATE to some
6661 * other migration type.
6662 *
6663 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6664 * migrate the pages from an unaligned range (ie. pages that
6665 * we are interested in). This will put all the pages in
6666 * range back to page allocator as MIGRATE_ISOLATE.
6667 *
6668 * When this is done, we take the pages in range from page
6669 * allocator removing them from the buddy system. This way
6670 * page allocator will never consider using them.
6671 *
6672 * This lets us mark the pageblocks back as
6673 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6674 * aligned range but not in the unaligned, original range are
6675 * put back to page allocator so that buddy can use them.
6676 */
6677
6678 ret = start_isolate_page_range(pfn_max_align_down(start),
Wen Congyangb023f462012-12-11 16:00:45 -08006679 pfn_max_align_up(end), migratetype,
6680 false);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006681 if (ret)
Bob Liu86a595f2012-10-25 13:37:56 -07006682 return ret;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006683
Mel Gormanbb13ffe2012-10-08 16:32:41 -07006684 ret = __alloc_contig_migrate_range(&cc, start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006685 if (ret)
6686 goto done;
6687
6688 /*
6689 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
6690 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
6691 * more, all pages in [start, end) are free in page allocator.
6692 * What we are going to do is to allocate all pages from
6693 * [start, end) (that is remove them from page allocator).
6694 *
6695 * The only problem is that pages at the beginning and at the
6696 * end of interesting range may be not aligned with pages that
6697 * page allocator holds, ie. they can be part of higher order
6698 * pages. Because of this, we reserve the bigger range and
6699 * once this is done free the pages we are not interested in.
6700 *
6701 * We don't have to hold zone->lock here because the pages are
6702 * isolated thus they won't get removed from buddy.
6703 */
6704
6705 lru_add_drain_all();
Vlastimil Babka510f5502014-12-10 15:43:07 -08006706 drain_all_pages(cc.zone);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006707
6708 order = 0;
6709 outer_start = start;
6710 while (!PageBuddy(pfn_to_page(outer_start))) {
6711 if (++order >= MAX_ORDER) {
6712 ret = -EBUSY;
6713 goto done;
6714 }
6715 outer_start &= ~0UL << order;
6716 }
6717
6718 /* Make sure the range is really isolated. */
Wen Congyangb023f462012-12-11 16:00:45 -08006719 if (test_pages_isolated(outer_start, end, false)) {
Michal Nazarewiczdae803e2014-11-13 15:19:27 -08006720 pr_info("%s: [%lx, %lx) PFNs busy\n",
6721 __func__, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006722 ret = -EBUSY;
6723 goto done;
6724 }
6725
Marek Szyprowski49f223a2012-01-25 12:49:24 +01006726 /* Grab isolated pages from freelists. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07006727 outer_end = isolate_freepages_range(&cc, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006728 if (!outer_end) {
6729 ret = -EBUSY;
6730 goto done;
6731 }
6732
6733 /* Free head and tail (if any) */
6734 if (start != outer_start)
6735 free_contig_range(outer_start, start - outer_start);
6736 if (end != outer_end)
6737 free_contig_range(end, outer_end - end);
6738
6739done:
6740 undo_isolate_page_range(pfn_max_align_down(start),
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02006741 pfn_max_align_up(end), migratetype);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006742 return ret;
6743}
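/*
 * Sketch of a typical caller (illustrative; the real wrapper lives
 * outside this file): a CMA-style allocator that manages a contiguous
 * region would do roughly
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
 *	...
 *	free_contig_range(pfn, nr_pages);
 *
 * with pfn and nr_pages describing whatever range it owns.
 */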
6744
6745void free_contig_range(unsigned long pfn, unsigned nr_pages)
6746{
Marek Szyprowskibcc2b022012-12-20 15:05:18 -08006747 unsigned int count = 0;
6748
6749 for (; nr_pages--; pfn++) {
6750 struct page *page = pfn_to_page(pfn);
6751
6752 count += page_count(page) != 1;
6753 __free_page(page);
6754 }
6755 WARN(count != 0, "%d pages are still in use!\n", count);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01006756}
6757#endif
6758
Jiang Liu4ed7e022012-07-31 16:43:35 -07006759#ifdef CONFIG_MEMORY_HOTPLUG
Cody P Schafer0a647f32013-07-03 15:01:33 -07006760/*
6761 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 6762 * page high values need to be recalculated.
6763 */
Jiang Liu4ed7e022012-07-31 16:43:35 -07006764void __meminit zone_pcp_update(struct zone *zone)
6765{
Cody P Schafer0a647f32013-07-03 15:01:33 -07006766 unsigned cpu;
Cody P Schaferc8e251f2013-07-03 15:01:29 -07006767 mutex_lock(&pcp_batch_high_lock);
Cody P Schafer0a647f32013-07-03 15:01:33 -07006768 for_each_possible_cpu(cpu)
Cody P Schafer169f6c12013-07-03 15:01:41 -07006769 pageset_set_high_and_batch(zone,
6770 per_cpu_ptr(zone->pageset, cpu));
Cody P Schaferc8e251f2013-07-03 15:01:29 -07006771 mutex_unlock(&pcp_batch_high_lock);
Jiang Liu4ed7e022012-07-31 16:43:35 -07006772}
6773#endif
6774
Jiang Liu340175b2012-07-31 16:43:32 -07006775void zone_pcp_reset(struct zone *zone)
6776{
6777 unsigned long flags;
Minchan Kim5a883812012-10-08 16:33:39 -07006778 int cpu;
6779 struct per_cpu_pageset *pset;
Jiang Liu340175b2012-07-31 16:43:32 -07006780
6781 /* avoid races with drain_pages() */
6782 local_irq_save(flags);
6783 if (zone->pageset != &boot_pageset) {
Minchan Kim5a883812012-10-08 16:33:39 -07006784 for_each_online_cpu(cpu) {
6785 pset = per_cpu_ptr(zone->pageset, cpu);
6786 drain_zonestat(zone, pset);
6787 }
Jiang Liu340175b2012-07-31 16:43:32 -07006788 free_percpu(zone->pageset);
6789 zone->pageset = &boot_pageset;
6790 }
6791 local_irq_restore(flags);
6792}
6793
Wen Congyang6dcd73d2012-12-11 16:01:01 -08006794#ifdef CONFIG_MEMORY_HOTREMOVE
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07006795/*
6796 * All pages in the range must be isolated before calling this.
6797 */
6798void
6799__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6800{
6801 struct page *page;
6802 struct zone *zone;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07006803 unsigned int order, i;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07006804 unsigned long pfn;
6805 unsigned long flags;
6806 /* find the first valid pfn */
6807 for (pfn = start_pfn; pfn < end_pfn; pfn++)
6808 if (pfn_valid(pfn))
6809 break;
6810 if (pfn == end_pfn)
6811 return;
6812 zone = page_zone(pfn_to_page(pfn));
6813 spin_lock_irqsave(&zone->lock, flags);
6814 pfn = start_pfn;
6815 while (pfn < end_pfn) {
6816 if (!pfn_valid(pfn)) {
6817 pfn++;
6818 continue;
6819 }
6820 page = pfn_to_page(pfn);
Wen Congyangb023f462012-12-11 16:00:45 -08006821 /*
 6822		 * The HWPoisoned page may not be in the buddy system, and
6823 * page_count() is not 0.
6824 */
6825 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6826 pfn++;
6827 SetPageReserved(page);
6828 continue;
6829 }
6830
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07006831 BUG_ON(page_count(page));
6832 BUG_ON(!PageBuddy(page));
6833 order = page_order(page);
6834#ifdef CONFIG_DEBUG_VM
6835 printk(KERN_INFO "remove from free list %lx %d %lx\n",
6836 pfn, 1 << order, end_pfn);
6837#endif
6838 list_del(&page->lru);
6839 rmv_page_order(page);
6840 zone->free_area[order].nr_free--;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07006841 for (i = 0; i < (1 << order); i++)
6842 SetPageReserved((page+i));
6843 pfn += (1 << order);
6844 }
6845 spin_unlock_irqrestore(&zone->lock, flags);
6846}
6847#endif
Wu Fengguang8d22ba12009-12-16 12:19:58 +01006848
6849#ifdef CONFIG_MEMORY_FAILURE
6850bool is_free_buddy_page(struct page *page)
6851{
6852 struct zone *zone = page_zone(page);
6853 unsigned long pfn = page_to_pfn(page);
6854 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07006855 unsigned int order;
Wu Fengguang8d22ba12009-12-16 12:19:58 +01006856
6857 spin_lock_irqsave(&zone->lock, flags);
6858 for (order = 0; order < MAX_ORDER; order++) {
6859 struct page *page_head = page - (pfn & ((1 << order) - 1));
6860
6861 if (PageBuddy(page_head) && page_order(page_head) >= order)
6862 break;
6863 }
6864 spin_unlock_irqrestore(&zone->lock, flags);
6865
6866 return order < MAX_ORDER;
6867}
6868#endif