// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

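/*
 * Mark the pageblock containing @page as MIGRATE_ISOLATE and move its free
 * pages onto the isolate freelist, all under zone->lock.  Fails with -EBUSY
 * if the pageblock is already isolated or still contains unmovable pages.
 */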
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct page *unmovable = NULL;
	struct zone *zone;
	unsigned long flags;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.  Return -EBUSY.
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		ret = 0;
	}

out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	else if ((isol_flags & REPORT_FAILURE) && unmovable)
		/*
		 * printk() with zone->lock held is guaranteed to trigger a
		 * lockdep splat, so defer the dump until the lock is dropped.
		 */
		dump_page(unmovable, "unmovable page");

	return ret;
}

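/*
 * Revert an isolated pageblock to @migratetype: move its free pages back to
 * the matching freelists (or re-free a high-order buddy page that spans it)
 * and clear MIGRATE_ISOLATE from the pageblock flags.
 */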
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because free pages larger than pageblock_order are not allowed to
	 * merge while on an isolated pageblock (due to the freepage counting
	 * problem), there may be an unmerged free buddy page here.
	 * move_freepages_block() does not handle merging, so take a different
	 * approach: isolate the free page and free it again, which lets the
	 * buddy allocator merge it.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page larger than pageblock_order, there can
	 * be no other free pages in the range, so the costly pageblock scan
	 * for moving free pages can be skipped.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}

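/*
 * Return the first online page in [pfn, pfn + nr_pages), or NULL if the
 * whole range is offline or has no valid memmap.
 */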
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory,
 *					 e.g., skip over PageHWPoison() pages
 *			REPORT_FAILURE - report details about the failure to
 *					 isolate the range
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes migrate
 * types other than MOVABLE or CMA, this fails with -EBUSY. To finally isolate
 * all pages in the range, the caller has to free all pages in the range;
 * test_pages_isolated() can be used to test for that.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate, and set_migratetype_isolate
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Return: the number of isolated pageblocks on success and -EBUSY if any part
 * of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;
	int nr_isolate_pageblock = 0;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			nr_isolate_pageblock++;
		}
	}
	return nr_isolate_pageblock;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

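/*
 * Migration target allocation callback: allocate a replacement page,
 * preferring the local node but allowing any node that has memory.
 */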
struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}